Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2023-07-21' of ssh://git.freedesktop.org/git/drm/drm-misc into drm-next

drm-misc-next for 6.6:

UAPI Changes:
- syncobj: New DRM_IOCTL_SYNCOBJ_EVENTFD ioctl

Cross-subsystem Changes:
- Converge to use of_device_uevent()

Core Changes:
- GPU VA Manager
- improvements to make it clearer that drm_minor_type is uAPI

Driver Changes:
- ssd130x: Improve intermediate buffer size computation
- bridges:
- adv7511: Fix low refresh rate
- anx7625: Switch to macros instead of hardcoded values
- panel:
- ld9040: Backlight support, magic improved

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Maxime Ripard <mripard@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/urs7omo5xnhglztxgwgsslws7duqfj4jhzrtppaenuvdh2lwuz@4htm4iiqhkep

+3014 -215
+36
Documentation/gpu/drm-mm.rst
··· 466 466 .. kernel-doc:: drivers/gpu/drm/drm_mm.c 467 467 :export: 468 468 469 + DRM GPU VA Manager 470 + ================== 471 + 472 + Overview 473 + -------- 474 + 475 + .. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c 476 + :doc: Overview 477 + 478 + Split and Merge 479 + --------------- 480 + 481 + .. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c 482 + :doc: Split and Merge 483 + 484 + Locking 485 + ------- 486 + 487 + .. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c 488 + :doc: Locking 489 + 490 + Examples 491 + -------- 492 + 493 + .. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c 494 + :doc: Examples 495 + 496 + DRM GPU VA Manager Function References 497 + -------------------------------------- 498 + 499 + .. kernel-doc:: include/drm/drm_gpuva_mgr.h 500 + :internal: 501 + 502 + .. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c 503 + :export: 504 + 469 505 DRM Buddy Allocator 470 506 =================== 471 507
+1
drivers/gpu/drm/Makefile
··· 45 45 drm_vblank.o \ 46 46 drm_vblank_work.o \ 47 47 drm_vma_manager.o \ 48 + drm_gpuva_mgr.o \ 48 49 drm_writeback.o 49 50 drm-$(CONFIG_DRM_LEGACY) += \ 50 51 drm_agpsupport.o \
+1 -1
drivers/gpu/drm/arm/display/komeda/komeda_dev.c
··· 6 6 */ 7 7 #include <linux/io.h> 8 8 #include <linux/iommu.h> 9 - #include <linux/of_device.h> 9 + #include <linux/of.h> 10 10 #include <linux/of_graph.h> 11 11 #include <linux/of_reserved_mem.h> 12 12 #include <linux/platform_device.h>
+1
drivers/gpu/drm/arm/malidp_drv.c
··· 12 12 #include <linux/of_device.h> 13 13 #include <linux/of_graph.h> 14 14 #include <linux/of_reserved_mem.h> 15 + #include <linux/platform_device.h> 15 16 #include <linux/pm_runtime.h> 16 17 #include <linux/debugfs.h> 17 18
-1
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
··· 7 7 8 8 #include <linux/device.h> 9 9 #include <linux/module.h> 10 - #include <linux/of_device.h> 11 10 #include <linux/slab.h> 12 11 #include <linux/clk.h> 13 12
+8 -3
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 9 9 #include <linux/device.h> 10 10 #include <linux/gpio/consumer.h> 11 11 #include <linux/module.h> 12 - #include <linux/of_device.h> 12 + #include <linux/of.h> 13 13 #include <linux/slab.h> 14 14 15 15 #include <media/cec.h> ··· 786 786 else 787 787 low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; 788 788 789 - regmap_update_bits(adv7511->regmap, 0xfb, 790 - 0x6, low_refresh_rate << 1); 789 + if (adv7511->type == ADV7511) 790 + regmap_update_bits(adv7511->regmap, 0xfb, 791 + 0x6, low_refresh_rate << 1); 792 + else 793 + regmap_update_bits(adv7511->regmap, 0x4a, 794 + 0xc, low_refresh_rate << 2); 795 + 791 796 regmap_update_bits(adv7511->regmap, 0x17, 792 797 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); 793 798
+6 -6
drivers/gpu/drm/bridge/analogix/anx7625.c
··· 872 872 } 873 873 874 874 /* Read downstream capability */ 875 - ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap); 875 + ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, DP_AUX_HDCP_BCAPS, 1, &bcap); 876 876 if (ret < 0) 877 877 return ret; 878 878 879 - if (!(bcap & 0x01)) { 879 + if (!(bcap & DP_BCAPS_HDCP_CAPABLE)) { 880 880 pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap); 881 881 return 0; 882 882 } ··· 931 931 932 932 dev_dbg(dev, "set downstream sink into normal\n"); 933 933 /* Downstream sink enter into normal mode */ 934 - data = 1; 935 - ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data); 934 + data = DP_SET_POWER_D0; 935 + ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data); 936 936 if (ret < 0) 937 937 dev_err(dev, "IO error : set sink into normal mode fail\n"); 938 938 ··· 971 971 972 972 dev_dbg(dev, "notify downstream enter into standby\n"); 973 973 /* Downstream monitor enter into standby mode */ 974 - data = 2; 975 - ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data); 974 + data = DP_SET_POWER_D3; 975 + ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data); 976 976 if (ret < 0) 977 977 DRM_DEV_ERROR(dev, "IO error : mute video fail\n"); 978 978
+1 -2
drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
··· 14 14 #include <linux/interrupt.h> 15 15 #include <linux/iopoll.h> 16 16 #include <linux/module.h> 17 - #include <linux/of_address.h> 18 - #include <linux/of_device.h> 17 + #include <linux/of.h> 19 18 #include <linux/of_graph.h> 20 19 #include <linux/platform_device.h> 21 20 #include <linux/pm_runtime.h>
-1
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
··· 29 29 #include <linux/media-bus-format.h> 30 30 #include <linux/module.h> 31 31 #include <linux/of.h> 32 - #include <linux/of_device.h> 33 32 #include <linux/phy/phy.h> 34 33 #include <linux/phy/phy-dp.h> 35 34 #include <linux/platform_device.h>
+1 -1
drivers/gpu/drm/bridge/chipone-icn6211.c
··· 17 17 #include <linux/i2c.h> 18 18 #include <linux/media-bus-format.h> 19 19 #include <linux/module.h> 20 - #include <linux/of_device.h> 20 + #include <linux/of.h> 21 21 #include <linux/regmap.h> 22 22 #include <linux/regulator/consumer.h> 23 23
-1
drivers/gpu/drm/bridge/display-connector.c
··· 10 10 #include <linux/module.h> 11 11 #include <linux/mutex.h> 12 12 #include <linux/of.h> 13 - #include <linux/of_device.h> 14 13 #include <linux/platform_device.h> 15 14 #include <linux/regulator/consumer.h> 16 15
-1
drivers/gpu/drm/bridge/fsl-ldb.c
··· 8 8 #include <linux/mfd/syscon.h> 9 9 #include <linux/module.h> 10 10 #include <linux/of.h> 11 - #include <linux/of_device.h> 12 11 #include <linux/of_graph.h> 13 12 #include <linux/platform_device.h> 14 13 #include <linux/regmap.h>
+1 -1
drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
··· 9 9 #include <linux/mfd/syscon.h> 10 10 #include <linux/module.h> 11 11 #include <linux/of.h> 12 - #include <linux/of_device.h> 13 12 #include <linux/of_graph.h> 14 13 #include <linux/phy/phy.h> 14 + #include <linux/platform_device.h> 15 15 #include <linux/pm_runtime.h> 16 16 #include <linux/regmap.h> 17 17
+1
drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
··· 12 12 #include <linux/of_device.h> 13 13 #include <linux/of_graph.h> 14 14 #include <linux/phy/phy.h> 15 + #include <linux/platform_device.h> 15 16 #include <linux/pm_runtime.h> 16 17 #include <linux/regmap.h> 17 18
-1
drivers/gpu/drm/bridge/lontium-lt9211.c
··· 16 16 #include <linux/i2c.h> 17 17 #include <linux/media-bus-format.h> 18 18 #include <linux/module.h> 19 - #include <linux/of_device.h> 20 19 #include <linux/of_graph.h> 21 20 #include <linux/regmap.h> 22 21 #include <linux/regulator/consumer.h>
-1
drivers/gpu/drm/bridge/lvds-codec.c
··· 7 7 #include <linux/gpio/consumer.h> 8 8 #include <linux/module.h> 9 9 #include <linux/of.h> 10 - #include <linux/of_device.h> 11 10 #include <linux/of_graph.h> 12 11 #include <linux/platform_device.h> 13 12 #include <linux/regulator/consumer.h>
+1 -1
drivers/gpu/drm/bridge/nwl-dsi.c
··· 16 16 #include <linux/module.h> 17 17 #include <linux/mux/consumer.h> 18 18 #include <linux/of.h> 19 - #include <linux/of_platform.h> 20 19 #include <linux/phy/phy.h> 20 + #include <linux/platform_device.h> 21 21 #include <linux/regmap.h> 22 22 #include <linux/reset.h> 23 23 #include <linux/sys_soc.h>
-1
drivers/gpu/drm/bridge/parade-ps8622.c
··· 12 12 #include <linux/i2c.h> 13 13 #include <linux/module.h> 14 14 #include <linux/of.h> 15 - #include <linux/of_device.h> 16 15 #include <linux/pm.h> 17 16 #include <linux/regulator/consumer.h> 18 17
+2 -1
drivers/gpu/drm/bridge/samsung-dsim.c
··· 16 16 #include <linux/delay.h> 17 17 #include <linux/irq.h> 18 18 #include <linux/media-bus-format.h> 19 - #include <linux/of_device.h> 19 + #include <linux/of.h> 20 20 #include <linux/phy/phy.h> 21 + #include <linux/platform_device.h> 21 22 22 23 #include <video/mipi_display.h> 23 24
+2 -1
drivers/gpu/drm/bridge/simple-bridge.c
··· 8 8 9 9 #include <linux/gpio/consumer.h> 10 10 #include <linux/module.h> 11 - #include <linux/of_device.h> 11 + #include <linux/of.h> 12 12 #include <linux/of_graph.h> 13 + #include <linux/platform_device.h> 13 14 #include <linux/regulator/consumer.h> 14 15 15 16 #include <drm/drm_atomic_helper.h>
+1 -1
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
··· 14 14 #include <linux/irq.h> 15 15 #include <linux/module.h> 16 16 #include <linux/mutex.h> 17 - #include <linux/of_device.h> 17 + #include <linux/of.h> 18 18 #include <linux/pinctrl/consumer.h> 19 19 #include <linux/regmap.h> 20 20 #include <linux/dma-mapping.h>
+1 -1
drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
··· 13 13 #include <linux/debugfs.h> 14 14 #include <linux/iopoll.h> 15 15 #include <linux/module.h> 16 - #include <linux/of_device.h> 16 + #include <linux/platform_device.h> 17 17 #include <linux/pm_runtime.h> 18 18 #include <linux/reset.h> 19 19
+1 -1
drivers/gpu/drm/bridge/ti-sn65dsi83.c
··· 31 31 #include <linux/i2c.h> 32 32 #include <linux/media-bus-format.h> 33 33 #include <linux/module.h> 34 - #include <linux/of_device.h> 34 + #include <linux/of.h> 35 35 #include <linux/of_graph.h> 36 36 #include <linux/regmap.h> 37 37 #include <linux/regulator/consumer.h>
+40
drivers/gpu/drm/drm_debugfs.c
··· 39 39 #include <drm/drm_file.h> 40 40 #include <drm/drm_gem.h> 41 41 #include <drm/drm_managed.h> 42 + #include <drm/drm_gpuva_mgr.h> 42 43 43 44 #include "drm_crtc_internal.h" 44 45 #include "drm_internal.h" ··· 176 175 .release = single_release, 177 176 }; 178 177 178 + /** 179 + * drm_debugfs_gpuva_info - dump the given DRM GPU VA space 180 + * @m: pointer to the &seq_file to write 181 + * @mgr: the &drm_gpuva_manager representing the GPU VA space 182 + * 183 + * Dumps the GPU VA mappings of a given DRM GPU VA manager. 184 + * 185 + * For each DRM GPU VA space drivers should call this function from their 186 + * &drm_info_list's show callback. 187 + * 188 + * Returns: 0 on success, -ENODEV if the &mgr is not initialized 189 + */ 190 + int drm_debugfs_gpuva_info(struct seq_file *m, 191 + struct drm_gpuva_manager *mgr) 192 + { 193 + struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node; 194 + 195 + if (!mgr->name) 196 + return -ENODEV; 197 + 198 + seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n", 199 + mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range); 200 + seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n", 201 + kva->va.addr, kva->va.addr + kva->va.range); 202 + seq_puts(m, "\n"); 203 + seq_puts(m, " VAs | start | range | end | object | object offset\n"); 204 + seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n"); 205 + drm_gpuva_for_each_va(va, mgr) { 206 + if (unlikely(va == kva)) 207 + continue; 208 + 209 + seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx\n", 210 + va->va.addr, va->va.range, va->va.addr + va->va.range, 211 + (u64)(uintptr_t)va->gem.obj, va->gem.offset); 212 + } 213 + 214 + return 0; 215 + } 216 + EXPORT_SYMBOL(drm_debugfs_gpuva_info); 179 217 180 218 /** 181 219 * drm_debugfs_create_files - Initialize a given set of debugfs files for DRM
+4 -4
drivers/gpu/drm/drm_drv.c
··· 84 84 */ 85 85 86 86 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, 87 - unsigned int type) 87 + enum drm_minor_type type) 88 88 { 89 89 switch (type) { 90 90 case DRM_MINOR_PRIMARY: ··· 116 116 } 117 117 } 118 118 119 - static int drm_minor_alloc(struct drm_device *dev, unsigned int type) 119 + static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type) 120 120 { 121 121 struct drm_minor *minor; 122 122 unsigned long flags; ··· 160 160 return 0; 161 161 } 162 162 163 - static int drm_minor_register(struct drm_device *dev, unsigned int type) 163 + static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type) 164 164 { 165 165 struct drm_minor *minor; 166 166 unsigned long flags; ··· 203 203 return ret; 204 204 } 205 205 206 - static void drm_minor_unregister(struct drm_device *dev, unsigned int type) 206 + static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type) 207 207 { 208 208 struct drm_minor *minor; 209 209 unsigned long flags;
+3
drivers/gpu/drm/drm_gem.c
··· 164 164 if (!obj->resv) 165 165 obj->resv = &obj->_resv; 166 166 167 + if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA)) 168 + drm_gem_gpuva_init(obj); 169 + 167 170 drm_vma_node_reset(&obj->vma_node); 168 171 INIT_LIST_HEAD(&obj->lru_node); 169 172 }
+1725
drivers/gpu/drm/drm_gpuva_mgr.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2022 Red Hat. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Danilo Krummrich <dakr@redhat.com> 25 + * 26 + */ 27 + 28 + #include <drm/drm_gpuva_mgr.h> 29 + 30 + #include <linux/interval_tree_generic.h> 31 + #include <linux/mm.h> 32 + 33 + /** 34 + * DOC: Overview 35 + * 36 + * The DRM GPU VA Manager, represented by struct drm_gpuva_manager keeps track 37 + * of a GPU's virtual address (VA) space and manages the corresponding virtual 38 + * mappings represented by &drm_gpuva objects. It also keeps track of the 39 + * mapping's backing &drm_gem_object buffers. 40 + * 41 + * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing 42 + * all existent GPU VA mappings using this &drm_gem_object as backing buffer. 
43 + * 44 + * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also 45 + * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'. 46 + * 47 + * The GPU VA manager internally uses a rb-tree to manage the 48 + * &drm_gpuva mappings within a GPU's virtual address space. 49 + * 50 + * The &drm_gpuva_manager contains a special &drm_gpuva representing the 51 + * portion of VA space reserved by the kernel. This node is initialized together 52 + * with the GPU VA manager instance and removed when the GPU VA manager is 53 + * destroyed. 54 + * 55 + * In a typical application drivers would embed struct drm_gpuva_manager and 56 + * struct drm_gpuva within their own driver specific structures, there won't be 57 + * any memory allocations of its own nor memory allocations of &drm_gpuva 58 + * entries. 59 + * 60 + * The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager 61 + * are contained within struct drm_gpuva already. Hence, for inserting 62 + * &drm_gpuva entries from within dma-fence signalling critical sections it is 63 + * enough to pre-allocate the &drm_gpuva structures. 64 + */ 65 + 66 + /** 67 + * DOC: Split and Merge 68 + * 69 + * Besides its capability to manage and represent a GPU VA space, the 70 + * &drm_gpuva_manager also provides functions to let the &drm_gpuva_manager 71 + * calculate a sequence of operations to satisfy a given map or unmap request. 72 + * 73 + * Therefore the DRM GPU VA manager provides an algorithm implementing splitting 74 + * and merging of existent GPU VA mappings with the ones that are requested to 75 + * be mapped or unmapped. This feature is required by the Vulkan API to 76 + * implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this 77 + * as VM BIND. 78 + * 79 + * Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks 80 + * containing map, unmap and remap operations for a given newly requested 81 + * mapping. 
The sequence of callbacks represents the set of operations to 82 + * execute in order to integrate the new mapping cleanly into the current state 83 + * of the GPU VA space. 84 + * 85 + * Depending on how the new GPU VA mapping intersects with the existent mappings 86 + * of the GPU VA space the &drm_gpuva_fn_ops callbacks contain an arbitrary 87 + * amount of unmap operations, a maximum of two remap operations and a single 88 + * map operation. The caller might receive no callback at all if no operation is 89 + * required, e.g. if the requested mapping already exists in the exact same way. 90 + * 91 + * The single map operation represents the original map operation requested by 92 + * the caller. 93 + * 94 + * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the 95 + * &drm_gpuva to unmap is physically contiguous with the original mapping 96 + * request. Optionally, if 'keep' is set, drivers may keep the actual page table 97 + * entries for this &drm_gpuva, adding the missing page table entries only and 98 + * update the &drm_gpuva_manager's view of things accordingly. 99 + * 100 + * Drivers may do the same optimization, namely delta page table updates, also 101 + * for remap operations. This is possible since &drm_gpuva_op_remap consists of 102 + * one unmap operation and one or two map operations, such that drivers can 103 + * derive the page table update delta accordingly. 104 + * 105 + * Note that there can't be more than two existent mappings to split up, one at 106 + * the beginning and one at the end of the new mapping, hence there is a 107 + * maximum of two remap operations. 108 + * 109 + * Analogous to drm_gpuva_sm_map() drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops 110 + * to call back into the driver in order to unmap a range of GPU VA space. The 111 + * logic behind this function is way simpler though: For all existent mappings 112 + * enclosed by the given range unmap operations are created. 
For mappings which 113 + * are only partially located within the given range, remap operations are 114 + * created such that those mappings are split up and re-mapped partially. 115 + * 116 + * As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(), 117 + * drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used 118 + * to directly obtain an instance of struct drm_gpuva_ops containing a list of 119 + * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list 120 + * contains the &drm_gpuva_ops analogous to the callbacks one would receive when 121 + * calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this way requires 122 + * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to 123 + * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory 124 + * allocations are possible (e.g. to allocate GPU page tables) and once in the 125 + * dma-fence signalling critical path. 126 + * 127 + * To update the &drm_gpuva_manager's view of the GPU VA space 128 + * drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can 129 + * safely be used from &drm_gpuva_fn_ops callbacks originating from 130 + * drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more 131 + * convenient to use the provided helper functions drm_gpuva_map(), 132 + * drm_gpuva_remap() and drm_gpuva_unmap() instead. 133 + * 134 + * The following diagram depicts the basic relationships of existent GPU VA 135 + * mappings, a newly requested mapping and the resulting mappings as implemented 136 + * by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these. 137 + * 138 + * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs 139 + * could be kept. 
140 + * 141 + * :: 142 + * 143 + * 0 a 1 144 + * old: |-----------| (bo_offset=n) 145 + * 146 + * 0 a 1 147 + * req: |-----------| (bo_offset=n) 148 + * 149 + * 0 a 1 150 + * new: |-----------| (bo_offset=n) 151 + * 152 + * 153 + * 2) Requested mapping is identical, except for the BO offset, hence replace 154 + * the mapping. 155 + * 156 + * :: 157 + * 158 + * 0 a 1 159 + * old: |-----------| (bo_offset=n) 160 + * 161 + * 0 a 1 162 + * req: |-----------| (bo_offset=m) 163 + * 164 + * 0 a 1 165 + * new: |-----------| (bo_offset=m) 166 + * 167 + * 168 + * 3) Requested mapping is identical, except for the backing BO, hence replace 169 + * the mapping. 170 + * 171 + * :: 172 + * 173 + * 0 a 1 174 + * old: |-----------| (bo_offset=n) 175 + * 176 + * 0 b 1 177 + * req: |-----------| (bo_offset=n) 178 + * 179 + * 0 b 1 180 + * new: |-----------| (bo_offset=n) 181 + * 182 + * 183 + * 4) Existent mapping is a left aligned subset of the requested one, hence 184 + * replace the existent one. 185 + * 186 + * :: 187 + * 188 + * 0 a 1 189 + * old: |-----| (bo_offset=n) 190 + * 191 + * 0 a 2 192 + * req: |-----------| (bo_offset=n) 193 + * 194 + * 0 a 2 195 + * new: |-----------| (bo_offset=n) 196 + * 197 + * .. note:: 198 + * We expect to see the same result for a request with a different BO 199 + * and/or non-contiguous BO offset. 200 + * 201 + * 202 + * 5) Requested mapping's range is a left aligned subset of the existent one, 203 + * but backed by a different BO. Hence, map the requested mapping and split 204 + * the existent one adjusting its BO offset. 205 + * 206 + * :: 207 + * 208 + * 0 a 2 209 + * old: |-----------| (bo_offset=n) 210 + * 211 + * 0 b 1 212 + * req: |-----| (bo_offset=n) 213 + * 214 + * 0 b 1 a' 2 215 + * new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1) 216 + * 217 + * .. note:: 218 + * We expect to see the same result for a request with a different BO 219 + * and/or non-contiguous BO offset. 
220 + * 221 + * 222 + * 6) Existent mapping is a superset of the requested mapping. Split it up, but 223 + * indicate that the backing PTEs could be kept. 224 + * 225 + * :: 226 + * 227 + * 0 a 2 228 + * old: |-----------| (bo_offset=n) 229 + * 230 + * 0 a 1 231 + * req: |-----| (bo_offset=n) 232 + * 233 + * 0 a 1 a' 2 234 + * new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1) 235 + * 236 + * 237 + * 7) Requested mapping's range is a right aligned subset of the existent one, 238 + * but backed by a different BO. Hence, map the requested mapping and split 239 + * the existent one, without adjusting the BO offset. 240 + * 241 + * :: 242 + * 243 + * 0 a 2 244 + * old: |-----------| (bo_offset=n) 245 + * 246 + * 1 b 2 247 + * req: |-----| (bo_offset=m) 248 + * 249 + * 0 a 1 b 2 250 + * new: |-----|-----| (a.bo_offset=n,b.bo_offset=m) 251 + * 252 + * 253 + * 8) Existent mapping is a superset of the requested mapping. Split it up, but 254 + * indicate that the backing PTEs could be kept. 255 + * 256 + * :: 257 + * 258 + * 0 a 2 259 + * old: |-----------| (bo_offset=n) 260 + * 261 + * 1 a 2 262 + * req: |-----| (bo_offset=n+1) 263 + * 264 + * 0 a' 1 a 2 265 + * new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1) 266 + * 267 + * 268 + * 9) Existent mapping is overlapped at the end by the requested mapping backed 269 + * by a different BO. Hence, map the requested mapping and split up the 270 + * existent one, without adjusting the BO offset. 271 + * 272 + * :: 273 + * 274 + * 0 a 2 275 + * old: |-----------| (bo_offset=n) 276 + * 277 + * 1 b 3 278 + * req: |-----------| (bo_offset=m) 279 + * 280 + * 0 a 1 b 3 281 + * new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m) 282 + * 283 + * 284 + * 10) Existent mapping is overlapped by the requested mapping, both having the 285 + * same backing BO with a contiguous offset. Indicate the backing PTEs of 286 + * the old mapping could be kept. 
287 + * 288 + * :: 289 + * 290 + * 0 a 2 291 + * old: |-----------| (bo_offset=n) 292 + * 293 + * 1 a 3 294 + * req: |-----------| (bo_offset=n+1) 295 + * 296 + * 0 a' 1 a 3 297 + * new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1) 298 + * 299 + * 300 + * 11) Requested mapping's range is a centered subset of the existent one 301 + * having a different backing BO. Hence, map the requested mapping and split 302 + * up the existent one in two mappings, adjusting the BO offset of the right 303 + * one accordingly. 304 + * 305 + * :: 306 + * 307 + * 0 a 3 308 + * old: |-----------------| (bo_offset=n) 309 + * 310 + * 1 b 2 311 + * req: |-----| (bo_offset=m) 312 + * 313 + * 0 a 1 b 2 a' 3 314 + * new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2) 315 + * 316 + * 317 + * 12) Requested mapping is a contiguous subset of the existent one. Split it 318 + * up, but indicate that the backing PTEs could be kept. 319 + * 320 + * :: 321 + * 322 + * 0 a 3 323 + * old: |-----------------| (bo_offset=n) 324 + * 325 + * 1 a 2 326 + * req: |-----| (bo_offset=n+1) 327 + * 328 + * 0 a' 1 a 2 a'' 3 329 + * old: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2) 330 + * 331 + * 332 + * 13) Existent mapping is a right aligned subset of the requested one, hence 333 + * replace the existent one. 334 + * 335 + * :: 336 + * 337 + * 1 a 2 338 + * old: |-----| (bo_offset=n+1) 339 + * 340 + * 0 a 2 341 + * req: |-----------| (bo_offset=n) 342 + * 343 + * 0 a 2 344 + * new: |-----------| (bo_offset=n) 345 + * 346 + * .. note:: 347 + * We expect to see the same result for a request with a different bo 348 + * and/or non-contiguous bo_offset. 349 + * 350 + * 351 + * 14) Existent mapping is a centered subset of the requested one, hence 352 + * replace the existent one. 
353 + * 354 + * :: 355 + * 356 + * 1 a 2 357 + * old: |-----| (bo_offset=n+1) 358 + * 359 + * 0 a 3 360 + * req: |----------------| (bo_offset=n) 361 + * 362 + * 0 a 3 363 + * new: |----------------| (bo_offset=n) 364 + * 365 + * .. note:: 366 + * We expect to see the same result for a request with a different bo 367 + * and/or non-contiguous bo_offset. 368 + * 369 + * 370 + * 15) Existent mapping is overlapped at the beginning by the requested mapping 371 + * backed by a different BO. Hence, map the requested mapping and split up 372 + * the existent one, adjusting its BO offset accordingly. 373 + * 374 + * :: 375 + * 376 + * 1 a 3 377 + * old: |-----------| (bo_offset=n) 378 + * 379 + * 0 b 2 380 + * req: |-----------| (bo_offset=m) 381 + * 382 + * 0 b 2 a' 3 383 + * new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2) 384 + */ 385 + 386 + /** 387 + * DOC: Locking 388 + * 389 + * Generally, the GPU VA manager does not take care of locking itself, it is 390 + * the driver's responsibility to take care about locking. Drivers might want to 391 + * protect the following operations: inserting, removing and iterating 392 + * &drm_gpuva objects as well as generating all kinds of operations, such as 393 + * split / merge or prefetch. 394 + * 395 + * The GPU VA manager also does not take care of the locking of the backing 396 + * &drm_gem_object buffers GPU VA lists by itself; drivers are responsible to 397 + * enforce mutual exclusion using either the GEMs dma_resv lock or alternatively 398 + * a driver specific external lock. For the latter see also 399 + * drm_gem_gpuva_set_lock(). 400 + * 401 + * However, the GPU VA manager contains lockdep checks to ensure callers of its 402 + * API hold the corresponding lock whenever the &drm_gem_objects GPU VA list is 403 + * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink(). 
404 + */ 405 + 406 + /** 407 + * DOC: Examples 408 + * 409 + * This section gives two examples on how to let the DRM GPUVA Manager generate 410 + * &drm_gpuva_op in order to satisfy a given map or unmap request and how to 411 + * make use of them. 412 + * 413 + * The below code is strictly limited to illustrate the generic usage pattern. 414 + * To maintain simplicity, it doesn't make use of any abstractions for common 415 + * code, different (asynchronous) stages with fence signalling critical paths, 416 + * any other helpers or error handling in terms of freeing memory and dropping 417 + * previously taken locks. 418 + * 419 + * 1) Obtain a list of &drm_gpuva_op to create a new mapping:: 420 + * 421 + * // Allocates a new &drm_gpuva. 422 + * struct drm_gpuva * driver_gpuva_alloc(void); 423 + * 424 + * // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva 425 + * // structure in individual driver structures and lock the dma-resv with 426 + * // drm_exec or similar helpers. 
427 + * int driver_mapping_create(struct drm_gpuva_manager *mgr, 428 + * u64 addr, u64 range, 429 + * struct drm_gem_object *obj, u64 offset) 430 + * { 431 + * struct drm_gpuva_ops *ops; 432 + * struct drm_gpuva_op *op 433 + * 434 + * driver_lock_va_space(); 435 + * ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, 436 + * obj, offset); 437 + * if (IS_ERR(ops)) 438 + * return PTR_ERR(ops); 439 + * 440 + * drm_gpuva_for_each_op(op, ops) { 441 + * struct drm_gpuva *va; 442 + * 443 + * switch (op->op) { 444 + * case DRM_GPUVA_OP_MAP: 445 + * va = driver_gpuva_alloc(); 446 + * if (!va) 447 + * ; // unwind previous VA space updates, 448 + * // free memory and unlock 449 + * 450 + * driver_vm_map(); 451 + * drm_gpuva_map(mgr, va, &op->map); 452 + * drm_gpuva_link(va); 453 + * 454 + * break; 455 + * case DRM_GPUVA_OP_REMAP: { 456 + * struct drm_gpuva *prev = NULL, *next = NULL; 457 + * 458 + * va = op->remap.unmap->va; 459 + * 460 + * if (op->remap.prev) { 461 + * prev = driver_gpuva_alloc(); 462 + * if (!prev) 463 + * ; // unwind previous VA space 464 + * // updates, free memory and 465 + * // unlock 466 + * } 467 + * 468 + * if (op->remap.next) { 469 + * next = driver_gpuva_alloc(); 470 + * if (!next) 471 + * ; // unwind previous VA space 472 + * // updates, free memory and 473 + * // unlock 474 + * } 475 + * 476 + * driver_vm_remap(); 477 + * drm_gpuva_remap(prev, next, &op->remap); 478 + * 479 + * drm_gpuva_unlink(va); 480 + * if (prev) 481 + * drm_gpuva_link(prev); 482 + * if (next) 483 + * drm_gpuva_link(next); 484 + * 485 + * break; 486 + * } 487 + * case DRM_GPUVA_OP_UNMAP: 488 + * va = op->unmap->va; 489 + * 490 + * driver_vm_unmap(); 491 + * drm_gpuva_unlink(va); 492 + * drm_gpuva_unmap(&op->unmap); 493 + * 494 + * break; 495 + * default: 496 + * break; 497 + * } 498 + * } 499 + * driver_unlock_va_space(); 500 + * 501 + * return 0; 502 + * } 503 + * 504 + * 2) Receive a callback for each &drm_gpuva_op to create a new mapping:: 505 + * 506 + * struct 
driver_context { 507 + * struct drm_gpuva_manager *mgr; 508 + * struct drm_gpuva *new_va; 509 + * struct drm_gpuva *prev_va; 510 + * struct drm_gpuva *next_va; 511 + * }; 512 + * 513 + * // ops to pass to drm_gpuva_manager_init() 514 + * static const struct drm_gpuva_fn_ops driver_gpuva_ops = { 515 + * .sm_step_map = driver_gpuva_map, 516 + * .sm_step_remap = driver_gpuva_remap, 517 + * .sm_step_unmap = driver_gpuva_unmap, 518 + * }; 519 + * 520 + * // Typically drivers would embedd the &drm_gpuva_manager and &drm_gpuva 521 + * // structure in individual driver structures and lock the dma-resv with 522 + * // drm_exec or similar helpers. 523 + * int driver_mapping_create(struct drm_gpuva_manager *mgr, 524 + * u64 addr, u64 range, 525 + * struct drm_gem_object *obj, u64 offset) 526 + * { 527 + * struct driver_context ctx; 528 + * struct drm_gpuva_ops *ops; 529 + * struct drm_gpuva_op *op; 530 + * int ret = 0; 531 + * 532 + * ctx.mgr = mgr; 533 + * 534 + * ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL); 535 + * ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL); 536 + * ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL); 537 + * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) { 538 + * ret = -ENOMEM; 539 + * goto out; 540 + * } 541 + * 542 + * driver_lock_va_space(); 543 + * ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset); 544 + * driver_unlock_va_space(); 545 + * 546 + * out: 547 + * kfree(ctx.new_va); 548 + * kfree(ctx.prev_va); 549 + * kfree(ctx.next_va); 550 + * return ret; 551 + * } 552 + * 553 + * int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx) 554 + * { 555 + * struct driver_context *ctx = __ctx; 556 + * 557 + * drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map); 558 + * 559 + * drm_gpuva_link(ctx->new_va); 560 + * 561 + * // prevent the new GPUVA from being freed in 562 + * // driver_mapping_create() 563 + * ctx->new_va = NULL; 564 + * 565 + * return 0; 566 + * } 567 + * 568 + * int driver_gpuva_remap(struct drm_gpuva_op 
*op, void *__ctx)
 * {
 *	struct driver_context *ctx = __ctx;
 *
 *	drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
 *
 *	drm_gpuva_unlink(op->remap.unmap->va);
 *	kfree(op->remap.unmap->va);
 *
 *	if (op->remap.prev) {
 *		drm_gpuva_link(ctx->prev_va);
 *		ctx->prev_va = NULL;
 *	}
 *
 *	if (op->remap.next) {
 *		drm_gpuva_link(ctx->next_va);
 *		ctx->next_va = NULL;
 *	}
 *
 *	return 0;
 * }
 *
 * int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
 * {
 *	drm_gpuva_unlink(op->unmap.va);
 *	drm_gpuva_unmap(&op->unmap);
 *	kfree(op->unmap.va);
 *
 *	return 0;
 * }
 */

/* Convert an interval tree node back to its containing &drm_gpuva. */
#define to_drm_gpuva(__node)	container_of((__node), struct drm_gpuva, rb.node)

/* Inclusive [start, last] bounds used as interval tree keys. */
#define GPUVA_START(node) ((node)->va.addr)
#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)

/* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain
 * about this.
 */
INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
		     GPUVA_START, GPUVA_LAST, static __maybe_unused,
		     drm_gpuva_it)

static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
			      struct drm_gpuva *va);
static void __drm_gpuva_remove(struct drm_gpuva *va);

/* Returns true (and WARNs) if addr + range wraps around U64_MAX. */
static bool
drm_gpuva_check_overflow(u64 addr, u64 range)
{
	u64 end;

	return WARN(check_add_overflow(addr, range, &end),
		    "GPUVA address limited to %zu bytes.\n", sizeof(end));
}

/* Returns true if [addr, addr + range) lies entirely within the managed
 * VA space of @mgr.
 */
static bool
drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
	u64 end = addr + range;
	u64 mm_start = mgr->mm_start;
	u64 mm_end = mm_start + mgr->mm_range;

	return addr >= mm_start && end <= mm_end;
}

/* Returns true if [addr, addr + range) overlaps the kernel reserved node;
 * a zero-sized reserved node never overlaps anything.
 */
static bool
drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
	u64 end = addr + range;
	u64 kstart = mgr->kernel_alloc_node.va.addr;
	u64 krange = mgr->kernel_alloc_node.va.range;
	u64 kend = kstart + krange;

	return krange && addr < kend && kstart < end;
}

/* A range is valid if it does not overflow, fits the managed VA space and
 * does not intersect the kernel reserved area.
 */
static bool
drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
		      u64 addr, u64 range)
{
	return !drm_gpuva_check_overflow(addr, range) &&
	       drm_gpuva_in_mm_range(mgr, addr, range) &&
	       !drm_gpuva_in_kernel_node(mgr, addr, range);
}

/**
 * drm_gpuva_manager_init() - initialize a &drm_gpuva_manager
 * @mgr: pointer to the &drm_gpuva_manager to initialize
 * @name: the name of the GPU VA space
 * @start_offset: the start offset of the GPU VA space
 * @range: the size of the GPU VA space
 * @reserve_offset: the start of the kernel reserved GPU VA area
 * @reserve_range: the size of the kernel reserved GPU VA area
 * @ops: &drm_gpuva_fn_ops called on &drm_gpuva_sm_map / &drm_gpuva_sm_unmap
 *
 * 
The &drm_gpuva_manager must be initialized with this function before use.
 *
 * Note that @mgr must be cleared to 0 before calling this function. The given
 * &name is expected to be managed by the surrounding driver structures.
 */
void
drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
		       const char *name,
		       u64 start_offset, u64 range,
		       u64 reserve_offset, u64 reserve_range,
		       const struct drm_gpuva_fn_ops *ops)
{
	mgr->rb.tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&mgr->rb.list);

	/* WARNs on overflow; initialization proceeds regardless. */
	drm_gpuva_check_overflow(start_offset, range);
	mgr->mm_start = start_offset;
	mgr->mm_range = range;

	mgr->name = name ? name : "unknown";
	mgr->ops = ops;

	memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva));

	if (reserve_range) {
		mgr->kernel_alloc_node.va.addr = reserve_offset;
		mgr->kernel_alloc_node.va.range = reserve_range;

		/* Only insert the reserved node if its range doesn't wrap. */
		if (likely(!drm_gpuva_check_overflow(reserve_offset,
						     reserve_range)))
			__drm_gpuva_insert(mgr, &mgr->kernel_alloc_node);
	}
}
EXPORT_SYMBOL_GPL(drm_gpuva_manager_init);

/**
 * drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager
 * @mgr: pointer to the &drm_gpuva_manager to clean up
 *
 * Note that it is a bug to call this function on a manager that still
 * holds GPU VA mappings.
 */
void
drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr)
{
	mgr->name = NULL;

	if (mgr->kernel_alloc_node.va.range)
		__drm_gpuva_remove(&mgr->kernel_alloc_node);

	/* Any remaining node is a mapping the driver failed to remove. */
	WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root),
	     "GPUVA tree is not empty, potentially leaking memory.");
}
EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy);

/* Insert @va into the interval tree and the address-ordered list; fails with
 * -EEXIST if the node's range overlaps any existing entry.
 */
static int
__drm_gpuva_insert(struct drm_gpuva_manager *mgr,
		   struct drm_gpuva *va)
{
	struct rb_node *node;
	struct list_head *head;

	if (drm_gpuva_it_iter_first(&mgr->rb.tree,
				    GPUVA_START(va),
				    GPUVA_LAST(va)))
		return -EEXIST;

	va->mgr = mgr;

	drm_gpuva_it_insert(va, &mgr->rb.tree);

	/* Keep mgr->rb.list address-ordered: link after the tree predecessor,
	 * or at the list head if there is none.
	 */
	node = rb_prev(&va->rb.node);
	if (node)
		head = &(to_drm_gpuva(node))->rb.entry;
	else
		head = &mgr->rb.list;

	list_add(&va->rb.entry, head);

	return 0;
}

/**
 * drm_gpuva_insert() - insert a &drm_gpuva
 * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in
 * @va: the &drm_gpuva to insert
 *
 * Insert a &drm_gpuva with a given address and range into a
 * &drm_gpuva_manager.
 *
 * It is safe to use this function using the safe versions of iterating the GPU
 * VA space, such as drm_gpuva_for_each_va_safe() and
 * drm_gpuva_for_each_va_range_safe().
 *
 * Returns: 0 on success, negative error code on failure.
 */
int
drm_gpuva_insert(struct drm_gpuva_manager *mgr,
		 struct drm_gpuva *va)
{
	u64 addr = va->va.addr;
	u64 range = va->va.range;

	if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
		return -EINVAL;

	return __drm_gpuva_insert(mgr, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);

/* Drop @va from the interval tree and the address-ordered list. */
static void
__drm_gpuva_remove(struct drm_gpuva *va)
{
	drm_gpuva_it_remove(va, &va->mgr->rb.tree);
	list_del_init(&va->rb.entry);
}

/**
 * drm_gpuva_remove() - remove a &drm_gpuva
 * @va: the &drm_gpuva to remove
 *
 * This removes the given &va from the underlying tree.
 *
 * It is safe to use this function using the safe versions of iterating the GPU
 * VA space, such as drm_gpuva_for_each_va_safe() and
 * drm_gpuva_for_each_va_range_safe().
 */
void
drm_gpuva_remove(struct drm_gpuva *va)
{
	struct drm_gpuva_manager *mgr = va->mgr;

	/* The reserved node is owned by the manager, not the driver. */
	if (unlikely(va == &mgr->kernel_alloc_node)) {
		WARN(1, "Can't destroy kernel reserved node.\n");
		return;
	}

	__drm_gpuva_remove(va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_remove);

/**
 * drm_gpuva_link() - link a &drm_gpuva
 * @va: the &drm_gpuva to link
 *
 * This adds the given &va to the GPU VA list of the &drm_gem_object it is
 * associated with.
 *
 * This function expects the caller to protect the GEM's GPUVA list against
 * concurrent access using the GEMs dma_resv lock.
 */
void
drm_gpuva_link(struct drm_gpuva *va)
{
	struct drm_gem_object *obj = va->gem.obj;

	/* Mappings without a backing GEM object have nothing to link to. */
	if (unlikely(!obj))
		return;

	drm_gem_gpuva_assert_lock_held(obj);

	list_add_tail(&va->gem.entry, &obj->gpuva.list);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);

/**
 * drm_gpuva_unlink() - unlink a &drm_gpuva
 * @va: the &drm_gpuva to unlink
 *
 * This removes the given &va from the GPU VA list of the &drm_gem_object it is
 * associated with.
 *
 * This function expects the caller to protect the GEM's GPUVA list against
 * concurrent access using the GEMs dma_resv lock.
 */
void
drm_gpuva_unlink(struct drm_gpuva *va)
{
	struct drm_gem_object *obj = va->gem.obj;

	if (unlikely(!obj))
		return;

	drm_gem_gpuva_assert_lock_held(obj);

	list_del_init(&va->gem.entry);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);

/**
 * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
 * @mgr: the &drm_gpuva_manager to search in
 * @addr: the &drm_gpuvas address
 * @range: the &drm_gpuvas range
 *
 * Returns: the first &drm_gpuva within the given range
 */
struct drm_gpuva *
drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
		     u64 addr, u64 range)
{
	/* Interval tree keys are inclusive, hence the -1. */
	u64 last = addr + range - 1;

	return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_first);

/**
 * drm_gpuva_find() - find a &drm_gpuva
 * @mgr: the &drm_gpuva_manager to search in
 * @addr: the &drm_gpuvas address
 * @range: the &drm_gpuvas range
 *
 * Returns: the &drm_gpuva at a given &addr and with a given &range
 */
struct drm_gpuva *
drm_gpuva_find(struct drm_gpuva_manager *mgr,
	       u64 addr, u64 range)
{
	struct drm_gpuva *va;

	va = drm_gpuva_find_first(mgr, addr, range);
	if (!va)
		goto out;

	/* Only an exact match counts, not a mere overlap. */
	if (va->va.addr != addr ||
	    va->va.range != range)
		goto out;

	return va;

out:
	return NULL;
}
EXPORT_SYMBOL_GPL(drm_gpuva_find);

/**
 * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
 * @mgr: the &drm_gpuva_manager to search in
 * @start: the given GPU VA's start address
 *
 * Find the adjacent &drm_gpuva before the GPU VA with given &start address.
 *
 * Note that if there is any free space between the GPU VA mappings no mapping
 * is returned.
 *
 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
 */
struct drm_gpuva *
drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
{
	if (!drm_gpuva_range_valid(mgr, start - 1, 1))
		return NULL;

	/* Probe the single byte immediately preceding @start. */
	return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);

/**
 * drm_gpuva_find_next() - find the &drm_gpuva after the given address
 * @mgr: the &drm_gpuva_manager to search in
 * @end: the given GPU VA's end address
 *
 * Find the adjacent &drm_gpuva after the GPU VA with given &end address.
 *
 * Note that if there is any free space between the GPU VA mappings no mapping
 * is returned.
 *
 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
 */
struct drm_gpuva *
drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
{
	if (!drm_gpuva_range_valid(mgr, end, 1))
		return NULL;

	/* Probe the single byte immediately following @end. */
	return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_next);

/**
 * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
 * is empty
 * @mgr: the &drm_gpuva_manager to check the range for
 * @addr: the start address of the range
 * @range: the range of the interval
 *
 * Returns: true if the interval is empty, false otherwise
 */
bool
drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
	return !drm_gpuva_find_first(mgr, addr, range);
}
EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty);

/**
 * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
 * &drm_gpuva_op_map
 * @mgr: the &drm_gpuva_manager
 * @va: the &drm_gpuva to insert
 * @op: the &drm_gpuva_op_map to initialize @va with
 *
 * Initializes the @va from the @op and inserts it into the given @mgr.
 */
void
drm_gpuva_map(struct drm_gpuva_manager *mgr,
	      struct drm_gpuva *va,
	      struct drm_gpuva_op_map *op)
{
	drm_gpuva_init_from_op(va, op);
	drm_gpuva_insert(mgr, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_map);

/**
 * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
 * &drm_gpuva_op_remap
 * @prev: the &drm_gpuva to remap when keeping the start of a mapping
 * @next: the &drm_gpuva to remap when keeping the end of a mapping
 * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
 *
 * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
 * @next.
 */
void
drm_gpuva_remap(struct drm_gpuva *prev,
		struct drm_gpuva *next,
		struct drm_gpuva_op_remap *op)
{
	struct drm_gpuva *curr = op->unmap->va;
	struct drm_gpuva_manager *mgr = curr->mgr;

	drm_gpuva_remove(curr);

	/* Re-insert the retained head and/or tail piece(s), if any. */
	if (op->prev) {
		drm_gpuva_init_from_op(prev, op->prev);
		drm_gpuva_insert(mgr, prev);
	}

	if (op->next) {
		drm_gpuva_init_from_op(next, op->next);
		drm_gpuva_insert(mgr, next);
	}
}
EXPORT_SYMBOL_GPL(drm_gpuva_remap);

/**
 * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
 * &drm_gpuva_op_unmap
 * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
 *
 * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
 */
void
drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
{
	drm_gpuva_remove(op->va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unmap);

/* Build a map step on the stack and forward it to the driver callback. */
static int
op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
	  u64 addr, u64 range,
	  struct drm_gem_object *obj, u64 offset)
{
	struct drm_gpuva_op op = {};

	op.op = DRM_GPUVA_OP_MAP;
	op.map.va.addr = addr;
	op.map.va.range = range;
	op.map.gem.obj = obj;
	op.map.gem.offset = offset;

	return fn->sm_step_map(&op, priv);
}

/* Build a remap step on the stack and forward it to the driver callback. */
static int
op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
	    struct drm_gpuva_op_map *prev,
	    struct drm_gpuva_op_map *next,
	    struct drm_gpuva_op_unmap *unmap)
{
	struct drm_gpuva_op op = {};
	struct drm_gpuva_op_remap *r;

	op.op = DRM_GPUVA_OP_REMAP;
	r = &op.remap;
	r->prev = prev;
	r->next = next;
	r->unmap = unmap;

	return fn->sm_step_remap(&op, priv);
}

/* Build an unmap step on the stack and forward it to the driver callback. */
static int
op_unmap_cb(const struct 
drm_gpuva_fn_ops *fn, void *priv,
	    struct drm_gpuva *va, bool merge)
{
	struct drm_gpuva_op op = {};

	op.op = DRM_GPUVA_OP_UNMAP;
	op.unmap.va = va;
	op.unmap.keep = merge;

	return fn->sm_step_unmap(&op, priv);
}

/* Walk every mapping overlapping [req_addr, req_addr + req_range) and emit
 * the unmap/remap steps required to make room for the requested mapping;
 * the map step for the new mapping itself is emitted last.
 */
static int
__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
		   const struct drm_gpuva_fn_ops *ops, void *priv,
		   u64 req_addr, u64 req_range,
		   struct drm_gem_object *req_obj, u64 req_offset)
{
	struct drm_gpuva *va, *next, *prev = NULL;
	u64 req_end = req_addr + req_range;
	int ret;

	if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
		return -EINVAL;

	drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
		struct drm_gem_object *obj = va->gem.obj;
		u64 offset = va->gem.offset;
		u64 addr = va->va.addr;
		u64 range = va->va.range;
		u64 end = addr + range;
		/* Only mappings backed by a GEM object are merge candidates. */
		bool merge = !!va->gem.obj;

		if (addr == req_addr) {
			merge &= obj == req_obj &&
				 offset == req_offset;

			if (end == req_end) {
				/* Existing mapping is replaced exactly. */
				ret = op_unmap_cb(ops, priv, va, merge);
				if (ret)
					return ret;
				break;
			}

			if (end < req_end) {
				/* Existing mapping is fully covered. */
				ret = op_unmap_cb(ops, priv, va, merge);
				if (ret)
					return ret;
				goto next;
			}

			if (end > req_end) {
				/* Keep the tail of the existing mapping. */
				struct drm_gpuva_op_map n = {
					.va.addr = req_end,
					.va.range = range - req_range,
					.gem.obj = obj,
					.gem.offset = offset + req_range,
				};
				struct drm_gpuva_op_unmap u = {
					.va = va,
					.keep = merge,
				};

				ret = op_remap_cb(ops, priv, NULL, &n, &u);
				if (ret)
					return ret;
				break;
			}
		} else if (addr < req_addr) {
			u64 ls_range = req_addr - addr;
			struct drm_gpuva_op_map p = {
				.va.addr = addr,
				.va.range = ls_range,
				.gem.obj = obj,
				.gem.offset = offset,
			};
			struct drm_gpuva_op_unmap u = { .va = va };

			merge &= obj == req_obj &&
				 offset + ls_range == req_offset;
			u.keep = merge;

			if (end == req_end) {
				/* Keep the head of the existing mapping. */
				ret = op_remap_cb(ops, priv, &p, NULL, &u);
				if (ret)
					return ret;
				break;
			}

			if (end < req_end) {
				/* Keep the head; the rest is covered. */
				ret = op_remap_cb(ops, priv, &p, NULL, &u);
				if (ret)
					return ret;
				goto next;
			}

			if (end > req_end) {
				/* Request punches a hole: keep head and tail. */
				struct drm_gpuva_op_map n = {
					.va.addr = req_end,
					.va.range = end - req_end,
					.gem.obj = obj,
					.gem.offset = offset + ls_range +
						      req_range,
				};

				ret = op_remap_cb(ops, priv, &p, &n, &u);
				if (ret)
					return ret;
				break;
			}
		} else if (addr > req_addr) {
			merge &= obj == req_obj &&
				 offset == req_offset +
					   (addr - req_addr);

			if (end == req_end) {
				ret = op_unmap_cb(ops, priv, va, merge);
				if (ret)
					return ret;
				break;
			}

			if (end < req_end) {
				ret = op_unmap_cb(ops, priv, va, merge);
				if (ret)
					return ret;
				goto next;
			}

			if (end > req_end) {
				/* Keep the tail of the existing mapping. */
				struct drm_gpuva_op_map n = {
					.va.addr = req_end,
					.va.range = end - req_end,
					.gem.obj = obj,
					.gem.offset = offset + req_end - addr,
				};
				struct drm_gpuva_op_unmap u = {
					.va = va,
					.keep = merge,
				};

				ret = op_remap_cb(ops, priv, NULL, &n, &u);
				if (ret)
					return ret;
				break;
			}
		}
next:
		prev = va;
	}

	/* Finally, emit the map step for the requested mapping itself. */
	return op_map_cb(ops, priv,
			 req_addr, req_range,
			 req_obj, req_offset);
}

/* Walk every mapping overlapping [req_addr, req_addr + req_range) and emit
 * unmap steps, splitting (remap) mappings that only partially overlap.
 */
static int
__drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
		     const struct drm_gpuva_fn_ops *ops, void *priv,
		     u64 req_addr, u64 req_range)
{
	struct drm_gpuva *va, *next;
	u64 req_end = req_addr + req_range;
	int ret;

	if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
		return -EINVAL;

	drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
		/* NOTE: these intentionally shadow the iterator's 'next'. */
		struct drm_gpuva_op_map prev = {}, next = {};
		bool prev_split = false, next_split = false;
		struct drm_gem_object *obj = va->gem.obj;
		u64 offset = va->gem.offset;
		u64 addr = va->va.addr;
		u64 range = va->va.range;
		u64 end = addr + range;

		if (addr < req_addr) {
			/* Mapping starts before the range: keep its head. */
			prev.va.addr = addr;
			prev.va.range = req_addr - addr;
			prev.gem.obj = obj;
			prev.gem.offset = offset;

			prev_split = true;
		}

		if (end > req_end) {
			/* Mapping ends past the range: keep its tail. */
			next.va.addr = req_end;
			next.va.range = end - req_end;
			next.gem.obj = obj;
			next.gem.offset = offset + (req_end - addr);

			next_split = true;
		}

		if (prev_split || next_split) {
			struct drm_gpuva_op_unmap unmap = { .va = va };

			ret = op_remap_cb(ops, priv,
					  prev_split ? &prev : NULL,
					  next_split ? &next : NULL,
					  &unmap);
			if (ret)
				return ret;
		} else {
			ret = op_unmap_cb(ops, priv, va, false);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps
 * @mgr: the &drm_gpuva_manager representing the GPU VA space
 * @req_addr: the start address of the new mapping
 * @req_range: the range of the new mapping
 * @req_obj: the &drm_gem_object to map
 * @req_offset: the offset within the &drm_gem_object
 * @priv: pointer to a driver private data structure
 *
 * This function iterates the given range of the GPU VA space. 
It utilizes the 1285 + * &drm_gpuva_fn_ops to call back into the driver providing the split and merge 1286 + * steps. 1287 + * 1288 + * Drivers may use these callbacks to update the GPU VA space right away within 1289 + * the callback. In case the driver decides to copy and store the operations for 1290 + * later processing neither this function nor &drm_gpuva_sm_unmap is allowed to 1291 + * be called before the &drm_gpuva_manager's view of the GPU VA space was 1292 + * updated with the previous set of operations. To update the 1293 + * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(), 1294 + * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be 1295 + * used. 1296 + * 1297 + * A sequence of callbacks can contain map, unmap and remap operations, but 1298 + * the sequence of callbacks might also be empty if no operation is required, 1299 + * e.g. if the requested mapping already exists in the exact same way. 1300 + * 1301 + * There can be an arbitrary amount of unmap operations, a maximum of two remap 1302 + * operations and a single map operation. The latter one represents the original 1303 + * map operation requested by the caller. 
1304 + * 1305 + * Returns: 0 on success or a negative error code 1306 + */ 1307 + int 1308 + drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv, 1309 + u64 req_addr, u64 req_range, 1310 + struct drm_gem_object *req_obj, u64 req_offset) 1311 + { 1312 + const struct drm_gpuva_fn_ops *ops = mgr->ops; 1313 + 1314 + if (unlikely(!(ops && ops->sm_step_map && 1315 + ops->sm_step_remap && 1316 + ops->sm_step_unmap))) 1317 + return -EINVAL; 1318 + 1319 + return __drm_gpuva_sm_map(mgr, ops, priv, 1320 + req_addr, req_range, 1321 + req_obj, req_offset); 1322 + } 1323 + EXPORT_SYMBOL_GPL(drm_gpuva_sm_map); 1324 + 1325 + /** 1326 + * drm_gpuva_sm_unmap() - creates the &drm_gpuva_ops to split on unmap 1327 + * @mgr: the &drm_gpuva_manager representing the GPU VA space 1328 + * @priv: pointer to a driver private data structure 1329 + * @req_addr: the start address of the range to unmap 1330 + * @req_range: the range of the mappings to unmap 1331 + * 1332 + * This function iterates the given range of the GPU VA space. It utilizes the 1333 + * &drm_gpuva_fn_ops to call back into the driver providing the operations to 1334 + * unmap and, if required, split existent mappings. 1335 + * 1336 + * Drivers may use these callbacks to update the GPU VA space right away within 1337 + * the callback. In case the driver decides to copy and store the operations for 1338 + * later processing neither this function nor &drm_gpuva_sm_map is allowed to be 1339 + * called before the &drm_gpuva_manager's view of the GPU VA space was updated 1340 + * with the previous set of operations. To update the &drm_gpuva_manager's view 1341 + * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or 1342 + * drm_gpuva_destroy_unlocked() should be used. 1343 + * 1344 + * A sequence of callbacks can contain unmap and remap operations, depending on 1345 + * whether there are actual overlapping mappings to split. 
1346 + * 1347 + * There can be an arbitrary amount of unmap operations and a maximum of two 1348 + * remap operations. 1349 + * 1350 + * Returns: 0 on success or a negative error code 1351 + */ 1352 + int 1353 + drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv, 1354 + u64 req_addr, u64 req_range) 1355 + { 1356 + const struct drm_gpuva_fn_ops *ops = mgr->ops; 1357 + 1358 + if (unlikely(!(ops && ops->sm_step_remap && 1359 + ops->sm_step_unmap))) 1360 + return -EINVAL; 1361 + 1362 + return __drm_gpuva_sm_unmap(mgr, ops, priv, 1363 + req_addr, req_range); 1364 + } 1365 + EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap); 1366 + 1367 + static struct drm_gpuva_op * 1368 + gpuva_op_alloc(struct drm_gpuva_manager *mgr) 1369 + { 1370 + const struct drm_gpuva_fn_ops *fn = mgr->ops; 1371 + struct drm_gpuva_op *op; 1372 + 1373 + if (fn && fn->op_alloc) 1374 + op = fn->op_alloc(); 1375 + else 1376 + op = kzalloc(sizeof(*op), GFP_KERNEL); 1377 + 1378 + if (unlikely(!op)) 1379 + return NULL; 1380 + 1381 + return op; 1382 + } 1383 + 1384 + static void 1385 + gpuva_op_free(struct drm_gpuva_manager *mgr, 1386 + struct drm_gpuva_op *op) 1387 + { 1388 + const struct drm_gpuva_fn_ops *fn = mgr->ops; 1389 + 1390 + if (fn && fn->op_free) 1391 + fn->op_free(op); 1392 + else 1393 + kfree(op); 1394 + } 1395 + 1396 + static int 1397 + drm_gpuva_sm_step(struct drm_gpuva_op *__op, 1398 + void *priv) 1399 + { 1400 + struct { 1401 + struct drm_gpuva_manager *mgr; 1402 + struct drm_gpuva_ops *ops; 1403 + } *args = priv; 1404 + struct drm_gpuva_manager *mgr = args->mgr; 1405 + struct drm_gpuva_ops *ops = args->ops; 1406 + struct drm_gpuva_op *op; 1407 + 1408 + op = gpuva_op_alloc(mgr); 1409 + if (unlikely(!op)) 1410 + goto err; 1411 + 1412 + memcpy(op, __op, sizeof(*op)); 1413 + 1414 + if (op->op == DRM_GPUVA_OP_REMAP) { 1415 + struct drm_gpuva_op_remap *__r = &__op->remap; 1416 + struct drm_gpuva_op_remap *r = &op->remap; 1417 + 1418 + r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap), 1419 + 
GFP_KERNEL); 1420 + if (unlikely(!r->unmap)) 1421 + goto err_free_op; 1422 + 1423 + if (__r->prev) { 1424 + r->prev = kmemdup(__r->prev, sizeof(*r->prev), 1425 + GFP_KERNEL); 1426 + if (unlikely(!r->prev)) 1427 + goto err_free_unmap; 1428 + } 1429 + 1430 + if (__r->next) { 1431 + r->next = kmemdup(__r->next, sizeof(*r->next), 1432 + GFP_KERNEL); 1433 + if (unlikely(!r->next)) 1434 + goto err_free_prev; 1435 + } 1436 + } 1437 + 1438 + list_add_tail(&op->entry, &ops->list); 1439 + 1440 + return 0; 1441 + 1442 + err_free_unmap: 1443 + kfree(op->remap.unmap); 1444 + err_free_prev: 1445 + kfree(op->remap.prev); 1446 + err_free_op: 1447 + gpuva_op_free(mgr, op); 1448 + err: 1449 + return -ENOMEM; 1450 + } 1451 + 1452 + static const struct drm_gpuva_fn_ops gpuva_list_ops = { 1453 + .sm_step_map = drm_gpuva_sm_step, 1454 + .sm_step_remap = drm_gpuva_sm_step, 1455 + .sm_step_unmap = drm_gpuva_sm_step, 1456 + }; 1457 + 1458 + /** 1459 + * drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge 1460 + * @mgr: the &drm_gpuva_manager representing the GPU VA space 1461 + * @req_addr: the start address of the new mapping 1462 + * @req_range: the range of the new mapping 1463 + * @req_obj: the &drm_gem_object to map 1464 + * @req_offset: the offset within the &drm_gem_object 1465 + * 1466 + * This function creates a list of operations to perform splitting and merging 1467 + * of existent mapping(s) with the newly requested one. 1468 + * 1469 + * The list can be iterated with &drm_gpuva_for_each_op and must be processed 1470 + * in the given order. It can contain map, unmap and remap operations, but it 1471 + * also can be empty if no operation is required, e.g. if the requested mapping 1472 + * already exists is the exact same way. 1473 + * 1474 + * There can be an arbitrary amount of unmap operations, a maximum of two remap 1475 + * operations and a single map operation. The latter one represents the original 1476 + * map operation requested by the caller. 
1477 + * 1478 + * Note that before calling this function again with another mapping request it 1479 + * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The 1480 + * previously obtained operations must be either processed or abandoned. To 1481 + * update the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(), 1482 + * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be 1483 + * used. 1484 + * 1485 + * After the caller finished processing the returned &drm_gpuva_ops, they must 1486 + * be freed with &drm_gpuva_ops_free. 1487 + * 1488 + * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure 1489 + */ 1490 + struct drm_gpuva_ops * 1491 + drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr, 1492 + u64 req_addr, u64 req_range, 1493 + struct drm_gem_object *req_obj, u64 req_offset) 1494 + { 1495 + struct drm_gpuva_ops *ops; 1496 + struct { 1497 + struct drm_gpuva_manager *mgr; 1498 + struct drm_gpuva_ops *ops; 1499 + } args; 1500 + int ret; 1501 + 1502 + ops = kzalloc(sizeof(*ops), GFP_KERNEL); 1503 + if (unlikely(!ops)) 1504 + return ERR_PTR(-ENOMEM); 1505 + 1506 + INIT_LIST_HEAD(&ops->list); 1507 + 1508 + args.mgr = mgr; 1509 + args.ops = ops; 1510 + 1511 + ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args, 1512 + req_addr, req_range, 1513 + req_obj, req_offset); 1514 + if (ret) 1515 + goto err_free_ops; 1516 + 1517 + return ops; 1518 + 1519 + err_free_ops: 1520 + drm_gpuva_ops_free(mgr, ops); 1521 + return ERR_PTR(ret); 1522 + } 1523 + EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create); 1524 + 1525 + /** 1526 + * drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on 1527 + * unmap 1528 + * @mgr: the &drm_gpuva_manager representing the GPU VA space 1529 + * @req_addr: the start address of the range to unmap 1530 + * @req_range: the range of the mappings to unmap 1531 + * 1532 + * This function creates a list of operations to perform unmapping and, if 1533 + * 
required, splitting of the mappings overlapping the unmap range. 1534 + * 1535 + * The list can be iterated with &drm_gpuva_for_each_op and must be processed 1536 + * in the given order. It can contain unmap and remap operations, depending on 1537 + * whether there are actual overlapping mappings to split. 1538 + * 1539 + * There can be an arbitrary amount of unmap operations and a maximum of two 1540 + * remap operations. 1541 + * 1542 + * Note that before calling this function again with another range to unmap it 1543 + * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The 1544 + * previously obtained operations must be processed or abandoned. To update the 1545 + * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(), 1546 + * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be 1547 + * used. 1548 + * 1549 + * After the caller finished processing the returned &drm_gpuva_ops, they must 1550 + * be freed with &drm_gpuva_ops_free. 
1551 + * 1552 + * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure 1553 + */ 1554 + struct drm_gpuva_ops * 1555 + drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr, 1556 + u64 req_addr, u64 req_range) 1557 + { 1558 + struct drm_gpuva_ops *ops; 1559 + struct { 1560 + struct drm_gpuva_manager *mgr; 1561 + struct drm_gpuva_ops *ops; 1562 + } args; 1563 + int ret; 1564 + 1565 + ops = kzalloc(sizeof(*ops), GFP_KERNEL); 1566 + if (unlikely(!ops)) 1567 + return ERR_PTR(-ENOMEM); 1568 + 1569 + INIT_LIST_HEAD(&ops->list); 1570 + 1571 + args.mgr = mgr; 1572 + args.ops = ops; 1573 + 1574 + ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args, 1575 + req_addr, req_range); 1576 + if (ret) 1577 + goto err_free_ops; 1578 + 1579 + return ops; 1580 + 1581 + err_free_ops: 1582 + drm_gpuva_ops_free(mgr, ops); 1583 + return ERR_PTR(ret); 1584 + } 1585 + EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create); 1586 + 1587 + /** 1588 + * drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch 1589 + * @mgr: the &drm_gpuva_manager representing the GPU VA space 1590 + * @addr: the start address of the range to prefetch 1591 + * @range: the range of the mappings to prefetch 1592 + * 1593 + * This function creates a list of operations to perform prefetching. 1594 + * 1595 + * The list can be iterated with &drm_gpuva_for_each_op and must be processed 1596 + * in the given order. It can contain prefetch operations. 1597 + * 1598 + * There can be an arbitrary amount of prefetch operations. 1599 + * 1600 + * After the caller finished processing the returned &drm_gpuva_ops, they must 1601 + * be freed with &drm_gpuva_ops_free. 
1602 + * 1603 + * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure 1604 + */ 1605 + struct drm_gpuva_ops * 1606 + drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr, 1607 + u64 addr, u64 range) 1608 + { 1609 + struct drm_gpuva_ops *ops; 1610 + struct drm_gpuva_op *op; 1611 + struct drm_gpuva *va; 1612 + u64 end = addr + range; 1613 + int ret; 1614 + 1615 + ops = kzalloc(sizeof(*ops), GFP_KERNEL); 1616 + if (!ops) 1617 + return ERR_PTR(-ENOMEM); 1618 + 1619 + INIT_LIST_HEAD(&ops->list); 1620 + 1621 + drm_gpuva_for_each_va_range(va, mgr, addr, end) { 1622 + op = gpuva_op_alloc(mgr); 1623 + if (!op) { 1624 + ret = -ENOMEM; 1625 + goto err_free_ops; 1626 + } 1627 + 1628 + op->op = DRM_GPUVA_OP_PREFETCH; 1629 + op->prefetch.va = va; 1630 + list_add_tail(&op->entry, &ops->list); 1631 + } 1632 + 1633 + return ops; 1634 + 1635 + err_free_ops: 1636 + drm_gpuva_ops_free(mgr, ops); 1637 + return ERR_PTR(ret); 1638 + } 1639 + EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create); 1640 + 1641 + /** 1642 + * drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM 1643 + * @mgr: the &drm_gpuva_manager representing the GPU VA space 1644 + * @obj: the &drm_gem_object to unmap 1645 + * 1646 + * This function creates a list of operations to perform unmapping for every 1647 + * GPUVA attached to a GEM. 1648 + * 1649 + * The list can be iterated with &drm_gpuva_for_each_op and consists out of an 1650 + * arbitrary amount of unmap operations. 1651 + * 1652 + * After the caller finished processing the returned &drm_gpuva_ops, they must 1653 + * be freed with &drm_gpuva_ops_free. 1654 + * 1655 + * It is the callers responsibility to protect the GEMs GPUVA list against 1656 + * concurrent access using the GEMs dma_resv lock. 
1657 + * 1658 + * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure 1659 + */ 1660 + struct drm_gpuva_ops * 1661 + drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr, 1662 + struct drm_gem_object *obj) 1663 + { 1664 + struct drm_gpuva_ops *ops; 1665 + struct drm_gpuva_op *op; 1666 + struct drm_gpuva *va; 1667 + int ret; 1668 + 1669 + drm_gem_gpuva_assert_lock_held(obj); 1670 + 1671 + ops = kzalloc(sizeof(*ops), GFP_KERNEL); 1672 + if (!ops) 1673 + return ERR_PTR(-ENOMEM); 1674 + 1675 + INIT_LIST_HEAD(&ops->list); 1676 + 1677 + drm_gem_for_each_gpuva(va, obj) { 1678 + op = gpuva_op_alloc(mgr); 1679 + if (!op) { 1680 + ret = -ENOMEM; 1681 + goto err_free_ops; 1682 + } 1683 + 1684 + op->op = DRM_GPUVA_OP_UNMAP; 1685 + op->unmap.va = va; 1686 + list_add_tail(&op->entry, &ops->list); 1687 + } 1688 + 1689 + return ops; 1690 + 1691 + err_free_ops: 1692 + drm_gpuva_ops_free(mgr, ops); 1693 + return ERR_PTR(ret); 1694 + } 1695 + EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create); 1696 + 1697 + /** 1698 + * drm_gpuva_ops_free() - free the given &drm_gpuva_ops 1699 + * @mgr: the &drm_gpuva_manager the ops were created for 1700 + * @ops: the &drm_gpuva_ops to free 1701 + * 1702 + * Frees the given &drm_gpuva_ops structure including all the ops associated 1703 + * with it. 1704 + */ 1705 + void 1706 + drm_gpuva_ops_free(struct drm_gpuva_manager *mgr, 1707 + struct drm_gpuva_ops *ops) 1708 + { 1709 + struct drm_gpuva_op *op, *next; 1710 + 1711 + drm_gpuva_for_each_op_safe(op, next, ops) { 1712 + list_del(&op->entry); 1713 + 1714 + if (op->op == DRM_GPUVA_OP_REMAP) { 1715 + kfree(op->remap.prev); 1716 + kfree(op->remap.next); 1717 + kfree(op->remap.unmap); 1718 + } 1719 + 1720 + gpuva_op_free(mgr, op); 1721 + } 1722 + 1723 + kfree(ops); 1724 + } 1725 + EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
+2
drivers/gpu/drm/drm_internal.h
··· 245 245 struct drm_file *file_private); 246 246 int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, 247 247 struct drm_file *file_private); 248 + int drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, 249 + struct drm_file *file_private); 248 250 int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, 249 251 struct drm_file *file_private); 250 252 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+2
drivers/gpu/drm/drm_ioctl.c
··· 701 701 DRM_RENDER_ALLOW), 702 702 DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl, 703 703 DRM_RENDER_ALLOW), 704 + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_EVENTFD, drm_syncobj_eventfd_ioctl, 705 + DRM_RENDER_ALLOW), 704 706 DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl, 705 707 DRM_RENDER_ALLOW), 706 708 DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
+1
drivers/gpu/drm/drm_mipi_dsi.c
··· 27 27 28 28 #include <linux/device.h> 29 29 #include <linux/module.h> 30 + #include <linux/of.h> 30 31 #include <linux/of_device.h> 31 32 #include <linux/pm_runtime.h> 32 33 #include <linux/slab.h>
+142 -6
drivers/gpu/drm/drm_syncobj.c
··· 136 136 * requirement is inherited from the wait-before-signal behavior required by 137 137 * the Vulkan timeline semaphore API. 138 138 * 139 + * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without 140 + * blocking: an eventfd will be signaled when the syncobj is. This is useful to 141 + * integrate the wait in an event loop. 142 + * 139 143 * 140 144 * Import/export of syncobjs 141 145 * ------------------------- ··· 189 185 190 186 #include <linux/anon_inodes.h> 191 187 #include <linux/dma-fence-unwrap.h> 188 + #include <linux/eventfd.h> 192 189 #include <linux/file.h> 193 190 #include <linux/fs.h> 194 191 #include <linux/sched/signal.h> ··· 216 211 217 212 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, 218 213 struct syncobj_wait_entry *wait); 214 + 215 + struct syncobj_eventfd_entry { 216 + struct list_head node; 217 + struct dma_fence *fence; 218 + struct dma_fence_cb fence_cb; 219 + struct drm_syncobj *syncobj; 220 + struct eventfd_ctx *ev_fd_ctx; 221 + u64 point; 222 + u32 flags; 223 + }; 224 + 225 + static void 226 + syncobj_eventfd_entry_func(struct drm_syncobj *syncobj, 227 + struct syncobj_eventfd_entry *entry); 219 228 220 229 /** 221 230 * drm_syncobj_find - lookup and reference a sync object. ··· 293 274 spin_unlock(&syncobj->lock); 294 275 } 295 276 277 + static void 278 + syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry) 279 + { 280 + eventfd_ctx_put(entry->ev_fd_ctx); 281 + dma_fence_put(entry->fence); 282 + /* This happens either inside the syncobj lock, or after the node has 283 + * already been removed from the list. 
284 + */ 285 + list_del(&entry->node); 286 + kfree(entry); 287 + } 288 + 289 + static void 290 + drm_syncobj_add_eventfd(struct drm_syncobj *syncobj, 291 + struct syncobj_eventfd_entry *entry) 292 + { 293 + spin_lock(&syncobj->lock); 294 + list_add_tail(&entry->node, &syncobj->ev_fd_list); 295 + syncobj_eventfd_entry_func(syncobj, entry); 296 + spin_unlock(&syncobj->lock); 297 + } 298 + 296 299 /** 297 300 * drm_syncobj_add_point - add new timeline point to the syncobj 298 301 * @syncobj: sync object to add timeline point do ··· 329 288 struct dma_fence *fence, 330 289 uint64_t point) 331 290 { 332 - struct syncobj_wait_entry *cur, *tmp; 291 + struct syncobj_wait_entry *wait_cur, *wait_tmp; 292 + struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp; 333 293 struct dma_fence *prev; 334 294 335 295 dma_fence_get(fence); ··· 344 302 dma_fence_chain_init(chain, prev, fence, point); 345 303 rcu_assign_pointer(syncobj->fence, &chain->base); 346 304 347 - list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) 348 - syncobj_wait_syncobj_func(syncobj, cur); 305 + list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node) 306 + syncobj_wait_syncobj_func(syncobj, wait_cur); 307 + list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node) 308 + syncobj_eventfd_entry_func(syncobj, ev_fd_cur); 349 309 spin_unlock(&syncobj->lock); 350 310 351 311 /* Walk the chain once to trigger garbage collection */ ··· 367 323 struct dma_fence *fence) 368 324 { 369 325 struct dma_fence *old_fence; 370 - struct syncobj_wait_entry *cur, *tmp; 326 + struct syncobj_wait_entry *wait_cur, *wait_tmp; 327 + struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp; 371 328 372 329 if (fence) 373 330 dma_fence_get(fence); ··· 380 335 rcu_assign_pointer(syncobj->fence, fence); 381 336 382 337 if (fence != old_fence) { 383 - list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) 384 - syncobj_wait_syncobj_func(syncobj, cur); 338 + list_for_each_entry_safe(wait_cur, wait_tmp, 
&syncobj->cb_list, node) 339 + syncobj_wait_syncobj_func(syncobj, wait_cur); 340 + list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node) 341 + syncobj_eventfd_entry_func(syncobj, ev_fd_cur); 385 342 } 386 343 387 344 spin_unlock(&syncobj->lock); ··· 519 472 struct drm_syncobj *syncobj = container_of(kref, 520 473 struct drm_syncobj, 521 474 refcount); 475 + struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp; 476 + 522 477 drm_syncobj_replace_fence(syncobj, NULL); 478 + 479 + list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node) 480 + syncobj_eventfd_entry_free(ev_fd_cur); 481 + 523 482 kfree(syncobj); 524 483 } 525 484 EXPORT_SYMBOL(drm_syncobj_free); ··· 554 501 555 502 kref_init(&syncobj->refcount); 556 503 INIT_LIST_HEAD(&syncobj->cb_list); 504 + INIT_LIST_HEAD(&syncobj->ev_fd_list); 557 505 spin_lock_init(&syncobj->lock); 558 506 559 507 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) { ··· 1358 1304 return ret; 1359 1305 } 1360 1306 1307 + static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence, 1308 + struct dma_fence_cb *cb) 1309 + { 1310 + struct syncobj_eventfd_entry *entry = 1311 + container_of(cb, struct syncobj_eventfd_entry, fence_cb); 1312 + 1313 + eventfd_signal(entry->ev_fd_ctx, 1); 1314 + syncobj_eventfd_entry_free(entry); 1315 + } 1316 + 1317 + static void 1318 + syncobj_eventfd_entry_func(struct drm_syncobj *syncobj, 1319 + struct syncobj_eventfd_entry *entry) 1320 + { 1321 + int ret; 1322 + struct dma_fence *fence; 1323 + 1324 + /* This happens inside the syncobj lock */ 1325 + fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1)); 1326 + ret = dma_fence_chain_find_seqno(&fence, entry->point); 1327 + if (ret != 0 || !fence) { 1328 + dma_fence_put(fence); 1329 + return; 1330 + } 1331 + 1332 + list_del_init(&entry->node); 1333 + entry->fence = fence; 1334 + 1335 + if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) { 1336 + eventfd_signal(entry->ev_fd_ctx, 1); 1337 + 
syncobj_eventfd_entry_free(entry); 1338 + } else { 1339 + ret = dma_fence_add_callback(fence, &entry->fence_cb, 1340 + syncobj_eventfd_entry_fence_func); 1341 + if (ret == -ENOENT) { 1342 + eventfd_signal(entry->ev_fd_ctx, 1); 1343 + syncobj_eventfd_entry_free(entry); 1344 + } 1345 + } 1346 + } 1347 + 1348 + int 1349 + drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, 1350 + struct drm_file *file_private) 1351 + { 1352 + struct drm_syncobj_eventfd *args = data; 1353 + struct drm_syncobj *syncobj; 1354 + struct eventfd_ctx *ev_fd_ctx; 1355 + struct syncobj_eventfd_entry *entry; 1356 + 1357 + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) 1358 + return -EOPNOTSUPP; 1359 + 1360 + if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) 1361 + return -EINVAL; 1362 + 1363 + if (args->pad) 1364 + return -EINVAL; 1365 + 1366 + syncobj = drm_syncobj_find(file_private, args->handle); 1367 + if (!syncobj) 1368 + return -ENOENT; 1369 + 1370 + ev_fd_ctx = eventfd_ctx_fdget(args->fd); 1371 + if (IS_ERR(ev_fd_ctx)) 1372 + return PTR_ERR(ev_fd_ctx); 1373 + 1374 + entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1375 + if (!entry) { 1376 + eventfd_ctx_put(ev_fd_ctx); 1377 + return -ENOMEM; 1378 + } 1379 + entry->syncobj = syncobj; 1380 + entry->ev_fd_ctx = ev_fd_ctx; 1381 + entry->point = args->point; 1382 + entry->flags = args->flags; 1383 + 1384 + drm_syncobj_add_eventfd(syncobj, entry); 1385 + drm_syncobj_put(syncobj); 1386 + 1387 + return 0; 1388 + } 1361 1389 1362 1390 int 1363 1391 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 8 8 #include <linux/delay.h> 9 9 #include <linux/dma-fence.h> 10 10 #include <linux/dma-mapping.h> 11 + #include <linux/mod_devicetable.h> 11 12 #include <linux/module.h> 12 - #include <linux/of_device.h> 13 13 #include <linux/platform_device.h> 14 14 #include <linux/pm_runtime.h> 15 15 #include <linux/regulator/consumer.h>
+1 -1
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
··· 12 12 #include <linux/iopoll.h> 13 13 #include <linux/irq.h> 14 14 #include <linux/mfd/syscon.h> 15 - #include <linux/of_device.h> 15 + #include <linux/of.h> 16 16 #include <linux/platform_device.h> 17 17 #include <linux/pm_runtime.h> 18 18 #include <linux/regmap.h>
-1
drivers/gpu/drm/exynos/exynos7_drm_decon.c
··· 12 12 #include <linux/kernel.h> 13 13 #include <linux/of.h> 14 14 #include <linux/of_address.h> 15 - #include <linux/of_device.h> 16 15 #include <linux/platform_device.h> 17 16 #include <linux/pm_runtime.h> 18 17
+2 -1
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 8 8 */ 9 9 10 10 #include <linux/component.h> 11 - #include <linux/of_device.h> 11 + #include <linux/of.h> 12 + #include <linux/platform_device.h> 12 13 13 14 #include <drm/bridge/samsung-dsim.h> 14 15 #include <drm/drm_probe_helper.h>
-1
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 12 12 #include <linux/kernel.h> 13 13 #include <linux/mfd/syscon.h> 14 14 #include <linux/of.h> 15 - #include <linux/of_device.h> 16 15 #include <linux/platform_device.h> 17 16 #include <linux/pm_runtime.h> 18 17 #include <linux/regmap.h>
+1 -1
drivers/gpu/drm/exynos/exynos_drm_rotator.c
··· 12 12 #include <linux/interrupt.h> 13 13 #include <linux/io.h> 14 14 #include <linux/kernel.h> 15 - #include <linux/of_device.h> 15 + #include <linux/of.h> 16 16 #include <linux/platform_device.h> 17 17 #include <linux/pm_runtime.h> 18 18 #include <linux/sizes.h>
+1 -1
drivers/gpu/drm/exynos/exynos_drm_scaler.c
··· 11 11 #include <linux/interrupt.h> 12 12 #include <linux/io.h> 13 13 #include <linux/kernel.h> 14 - #include <linux/of_device.h> 14 + #include <linux/of.h> 15 15 #include <linux/platform_device.h> 16 16 #include <linux/pm_runtime.h> 17 17
+1 -1
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 21 21 #include <linux/irq.h> 22 22 #include <linux/kernel.h> 23 23 #include <linux/mfd/syscon.h> 24 + #include <linux/of.h> 24 25 #include <linux/of_address.h> 25 - #include <linux/of_device.h> 26 26 #include <linux/of_graph.h> 27 27 #include <linux/platform_device.h> 28 28 #include <linux/pm_runtime.h>
-1
drivers/gpu/drm/exynos/exynos_mixer.c
··· 18 18 #include <linux/kernel.h> 19 19 #include <linux/ktime.h> 20 20 #include <linux/of.h> 21 - #include <linux/of_device.h> 22 21 #include <linux/platform_device.h> 23 22 #include <linux/pm_runtime.h> 24 23 #include <linux/regulator/consumer.h>
+1 -1
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
··· 11 11 * Xinwei Kong <kong.kongxinwei@hisilicon.com> 12 12 */ 13 13 14 - #include <linux/of_platform.h> 15 14 #include <linux/component.h> 16 15 #include <linux/module.h> 16 + #include <linux/of.h> 17 17 #include <linux/of_graph.h> 18 18 #include <linux/platform_device.h> 19 19
+3 -2
drivers/gpu/drm/imx/dcss/dcss-dev.c
··· 4 4 */ 5 5 6 6 #include <linux/clk.h> 7 - #include <linux/of_device.h> 7 + #include <linux/of.h> 8 8 #include <linux/of_graph.h> 9 + #include <linux/platform_device.h> 9 10 #include <linux/pm_runtime.h> 10 11 #include <linux/slab.h> 11 12 #include <drm/drm_bridge_connector.h> ··· 199 198 200 199 dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0); 201 200 if (!dcss->of_port) { 202 - dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name); 201 + dev_err(dev, "no port@0 node in %pOF\n", dev->of_node); 203 202 ret = -ENODEV; 204 203 goto clks_err; 205 204 }
+1 -1
drivers/gpu/drm/imx/lcdc/imx-lcdc.c
··· 19 19 #include <linux/bitfield.h> 20 20 #include <linux/clk.h> 21 21 #include <linux/dma-mapping.h> 22 + #include <linux/mod_devicetable.h> 22 23 #include <linux/module.h> 23 - #include <linux/of_device.h> 24 24 #include <linux/platform_device.h> 25 25 26 26 #define IMX21LCDC_LSSAR 0x0000 /* LCDC Screen Start Address Register */
+1 -1
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
··· 14 14 #include <linux/media-bus-format.h> 15 15 #include <linux/module.h> 16 16 #include <linux/mutex.h> 17 - #include <linux/of_device.h> 17 + #include <linux/of.h> 18 18 #include <linux/of_reserved_mem.h> 19 19 #include <linux/platform_device.h> 20 20 #include <linux/pm.h>
+1 -1
drivers/gpu/drm/ingenic/ingenic-ipu.c
··· 14 14 #include <linux/interrupt.h> 15 15 #include <linux/module.h> 16 16 #include <linux/of.h> 17 - #include <linux/of_device.h> 17 + #include <linux/platform_device.h> 18 18 #include <linux/regmap.h> 19 19 #include <linux/time.h> 20 20
+2 -1
drivers/gpu/drm/lima/lima_drv.c
··· 2 2 /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 3 4 4 #include <linux/module.h> 5 - #include <linux/of_platform.h> 5 + #include <linux/of.h> 6 + #include <linux/platform_device.h> 6 7 #include <linux/uaccess.h> 7 8 #include <linux/slab.h> 8 9 #include <linux/pm_runtime.h>
+1 -1
drivers/gpu/drm/logicvc/logicvc_drm.c
··· 10 10 #include <linux/module.h> 11 11 #include <linux/of.h> 12 12 #include <linux/of_address.h> 13 - #include <linux/of_device.h> 14 13 #include <linux/of_reserved_mem.h> 14 + #include <linux/platform_device.h> 15 15 #include <linux/regmap.h> 16 16 #include <linux/types.h> 17 17
+1 -1
drivers/gpu/drm/mcde/mcde_drv.c
··· 469 469 static struct platform_driver mcde_driver = { 470 470 .driver = { 471 471 .name = "mcde", 472 - .of_match_table = of_match_ptr(mcde_of_match), 472 + .of_match_table = mcde_of_match, 473 473 }, 474 474 .probe = mcde_probe, 475 475 .remove_new = mcde_remove,
+1 -2
drivers/gpu/drm/mediatek/mtk_disp_aal.c
··· 6 6 #include <linux/clk.h> 7 7 #include <linux/component.h> 8 8 #include <linux/module.h> 9 - #include <linux/of_device.h> 10 - #include <linux/of_irq.h> 9 + #include <linux/of.h> 11 10 #include <linux/platform_device.h> 12 11 #include <linux/soc/mediatek/mtk-cmdq.h> 13 12
+1 -2
drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
··· 6 6 #include <linux/clk.h> 7 7 #include <linux/component.h> 8 8 #include <linux/module.h> 9 - #include <linux/of_device.h> 10 - #include <linux/of_irq.h> 9 + #include <linux/of.h> 11 10 #include <linux/platform_device.h> 12 11 #include <linux/soc/mediatek/mtk-cmdq.h> 13 12
+1 -2
drivers/gpu/drm/mediatek/mtk_disp_color.c
··· 6 6 #include <linux/clk.h> 7 7 #include <linux/component.h> 8 8 #include <linux/module.h> 9 - #include <linux/of_device.h> 10 - #include <linux/of_irq.h> 9 + #include <linux/of.h> 11 10 #include <linux/platform_device.h> 12 11 #include <linux/soc/mediatek/mtk-cmdq.h> 13 12
+1 -2
drivers/gpu/drm/mediatek/mtk_disp_gamma.c
··· 6 6 #include <linux/clk.h> 7 7 #include <linux/component.h> 8 8 #include <linux/module.h> 9 - #include <linux/of_device.h> 10 - #include <linux/of_irq.h> 9 + #include <linux/of.h> 11 10 #include <linux/platform_device.h> 12 11 #include <linux/soc/mediatek/mtk-cmdq.h> 13 12
+1 -2
drivers/gpu/drm/mediatek/mtk_disp_merge.c
··· 5 5 6 6 #include <linux/clk.h> 7 7 #include <linux/component.h> 8 - #include <linux/of_device.h> 9 - #include <linux/of_irq.h> 8 + #include <linux/of.h> 10 9 #include <linux/platform_device.h> 11 10 #include <linux/reset.h> 12 11 #include <linux/soc/mediatek/mtk-cmdq.h>
+1 -2
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
··· 10 10 #include <linux/clk.h> 11 11 #include <linux/component.h> 12 12 #include <linux/module.h> 13 - #include <linux/of_device.h> 14 - #include <linux/of_irq.h> 13 + #include <linux/of.h> 15 14 #include <linux/platform_device.h> 16 15 #include <linux/pm_runtime.h> 17 16 #include <linux/soc/mediatek/mtk-cmdq.h>
+2 -1
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
··· 7 7 #include <drm/drm_of.h> 8 8 #include <linux/clk.h> 9 9 #include <linux/component.h> 10 - #include <linux/of_device.h> 10 + #include <linux/of.h> 11 11 #include <linux/of_address.h> 12 + #include <linux/of_platform.h> 12 13 #include <linux/platform_device.h> 13 14 #include <linux/pm_runtime.h> 14 15 #include <linux/reset.h>
+1 -2
drivers/gpu/drm/mediatek/mtk_disp_rdma.c
··· 8 8 #include <linux/clk.h> 9 9 #include <linux/component.h> 10 10 #include <linux/module.h> 11 - #include <linux/of_device.h> 12 - #include <linux/of_irq.h> 11 + #include <linux/of.h> 13 12 #include <linux/platform_device.h> 14 13 #include <linux/pm_runtime.h> 15 14 #include <linux/soc/mediatek/mtk-cmdq.h>
-1
drivers/gpu/drm/mediatek/mtk_dpi.c
··· 10 10 #include <linux/kernel.h> 11 11 #include <linux/media-bus-format.h> 12 12 #include <linux/of.h> 13 - #include <linux/of_device.h> 14 13 #include <linux/of_graph.h> 15 14 #include <linux/pinctrl/consumer.h> 16 15 #include <linux/platform_device.h>
+1
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
··· 6 6 #include <linux/clk.h> 7 7 #include <linux/dma-mapping.h> 8 8 #include <linux/mailbox_controller.h> 9 + #include <linux/of.h> 9 10 #include <linux/pm_runtime.h> 10 11 #include <linux/soc/mediatek/mtk-cmdq.h> 11 12 #include <linux/soc/mediatek/mtk-mmsys.h>
+2 -1
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 7 7 #include <linux/component.h> 8 8 #include <linux/iommu.h> 9 9 #include <linux/module.h> 10 - #include <linux/of_address.h> 10 + #include <linux/of.h> 11 11 #include <linux/of_platform.h> 12 + #include <linux/platform_device.h> 12 13 #include <linux/pm_runtime.h> 13 14 #include <linux/dma-mapping.h> 14 15
+1 -1
drivers/gpu/drm/mediatek/mtk_ethdr.c
··· 7 7 #include <drm/drm_framebuffer.h> 8 8 #include <linux/clk.h> 9 9 #include <linux/component.h> 10 - #include <linux/of_device.h> 10 + #include <linux/of.h> 11 11 #include <linux/of_address.h> 12 12 #include <linux/platform_device.h> 13 13 #include <linux/reset.h>
+1 -2
drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
··· 6 6 #include <drm/drm_fourcc.h> 7 7 #include <linux/clk.h> 8 8 #include <linux/component.h> 9 - #include <linux/of_address.h> 10 - #include <linux/of_device.h> 9 + #include <linux/mod_devicetable.h> 11 10 #include <linux/platform_device.h> 12 11 #include <linux/pm_runtime.h> 13 12 #include <linux/soc/mediatek/mtk-cmdq.h>
-1
drivers/gpu/drm/meson/meson_drv.h
··· 9 9 10 10 #include <linux/device.h> 11 11 #include <linux/of.h> 12 - #include <linux/of_device.h> 13 12 #include <linux/regmap.h> 14 13 15 14 struct drm_crtc;
+2 -1
drivers/gpu/drm/meson/meson_dw_hdmi.c
··· 9 9 #include <linux/component.h> 10 10 #include <linux/kernel.h> 11 11 #include <linux/module.h> 12 - #include <linux/of_device.h> 12 + #include <linux/of.h> 13 13 #include <linux/of_graph.h> 14 + #include <linux/platform_device.h> 14 15 #include <linux/regulator/consumer.h> 15 16 #include <linux/reset.h> 16 17
+2 -1
drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
··· 7 7 8 8 #include <linux/clk.h> 9 9 #include <linux/kernel.h> 10 + #include <linux/mod_devicetable.h> 10 11 #include <linux/module.h> 11 - #include <linux/of_device.h> 12 12 #include <linux/of_graph.h> 13 + #include <linux/platform_device.h> 13 14 #include <linux/reset.h> 14 15 #include <linux/phy/phy.h> 15 16 #include <linux/bitfield.h>
-1
drivers/gpu/drm/meson/meson_encoder_dsi.c
··· 7 7 8 8 #include <linux/kernel.h> 9 9 #include <linux/module.h> 10 - #include <linux/of_device.h> 11 10 #include <linux/of_graph.h> 12 11 13 12 #include <drm/drm_atomic_helper.h>
+3 -1
drivers/gpu/drm/meson/meson_encoder_hdmi.c
··· 9 9 #include <linux/component.h> 10 10 #include <linux/kernel.h> 11 11 #include <linux/module.h> 12 - #include <linux/of_device.h> 12 + #include <linux/of.h> 13 13 #include <linux/of_graph.h> 14 + #include <linux/of_platform.h> 15 + #include <linux/platform_device.h> 14 16 #include <linux/regulator/consumer.h> 15 17 #include <linux/reset.h> 16 18
+2
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 3 3 4 4 #include <linux/clk.h> 5 5 #include <linux/interconnect.h> 6 + #include <linux/of_platform.h> 7 + #include <linux/platform_device.h> 6 8 #include <linux/pm_domain.h> 7 9 #include <linux/pm_opp.h> 8 10 #include <soc/qcom/cmd-db.h>
+1 -1
drivers/gpu/drm/msm/dp/dp_audio.c
··· 6 6 7 7 #define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ 8 8 9 - #include <linux/of_platform.h> 9 + #include <linux/platform_device.h> 10 10 11 11 #include <drm/display/drm_dp_helper.h> 12 12 #include <drm/drm_edid.h>
+1 -1
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 10 10 #include <linux/gpio/consumer.h> 11 11 #include <linux/interrupt.h> 12 12 #include <linux/mfd/syscon.h> 13 - #include <linux/of_device.h> 13 + #include <linux/of.h> 14 14 #include <linux/of_graph.h> 15 15 #include <linux/of_irq.h> 16 16 #include <linux/pinctrl/consumer.h>
+2
drivers/gpu/drm/msm/hdmi/hdmi.c
··· 7 7 8 8 #include <linux/of_irq.h> 9 9 #include <linux/of_gpio.h> 10 + #include <linux/of_platform.h> 11 + #include <linux/platform_device.h> 10 12 11 13 #include <drm/drm_bridge_connector.h> 12 14 #include <drm/drm_of.h>
+2 -1
drivers/gpu/drm/msm/hdmi/hdmi_phy.c
··· 3 3 * Copyright (c) 2016, The Linux Foundation. All rights reserved. 4 4 */ 5 5 6 - #include <linux/of_device.h> 6 + #include <linux/of.h> 7 + #include <linux/platform_device.h> 7 8 8 9 #include "hdmi.h" 9 10
+2
drivers/gpu/drm/msm/msm_mdss.c
··· 10 10 #include <linux/irqchip.h> 11 11 #include <linux/irqdesc.h> 12 12 #include <linux/irqchip/chained_irq.h> 13 + #include <linux/of_platform.h> 14 + #include <linux/platform_device.h> 13 15 #include <linux/pm_runtime.h> 14 16 #include <linux/reset.h> 15 17
-1
drivers/gpu/drm/mxsfb/lcdif_drv.c
··· 10 10 #include <linux/io.h> 11 11 #include <linux/module.h> 12 12 #include <linux/of.h> 13 - #include <linux/of_device.h> 14 13 #include <linux/of_graph.h> 15 14 #include <linux/platform_device.h> 16 15 #include <linux/pm_runtime.h>
+2 -1
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
··· 11 11 #include <linux/gpio/consumer.h> 12 12 #include <linux/media-bus-format.h> 13 13 #include <linux/module.h> 14 - #include <linux/of_device.h> 14 + #include <linux/of.h> 15 + #include <linux/platform_device.h> 15 16 #include <linux/regmap.h> 16 17 #include <linux/regulator/consumer.h> 17 18 #include <linux/spi/spi.h>
+1 -1
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
··· 11 11 #include <linux/device.h> 12 12 #include <linux/gpio/consumer.h> 13 13 #include <linux/media-bus-format.h> 14 + #include <linux/mod_devicetable.h> 14 15 #include <linux/module.h> 15 - #include <linux/of_device.h> 16 16 #include <linux/regmap.h> 17 17 #include <linux/regulator/consumer.h> 18 18 #include <linux/spi/spi.h>
-1
drivers/gpu/drm/panel/panel-boe-himax8279d.c
··· 10 10 #include <linux/kernel.h> 11 11 #include <linux/module.h> 12 12 #include <linux/of.h> 13 - #include <linux/of_device.h> 14 13 15 14 #include <linux/gpio/consumer.h> 16 15 #include <linux/regulator/consumer.h>
-1
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
··· 8 8 #include <linux/gpio/consumer.h> 9 9 #include <linux/module.h> 10 10 #include <linux/of.h> 11 - #include <linux/of_device.h> 12 11 #include <linux/regulator/consumer.h> 13 12 14 13 #include <drm/drm_connector.h>
+1 -1
drivers/gpu/drm/panel/panel-dsi-cm.c
··· 11 11 #include <linux/gpio/consumer.h> 12 12 #include <linux/jiffies.h> 13 13 #include <linux/module.h> 14 - #include <linux/of_device.h> 14 + #include <linux/of.h> 15 15 #include <linux/regulator/consumer.h> 16 16 17 17 #include <drm/drm_connector.h>
-1
drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
··· 7 7 #include <linux/delay.h> 8 8 #include <linux/mod_devicetable.h> 9 9 #include <linux/module.h> 10 - #include <linux/of_device.h> 11 10 #include <linux/regulator/consumer.h> 12 11 13 12 #include <drm/drm_mipi_dsi.h>
+1 -1
drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
··· 11 11 #include <linux/gpio/consumer.h> 12 12 #include <linux/delay.h> 13 13 #include <linux/module.h> 14 - #include <linux/of_device.h> 14 + #include <linux/mod_devicetable.h> 15 15 #include <linux/regulator/consumer.h> 16 16 17 17 #define FEIYANG_INIT_CMD_LEN 2
+1 -1
drivers/gpu/drm/panel/panel-himax-hx8394.c
··· 15 15 #include <linux/media-bus-format.h> 16 16 #include <linux/mod_devicetable.h> 17 17 #include <linux/module.h> 18 - #include <linux/of_device.h> 18 + #include <linux/of.h> 19 19 #include <linux/regulator/consumer.h> 20 20 21 21 #include <video/mipi_display.h>
+2 -1
drivers/gpu/drm/panel/panel-ilitek-ili9322.c
··· 22 22 #include <linux/bitops.h> 23 23 #include <linux/gpio/consumer.h> 24 24 #include <linux/module.h> 25 - #include <linux/of_device.h> 25 + #include <linux/of.h> 26 + #include <linux/platform_device.h> 26 27 #include <linux/regmap.h> 27 28 #include <linux/regulator/consumer.h> 28 29 #include <linux/spi/spi.h>
+1 -1
drivers/gpu/drm/panel/panel-ilitek-ili9341.c
··· 23 23 #include <linux/delay.h> 24 24 #include <linux/gpio/consumer.h> 25 25 #include <linux/module.h> 26 - #include <linux/of_device.h> 26 + #include <linux/of.h> 27 27 #include <linux/regulator/consumer.h> 28 28 #include <linux/spi/spi.h> 29 29
+1 -1
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
··· 9 9 #include <linux/errno.h> 10 10 #include <linux/kernel.h> 11 11 #include <linux/module.h> 12 - #include <linux/of_device.h> 12 + #include <linux/of.h> 13 13 14 14 #include <linux/gpio/consumer.h> 15 15 #include <linux/regulator/consumer.h>
+2 -1
drivers/gpu/drm/panel/panel-innolux-ej030na.c
··· 11 11 #include <linux/gpio/consumer.h> 12 12 #include <linux/media-bus-format.h> 13 13 #include <linux/module.h> 14 - #include <linux/of_device.h> 14 + #include <linux/of.h> 15 + #include <linux/platform_device.h> 15 16 #include <linux/regmap.h> 16 17 #include <linux/regulator/consumer.h> 17 18 #include <linux/spi/spi.h>
-1
drivers/gpu/drm/panel/panel-innolux-p079zca.c
··· 7 7 #include <linux/gpio/consumer.h> 8 8 #include <linux/module.h> 9 9 #include <linux/of.h> 10 - #include <linux/of_device.h> 11 10 #include <linux/regulator/consumer.h> 12 11 13 12 #include <video/mipi_display.h>
+1 -1
drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
··· 16 16 #include <linux/gpio/consumer.h> 17 17 #include <linux/delay.h> 18 18 #include <linux/module.h> 19 - #include <linux/of_device.h> 19 + #include <linux/of.h> 20 20 #include <linux/regulator/consumer.h> 21 21 22 22 #define JD9365DA_INIT_CMD_LEN 2
-1
drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
··· 8 8 #include <linux/media-bus-format.h> 9 9 #include <linux/module.h> 10 10 #include <linux/of.h> 11 - #include <linux/of_device.h> 12 11 #include <linux/regulator/consumer.h> 13 12 14 13 #include <video/display_timing.h>
+1 -1
drivers/gpu/drm/panel/panel-lvds.c
··· 10 10 11 11 #include <linux/gpio/consumer.h> 12 12 #include <linux/module.h> 13 - #include <linux/of_platform.h> 13 + #include <linux/of.h> 14 14 #include <linux/platform_device.h> 15 15 #include <linux/regulator/consumer.h> 16 16 #include <linux/slab.h>
-1
drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
··· 18 18 #include <linux/media-bus-format.h> 19 19 #include <linux/module.h> 20 20 #include <linux/of.h> 21 - #include <linux/of_device.h> 22 21 #include <linux/regulator/consumer.h> 23 22 #include <linux/spi/spi.h> 24 23
+1 -1
drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
··· 10 10 #include <linux/gpio/consumer.h> 11 11 #include <linux/media-bus-format.h> 12 12 #include <linux/module.h> 13 - #include <linux/of_device.h> 13 + #include <linux/of.h> 14 14 #include <linux/regulator/consumer.h> 15 15 16 16 #include <video/mipi_display.h>
+1 -1
drivers/gpu/drm/panel/panel-newvision-nv3051d.c
··· 13 13 #include <linux/gpio/consumer.h> 14 14 #include <linux/media-bus-format.h> 15 15 #include <linux/module.h> 16 - #include <linux/of_device.h> 16 + #include <linux/of.h> 17 17 #include <linux/regulator/consumer.h> 18 18 19 19 #include <video/display_timing.h>
+2 -1
drivers/gpu/drm/panel/panel-newvision-nv3052c.c
··· 11 11 #include <linux/gpio/consumer.h> 12 12 #include <linux/media-bus-format.h> 13 13 #include <linux/module.h> 14 - #include <linux/of_device.h> 14 + #include <linux/of.h> 15 + #include <linux/platform_device.h> 15 16 #include <linux/regulator/consumer.h> 16 17 #include <linux/spi/spi.h> 17 18 #include <video/mipi_display.h>
+1 -1
drivers/gpu/drm/panel/panel-novatek-nt35510.c
··· 26 26 #include <linux/bitops.h> 27 27 #include <linux/gpio/consumer.h> 28 28 #include <linux/module.h> 29 - #include <linux/of_device.h> 29 + #include <linux/of.h> 30 30 #include <linux/regmap.h> 31 31 #include <linux/regulator/consumer.h> 32 32
-1
drivers/gpu/drm/panel/panel-novatek-nt35560.c
··· 18 18 #include <linux/gpio/consumer.h> 19 19 #include <linux/module.h> 20 20 #include <linux/of.h> 21 - #include <linux/of_device.h> 22 21 #include <linux/regulator/consumer.h> 23 22 24 23 #include <video/mipi_display.h>
+1 -1
drivers/gpu/drm/panel/panel-novatek-nt35950.c
··· 8 8 #include <linux/delay.h> 9 9 #include <linux/gpio/consumer.h> 10 10 #include <linux/module.h> 11 - #include <linux/of_device.h> 11 + #include <linux/of.h> 12 12 #include <linux/of_graph.h> 13 13 #include <linux/regulator/consumer.h> 14 14
+1 -1
drivers/gpu/drm/panel/panel-novatek-nt36523.c
··· 9 9 #include <linux/delay.h> 10 10 #include <linux/gpio/consumer.h> 11 11 #include <linux/module.h> 12 - #include <linux/of_device.h> 12 + #include <linux/of.h> 13 13 #include <linux/of_graph.h> 14 14 #include <linux/regulator/consumer.h> 15 15
-1
drivers/gpu/drm/panel/panel-novatek-nt36672a.c
··· 16 16 #include <linux/kernel.h> 17 17 #include <linux/module.h> 18 18 #include <linux/of.h> 19 - #include <linux/of_device.h> 20 19 21 20 #include <linux/gpio/consumer.h> 22 21 #include <linux/pinctrl/consumer.h>
-1
drivers/gpu/drm/panel/panel-novatek-nt39016.c
··· 12 12 #include <linux/media-bus-format.h> 13 13 #include <linux/module.h> 14 14 #include <linux/of.h> 15 - #include <linux/of_device.h> 16 15 #include <linux/regmap.h> 17 16 #include <linux/regulator/consumer.h> 18 17 #include <linux/spi/spi.h>
-1
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
··· 12 12 #include <linux/media-bus-format.h> 13 13 #include <linux/module.h> 14 14 #include <linux/of.h> 15 - #include <linux/of_device.h> 16 15 #include <linux/regmap.h> 17 16 #include <linux/regulator/consumer.h> 18 17 #include <linux/spi/spi.h>
-1
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
··· 47 47 #include <linux/media-bus-format.h> 48 48 #include <linux/module.h> 49 49 #include <linux/of.h> 50 - #include <linux/of_device.h> 51 50 #include <linux/of_graph.h> 52 51 #include <linux/pm.h> 53 52
+35 -6
drivers/gpu/drm/panel/panel-samsung-ld9040.c
··· 8 8 * Andrzej Hajda <a.hajda@samsung.com> 9 9 */ 10 10 11 + #include <linux/backlight.h> 11 12 #include <linux/delay.h> 12 13 #include <linux/gpio/consumer.h> 13 14 #include <linux/module.h> ··· 181 180 { 182 181 ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a); 183 182 ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION, 184 - 0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d, 185 - 0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d, 186 - 0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02); 183 + 0x05, 0x5e, 0x96, 0x6b, 0x7d, 0x0d, 0x3f, 0x00, 184 + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 185 + 0x07, 0x05, 0x1f, 0x1f, 0x1f, 0x00, 0x00); 187 186 ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL, 188 - 0x02, 0x08, 0x08, 0x10, 0x10); 187 + 0x02, 0x06, 0x0a, 0x10, 0x10); 189 188 ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04); 190 189 ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL, 191 190 0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88); 192 - ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16); 191 + ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0f, 0x00, 0x16); 193 192 ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00); 194 193 ld9040_brightness_set(ctx); 195 194 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE); ··· 311 310 return 0; 312 311 } 313 312 313 + static int ld9040_bl_update_status(struct backlight_device *dev) 314 + { 315 + struct ld9040 *ctx = bl_get_data(dev); 316 + 317 + ctx->brightness = backlight_get_brightness(dev); 318 + ld9040_brightness_set(ctx); 319 + 320 + return 0; 321 + } 322 + 323 + static const struct backlight_ops ld9040_bl_ops = { 324 + .update_status = ld9040_bl_update_status, 325 + }; 326 + 327 + static const struct backlight_properties ld9040_bl_props = { 328 + .type = BACKLIGHT_RAW, 329 + .scale = BACKLIGHT_SCALE_NON_LINEAR, 330 + .max_brightness = ARRAY_SIZE(ld9040_gammas) - 1, 331 + .brightness = ARRAY_SIZE(ld9040_gammas) - 1, 332 + }; 333 + 314 334 static int ld9040_probe(struct spi_device 
*spi) 315 335 { 336 + struct backlight_device *bldev; 316 337 struct device *dev = &spi->dev; 317 338 struct ld9040 *ctx; 318 339 int ret; ··· 346 323 spi_set_drvdata(spi, ctx); 347 324 348 325 ctx->dev = dev; 349 - ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1; 326 + ctx->brightness = ld9040_bl_props.brightness; 350 327 351 328 ret = ld9040_parse_dt(ctx); 352 329 if (ret < 0) ··· 375 352 376 353 drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs, 377 354 DRM_MODE_CONNECTOR_DPI); 355 + 356 + bldev = devm_backlight_device_register(dev, dev_name(dev), dev, 357 + ctx, &ld9040_bl_ops, 358 + &ld9040_bl_props); 359 + if (IS_ERR(bldev)) 360 + return PTR_ERR(bldev); 378 361 379 362 drm_panel_add(&ctx->panel); 380 363
+1 -1
drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
··· 11 11 #include <linux/gpio/consumer.h> 12 12 #include <linux/regulator/consumer.h> 13 13 #include <linux/delay.h> 14 - #include <linux/of_device.h> 14 + #include <linux/mod_devicetable.h> 15 15 #include <linux/module.h> 16 16 17 17 struct s6d16d0 {
-1
drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
··· 11 11 #include <linux/module.h> 12 12 #include <linux/regulator/consumer.h> 13 13 #include <linux/of.h> 14 - #include <linux/of_device.h> 15 14 16 15 #include <video/mipi_display.h> 17 16 #include <drm/drm_mipi_dsi.h>
+1 -1
drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
··· 12 12 #include <linux/delay.h> 13 13 #include <linux/gpio/consumer.h> 14 14 #include <linux/module.h> 15 - #include <linux/of_device.h> 15 + #include <linux/of.h> 16 16 #include <linux/regulator/consumer.h> 17 17 18 18 #include <drm/drm_mipi_dsi.h>
+1 -1
drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
··· 6 6 7 7 #include <linux/module.h> 8 8 #include <linux/delay.h> 9 - #include <linux/of_device.h> 9 + #include <linux/mod_devicetable.h> 10 10 11 11 #include <drm/drm_mipi_dsi.h> 12 12 #include <drm/drm_print.h>
-1
drivers/gpu/drm/panel/panel-samsung-sofef00.c
··· 8 8 #include <linux/gpio/consumer.h> 9 9 #include <linux/module.h> 10 10 #include <linux/of.h> 11 - #include <linux/of_device.h> 12 11 #include <linux/regulator/consumer.h> 13 12 #include <linux/backlight.h> 14 13
-4
drivers/gpu/drm/panel/panel-simple.c
··· 141 141 142 142 bool prepared; 143 143 144 - ktime_t prepared_time; 145 144 ktime_t unprepared_time; 146 145 147 146 const struct panel_desc *desc; ··· 349 350 350 351 if (p->desc->delay.prepare) 351 352 msleep(p->desc->delay.prepare); 352 - 353 - p->prepared_time = ktime_get_boottime(); 354 353 355 354 return 0; 356 355 } ··· 563 566 return -ENOMEM; 564 567 565 568 panel->enabled = false; 566 - panel->prepared_time = 0; 567 569 panel->desc = desc; 568 570 569 571 panel->supply = devm_regulator_get(dev, "power");
+1 -1
drivers/gpu/drm/panel/panel-sitronix-st7701.c
··· 12 12 #include <linux/gpio/consumer.h> 13 13 #include <linux/delay.h> 14 14 #include <linux/module.h> 15 - #include <linux/of_device.h> 15 + #include <linux/of.h> 16 16 #include <linux/regulator/consumer.h> 17 17 18 18 #include <video/mipi_display.h>
+1 -1
drivers/gpu/drm/panel/panel-sitronix-st7703.c
··· 13 13 #include <linux/media-bus-format.h> 14 14 #include <linux/mod_devicetable.h> 15 15 #include <linux/module.h> 16 - #include <linux/of_device.h> 16 + #include <linux/of.h> 17 17 #include <linux/regulator/consumer.h> 18 18 19 19 #include <video/display_timing.h>
-1
drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
··· 14 14 #include <linux/gpio/consumer.h> 15 15 #include <linux/module.h> 16 16 #include <linux/of.h> 17 - #include <linux/of_device.h> 18 17 #include <linux/regulator/consumer.h> 19 18 20 19 #include <video/mipi_display.h>
+1 -1
drivers/gpu/drm/panel/panel-truly-nt35597.c
··· 7 7 #include <linux/delay.h> 8 8 #include <linux/gpio/consumer.h> 9 9 #include <linux/module.h> 10 - #include <linux/of_device.h> 10 + #include <linux/of.h> 11 11 #include <linux/of_graph.h> 12 12 #include <linux/pinctrl/consumer.h> 13 13 #include <linux/regulator/consumer.h>
+1 -1
drivers/gpu/drm/panel/panel-visionox-rm69299.c
··· 5 5 6 6 #include <linux/delay.h> 7 7 #include <linux/module.h> 8 - #include <linux/of_device.h> 8 + #include <linux/mod_devicetable.h> 9 9 #include <linux/gpio/consumer.h> 10 10 #include <linux/regulator/consumer.h> 11 11
+2 -1
drivers/gpu/drm/panfrost/panfrost_drv.c
··· 4 4 /* Copyright 2019 Collabora ltd. */ 5 5 6 6 #include <linux/module.h> 7 - #include <linux/of_platform.h> 7 + #include <linux/of.h> 8 8 #include <linux/pagemap.h> 9 + #include <linux/platform_device.h> 9 10 #include <linux/pm_runtime.h> 10 11 #include <drm/panfrost_drm.h> 11 12 #include <drm/drm_drv.h>
+1
drivers/gpu/drm/pl111/pl111_versatile.c
··· 15 15 #include <linux/module.h> 16 16 #include <linux/of.h> 17 17 #include <linux/of_platform.h> 18 + #include <linux/platform_device.h> 18 19 #include <linux/regmap.h> 19 20 #include <linux/vexpress.h> 20 21
+1 -1
drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
··· 12 12 #include <linux/io.h> 13 13 #include <linux/mm.h> 14 14 #include <linux/module.h> 15 - #include <linux/of_device.h> 15 + #include <linux/of.h> 16 16 #include <linux/platform_device.h> 17 17 #include <linux/pm.h> 18 18 #include <linux/slab.h>
+2
drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
··· 20 20 21 21 #include <linux/device.h> 22 22 #include <linux/dma-buf.h> 23 + #include <linux/of.h> 23 24 #include <linux/of_graph.h> 24 25 #include <linux/of_platform.h> 26 + #include <linux/platform_device.h> 25 27 #include <linux/wait.h> 26 28 27 29 #include "rcar_du_crtc.h"
+1
drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
··· 22 22 #include <linux/bitops.h> 23 23 #include <linux/dma-mapping.h> 24 24 #include <linux/of_platform.h> 25 + #include <linux/platform_device.h> 25 26 #include <linux/scatterlist.h> 26 27 #include <linux/slab.h> 27 28 #include <linux/videodev2.h>
-1
drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
··· 12 12 #include <linux/math64.h> 13 13 #include <linux/module.h> 14 14 #include <linux/of.h> 15 - #include <linux/of_device.h> 16 15 #include <linux/of_graph.h> 17 16 #include <linux/platform_device.h> 18 17 #include <linux/reset.h>
-1
drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c
··· 10 10 #include <linux/iopoll.h> 11 11 #include <linux/module.h> 12 12 #include <linux/of.h> 13 - #include <linux/of_device.h> 14 13 #include <linux/of_graph.h> 15 14 #include <linux/platform_device.h> 16 15 #include <linux/pm_runtime.h>
+2 -1
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
··· 10 10 11 11 #include <linux/component.h> 12 12 #include <linux/mfd/syscon.h> 13 - #include <linux/of_device.h> 13 + #include <linux/of.h> 14 14 #include <linux/of_graph.h> 15 + #include <linux/platform_device.h> 15 16 #include <linux/regmap.h> 16 17 #include <linux/reset.h> 17 18 #include <linux/clk.h>
+2
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
··· 12 12 #include <linux/mfd/syscon.h> 13 13 #include <linux/module.h> 14 14 #include <linux/of_device.h> 15 + #include <linux/of_platform.h> 15 16 #include <linux/phy/phy.h> 17 + #include <linux/platform_device.h> 16 18 #include <linux/pm_runtime.h> 17 19 #include <linux/regmap.h> 18 20
+2 -1
drivers/gpu/drm/rockchip/inno_hdmi.c
··· 11 11 #include <linux/err.h> 12 12 #include <linux/hdmi.h> 13 13 #include <linux/mfd/syscon.h> 14 + #include <linux/mod_devicetable.h> 14 15 #include <linux/module.h> 15 16 #include <linux/mutex.h> 16 - #include <linux/of_device.h> 17 + #include <linux/platform_device.h> 17 18 18 19 #include <drm/drm_atomic_helper.h> 19 20 #include <drm/drm_edid.h>
+1
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 7 7 */ 8 8 9 9 #include <linux/dma-mapping.h> 10 + #include <linux/platform_device.h> 10 11 #include <linux/pm_runtime.h> 11 12 #include <linux/module.h> 12 13 #include <linux/of_graph.h>
-1
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 12 12 #include <linux/log2.h> 13 13 #include <linux/module.h> 14 14 #include <linux/of.h> 15 - #include <linux/of_device.h> 16 15 #include <linux/overflow.h> 17 16 #include <linux/platform_device.h> 18 17 #include <linux/pm_runtime.h>
-1
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
··· 13 13 #include <linux/mfd/syscon.h> 14 14 #include <linux/module.h> 15 15 #include <linux/of.h> 16 - #include <linux/of_device.h> 17 16 #include <linux/of_graph.h> 18 17 #include <linux/platform_device.h> 19 18 #include <linux/pm_runtime.h>
+1 -1
drivers/gpu/drm/solomon/ssd130x.c
··· 153 153 const struct drm_format_info *fi; 154 154 unsigned int pitch; 155 155 156 - fi = drm_format_info(DRM_FORMAT_C1); 156 + fi = drm_format_info(DRM_FORMAT_R1); 157 157 if (!fi) 158 158 return -EINVAL; 159 159
+1 -3
drivers/gpu/drm/sprd/sprd_dpu.c
··· 9 9 #include <linux/io.h> 10 10 #include <linux/module.h> 11 11 #include <linux/of.h> 12 - #include <linux/of_address.h> 13 - #include <linux/of_device.h> 14 12 #include <linux/of_graph.h> 15 - #include <linux/of_irq.h> 13 + #include <linux/platform_device.h> 16 14 #include <linux/wait.h> 17 15 #include <linux/workqueue.h> 18 16
+2 -1
drivers/gpu/drm/sprd/sprd_drm.c
··· 5 5 6 6 #include <linux/component.h> 7 7 #include <linux/dma-mapping.h> 8 + #include <linux/mod_devicetable.h> 8 9 #include <linux/module.h> 9 10 #include <linux/mutex.h> 10 11 #include <linux/of_graph.h> 11 - #include <linux/of_platform.h> 12 + #include <linux/platform_device.h> 12 13 13 14 #include <drm/drm_atomic_helper.h> 14 15 #include <drm/drm_drv.h>
+2 -4
drivers/gpu/drm/sprd/sprd_dsi.c
··· 5 5 6 6 #include <linux/component.h> 7 7 #include <linux/module.h> 8 - #include <linux/of_address.h> 9 - #include <linux/of_device.h> 10 - #include <linux/of_irq.h> 11 - #include <linux/of_graph.h> 8 + #include <linux/of.h> 9 + #include <linux/platform_device.h> 12 10 #include <video/mipi_display.h> 13 11 14 12 #include <drm/drm_atomic_helper.h>
+2
drivers/gpu/drm/sti/sti_drv.c
··· 8 8 #include <linux/dma-mapping.h> 9 9 #include <linux/kernel.h> 10 10 #include <linux/module.h> 11 + #include <linux/of.h> 11 12 #include <linux/of_platform.h> 13 + #include <linux/platform_device.h> 12 14 13 15 #include <drm/drm_atomic.h> 14 16 #include <drm/drm_atomic_helper.h>
+2 -1
drivers/gpu/drm/stm/drv.c
··· 10 10 11 11 #include <linux/component.h> 12 12 #include <linux/dma-mapping.h> 13 + #include <linux/mod_devicetable.h> 13 14 #include <linux/module.h> 14 - #include <linux/of_platform.h> 15 + #include <linux/platform_device.h> 15 16 #include <linux/pm_runtime.h> 16 17 17 18 #include <drm/drm_aperture.h>
-1
drivers/gpu/drm/stm/ltdc.c
··· 14 14 #include <linux/interrupt.h> 15 15 #include <linux/media-bus-format.h> 16 16 #include <linux/module.h> 17 - #include <linux/of_address.h> 18 17 #include <linux/of_graph.h> 19 18 #include <linux/pinctrl/consumer.h> 20 19 #include <linux/platform_device.h>
+1 -1
drivers/gpu/drm/sun4i/sun4i_frontend.c
··· 7 7 #include <linux/clk.h> 8 8 #include <linux/component.h> 9 9 #include <linux/module.h> 10 - #include <linux/of_device.h> 10 + #include <linux/of.h> 11 11 #include <linux/platform_device.h> 12 12 #include <linux/pm_runtime.h> 13 13 #include <linux/regmap.h>
+1
drivers/gpu/drm/sun4i/sun4i_frontend.h
··· 8 8 #define _SUN4I_FRONTEND_H_ 9 9 10 10 #include <linux/list.h> 11 + #include <linux/mod_devicetable.h> 11 12 12 13 #define SUN4I_FRONTEND_EN_REG 0x000 13 14 #define SUN4I_FRONTEND_EN_EN BIT(0)
+1 -1
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
··· 10 10 #include <linux/i2c.h> 11 11 #include <linux/iopoll.h> 12 12 #include <linux/module.h> 13 - #include <linux/of_device.h> 13 + #include <linux/of.h> 14 14 #include <linux/platform_device.h> 15 15 #include <linux/pm_runtime.h> 16 16 #include <linux/regmap.h>
+3 -3
drivers/gpu/drm/sun4i/sun4i_tcon.c
··· 10 10 #include <linux/ioport.h> 11 11 #include <linux/media-bus-format.h> 12 12 #include <linux/module.h> 13 - #include <linux/of_address.h> 14 - #include <linux/of_device.h> 15 - #include <linux/of_irq.h> 13 + #include <linux/of.h> 14 + #include <linux/of_platform.h> 15 + #include <linux/platform_device.h> 16 16 #include <linux/regmap.h> 17 17 #include <linux/reset.h> 18 18
+1 -1
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
··· 5 5 6 6 #include <linux/component.h> 7 7 #include <linux/module.h> 8 - #include <linux/of_device.h> 8 + #include <linux/of.h> 9 9 #include <linux/platform_device.h> 10 10 11 11 #include <drm/drm_modeset_helper_vtables.h>
+2 -1
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
··· 4 4 */ 5 5 6 6 #include <linux/delay.h> 7 - #include <linux/of_address.h> 7 + #include <linux/of.h> 8 8 #include <linux/of_platform.h> 9 + #include <linux/platform_device.h> 9 10 10 11 #include "sun8i_dw_hdmi.h" 11 12
+2
drivers/gpu/drm/sun4i/sun8i_mixer.c
··· 10 10 #include <linux/component.h> 11 11 #include <linux/dma-mapping.h> 12 12 #include <linux/module.h> 13 + #include <linux/of.h> 13 14 #include <linux/of_device.h> 14 15 #include <linux/of_graph.h> 16 + #include <linux/platform_device.h> 15 17 #include <linux/reset.h> 16 18 17 19 #include <drm/drm_atomic_helper.h>
+1 -1
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
··· 7 7 #include <linux/device.h> 8 8 #include <linux/io.h> 9 9 #include <linux/module.h> 10 - #include <linux/of_device.h> 10 + #include <linux/of.h> 11 11 #include <linux/of_graph.h> 12 12 #include <linux/platform_device.h> 13 13
+2 -1
drivers/gpu/drm/tegra/dc.c
··· 11 11 #include <linux/iommu.h> 12 12 #include <linux/interconnect.h> 13 13 #include <linux/module.h> 14 - #include <linux/of_device.h> 14 + #include <linux/of.h> 15 + #include <linux/platform_device.h> 15 16 #include <linux/pm_domain.h> 16 17 #include <linux/pm_opp.h> 17 18 #include <linux/pm_runtime.h>
+1 -1
drivers/gpu/drm/tegra/dpaux.c
··· 8 8 #include <linux/interrupt.h> 9 9 #include <linux/io.h> 10 10 #include <linux/module.h> 11 - #include <linux/of_device.h> 11 + #include <linux/of.h> 12 12 #include <linux/pinctrl/pinconf-generic.h> 13 13 #include <linux/pinctrl/pinctrl.h> 14 14 #include <linux/pinctrl/pinmux.h>
+2 -1
drivers/gpu/drm/tegra/gr2d.c
··· 7 7 #include <linux/delay.h> 8 8 #include <linux/iommu.h> 9 9 #include <linux/module.h> 10 - #include <linux/of_device.h> 10 + #include <linux/of.h> 11 + #include <linux/platform_device.h> 11 12 #include <linux/pm_runtime.h> 12 13 #include <linux/reset.h> 13 14
+1 -1
drivers/gpu/drm/tegra/gr3d.c
··· 9 9 #include <linux/host1x.h> 10 10 #include <linux/iommu.h> 11 11 #include <linux/module.h> 12 - #include <linux/of_device.h> 12 + #include <linux/of.h> 13 13 #include <linux/platform_device.h> 14 14 #include <linux/pm_domain.h> 15 15 #include <linux/pm_opp.h>
+2 -1
drivers/gpu/drm/tegra/hdmi.c
··· 10 10 #include <linux/hdmi.h> 11 11 #include <linux/math64.h> 12 12 #include <linux/module.h> 13 - #include <linux/of_device.h> 13 + #include <linux/of.h> 14 + #include <linux/platform_device.h> 14 15 #include <linux/pm_opp.h> 15 16 #include <linux/pm_runtime.h> 16 17 #include <linux/regulator/consumer.h>
+1 -1
drivers/gpu/drm/tegra/hub.c
··· 9 9 #include <linux/host1x.h> 10 10 #include <linux/module.h> 11 11 #include <linux/of.h> 12 - #include <linux/of_device.h> 13 12 #include <linux/of_graph.h> 13 + #include <linux/of_platform.h> 14 14 #include <linux/platform_device.h> 15 15 #include <linux/pm_runtime.h> 16 16 #include <linux/reset.h>
-2
drivers/gpu/drm/tegra/nvdec.c
··· 11 11 #include <linux/iopoll.h> 12 12 #include <linux/module.h> 13 13 #include <linux/of.h> 14 - #include <linux/of_device.h> 15 - #include <linux/of_platform.h> 16 14 #include <linux/platform_device.h> 17 15 #include <linux/pm_runtime.h> 18 16 #include <linux/reset.h>
+1 -1
drivers/gpu/drm/tegra/sor.c
··· 8 8 #include <linux/debugfs.h> 9 9 #include <linux/io.h> 10 10 #include <linux/module.h> 11 - #include <linux/of_device.h> 11 + #include <linux/of.h> 12 12 #include <linux/platform_device.h> 13 13 #include <linux/pm_runtime.h> 14 14 #include <linux/regulator/consumer.h>
-2
drivers/gpu/drm/tegra/vic.c
··· 10 10 #include <linux/iommu.h> 11 11 #include <linux/module.h> 12 12 #include <linux/of.h> 13 - #include <linux/of_device.h> 14 - #include <linux/of_platform.h> 15 13 #include <linux/platform_device.h> 16 14 #include <linux/pm_runtime.h> 17 15 #include <linux/reset.h>
-2
drivers/gpu/drm/tidss/tidss_dispc.c
··· 15 15 #include <linux/module.h> 16 16 #include <linux/mfd/syscon.h> 17 17 #include <linux/of.h> 18 - #include <linux/of_graph.h> 19 - #include <linux/of_device.h> 20 18 #include <linux/platform_device.h> 21 19 #include <linux/pm_runtime.h> 22 20 #include <linux/regmap.h>
+1 -1
drivers/gpu/drm/tidss/tidss_drv.c
··· 5 5 */ 6 6 7 7 #include <linux/console.h> 8 - #include <linux/of_device.h> 8 + #include <linux/of.h> 9 9 #include <linux/module.h> 10 10 #include <linux/pm_runtime.h> 11 11
+2 -1
drivers/gpu/drm/vc4/vc4_crtc.c
··· 31 31 32 32 #include <linux/clk.h> 33 33 #include <linux/component.h> 34 - #include <linux/of_device.h> 34 + #include <linux/of.h> 35 + #include <linux/platform_device.h> 35 36 #include <linux/pm_runtime.h> 36 37 37 38 #include <drm/drm_atomic.h>
+2 -2
drivers/gpu/drm/vc4/vc4_dpi.c
··· 22 22 #include <linux/clk.h> 23 23 #include <linux/component.h> 24 24 #include <linux/media-bus-format.h> 25 - #include <linux/of_graph.h> 26 - #include <linux/of_platform.h> 25 + #include <linux/mod_devicetable.h> 26 + #include <linux/platform_device.h> 27 27 #include "vc4_drv.h" 28 28 #include "vc4_regs.h" 29 29
+1 -1
drivers/gpu/drm/vc4/vc4_drv.c
··· 26 26 #include <linux/dma-mapping.h> 27 27 #include <linux/io.h> 28 28 #include <linux/module.h> 29 - #include <linux/of_platform.h> 29 + #include <linux/of_device.h> 30 30 #include <linux/platform_device.h> 31 31 #include <linux/pm_runtime.h> 32 32
+2 -1
drivers/gpu/drm/vc4/vc4_dsi.c
··· 25 25 #include <linux/dma-mapping.h> 26 26 #include <linux/dmaengine.h> 27 27 #include <linux/io.h> 28 + #include <linux/of.h> 28 29 #include <linux/of_address.h> 29 - #include <linux/of_platform.h> 30 + #include <linux/platform_device.h> 30 31 #include <linux/pm_runtime.h> 31 32 32 33 #include <drm/drm_atomic_helper.h>
+1 -1
drivers/gpu/drm/vc4/vc4_hdmi.c
··· 41 41 #include <linux/component.h> 42 42 #include <linux/gpio/consumer.h> 43 43 #include <linux/i2c.h> 44 + #include <linux/of.h> 44 45 #include <linux/of_address.h> 45 - #include <linux/of_platform.h> 46 46 #include <linux/pm_runtime.h> 47 47 #include <linux/rational.h> 48 48 #include <linux/reset.h>
+2 -2
drivers/gpu/drm/vc4/vc4_txp.c
··· 9 9 10 10 #include <linux/clk.h> 11 11 #include <linux/component.h> 12 - #include <linux/of_graph.h> 13 - #include <linux/of_platform.h> 12 + #include <linux/mod_devicetable.h> 13 + #include <linux/platform_device.h> 14 14 #include <linux/pm_runtime.h> 15 15 16 16 #include <drm/drm_atomic.h>
+2 -2
drivers/gpu/drm/vc4/vc4_vec.c
··· 21 21 #include <drm/drm_simple_kms_helper.h> 22 22 #include <linux/clk.h> 23 23 #include <linux/component.h> 24 - #include <linux/of_graph.h> 25 - #include <linux/of_platform.h> 24 + #include <linux/of.h> 25 + #include <linux/platform_device.h> 26 26 #include <linux/pm_runtime.h> 27 27 28 28 #include "vc4_drv.h"
-1
drivers/gpu/drm/xen/xen_drm_front.c
··· 11 11 #include <linux/delay.h> 12 12 #include <linux/dma-mapping.h> 13 13 #include <linux/module.h> 14 - #include <linux/of_device.h> 15 14 16 15 #include <drm/drm_atomic_helper.h> 17 16 #include <drm/drm_drv.h>
+6 -23
drivers/gpu/host1x/bus.c
··· 338 338 return strcmp(dev_name(dev), drv->name) == 0; 339 339 } 340 340 341 + /* 342 + * Note that this is really only needed for backwards compatibility 343 + * with libdrm, which parses this information from sysfs and will 344 + * fail if it can't find the OF_FULLNAME, specifically. 345 + */ 341 346 static int host1x_device_uevent(const struct device *dev, 342 347 struct kobj_uevent_env *env) 343 348 { 344 - struct device_node *np = dev->parent->of_node; 345 - unsigned int count = 0; 346 - struct property *p; 347 - const char *compat; 348 - 349 - /* 350 - * This duplicates most of of_device_uevent(), but the latter cannot 351 - * be called from modules and operates on dev->of_node, which is not 352 - * available in this case. 353 - * 354 - * Note that this is really only needed for backwards compatibility 355 - * with libdrm, which parses this information from sysfs and will 356 - * fail if it can't find the OF_FULLNAME, specifically. 357 - */ 358 - add_uevent_var(env, "OF_NAME=%pOFn", np); 359 - add_uevent_var(env, "OF_FULLNAME=%pOF", np); 360 - 361 - of_property_for_each_string(np, "compatible", p, compat) { 362 - add_uevent_var(env, "OF_COMPATIBLE_%u=%s", count, compat); 363 - count++; 364 - } 365 - 366 - add_uevent_var(env, "OF_COMPATIBLE_N=%u", count); 349 + of_device_uevent(dev->parent, env); 367 350 368 351 return 0; 369 352 }
+1 -1
drivers/gpu/host1x/context.c
··· 6 6 #include <linux/device.h> 7 7 #include <linux/kref.h> 8 8 #include <linux/of.h> 9 - #include <linux/of_platform.h> 9 + #include <linux/of_device.h> 10 10 #include <linux/pid.h> 11 11 #include <linux/slab.h> 12 12
+2 -1
drivers/gpu/host1x/dev.c
··· 11 11 #include <linux/io.h> 12 12 #include <linux/list.h> 13 13 #include <linux/module.h> 14 - #include <linux/of_device.h> 15 14 #include <linux/of.h> 15 + #include <linux/of_platform.h> 16 + #include <linux/platform_device.h> 16 17 #include <linux/pm_runtime.h> 17 18 #include <linux/slab.h> 18 19
+1 -1
drivers/gpu/ipu-v3/ipu-common.c
··· 18 18 #include <linux/irq.h> 19 19 #include <linux/irqchip/chained_irq.h> 20 20 #include <linux/irqdomain.h> 21 - #include <linux/of_device.h> 21 + #include <linux/of.h> 22 22 #include <linux/of_graph.h> 23 23 24 24 #include <drm/drm_fourcc.h>
+1
drivers/of/device.c
··· 312 312 } 313 313 mutex_unlock(&of_mutex); 314 314 } 315 + EXPORT_SYMBOL_GPL(of_device_uevent); 315 316 316 317 int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) 317 318 {
+25
include/drm/drm_debugfs.h
··· 34 34 35 35 #include <linux/types.h> 36 36 #include <linux/seq_file.h> 37 + 38 + #include <drm/drm_gpuva_mgr.h> 39 + 40 + /** 41 + * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space 42 + * @show: the &drm_info_list's show callback 43 + * @data: driver private data 44 + * 45 + * Drivers should use this macro to define a &drm_info_list entry to provide a 46 + * debugfs file for dumping the GPU VA space regions and mappings. 47 + * 48 + * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from 49 + * their @show callback. 50 + */ 51 + #define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data} 52 + 37 53 /** 38 54 * struct drm_info_list - debugfs info list entry 39 55 * ··· 150 134 151 135 void drm_debugfs_add_files(struct drm_device *dev, 152 136 const struct drm_debugfs_info *files, int count); 137 + 138 + int drm_debugfs_gpuva_info(struct seq_file *m, 139 + struct drm_gpuva_manager *mgr); 153 140 #else 154 141 static inline void drm_debugfs_create_files(const struct drm_info_list *files, 155 142 int count, struct dentry *root, ··· 174 155 const struct drm_debugfs_info *files, 175 156 int count) 176 157 {} 158 + 159 + static inline int drm_debugfs_gpuva_info(struct seq_file *m, 160 + struct drm_gpuva_manager *mgr) 161 + { 162 + return 0; 163 + } 177 164 #endif 178 165 179 166 #endif /* _DRM_DEBUGFS_H_ */
+6
include/drm/drm_drv.h
··· 104 104 * acceleration should be handled by two drivers that are connected using auxiliary bus. 105 105 */ 106 106 DRIVER_COMPUTE_ACCEL = BIT(7), 107 + /** 108 + * @DRIVER_GEM_GPUVA: 109 + * 110 + * Driver supports user defined GPU VA bindings for GEM objects. 111 + */ 112 + DRIVER_GEM_GPUVA = BIT(8), 107 113 108 114 /* IMPORTANT: Below are all the legacy flags, add new ones above. */ 109 115
+4 -4
include/drm/drm_file.h
··· 50 50 * header include loops we need it here for now. 51 51 */ 52 52 53 - /* Note that the order of this enum is ABI (it determines 53 + /* Note that the values of this enum are ABI (it determines 54 54 * /dev/dri/renderD* numbers). 55 55 * 56 56 * Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to 57 57 * be implemented before we hit any future 58 58 */ 59 59 enum drm_minor_type { 60 - DRM_MINOR_PRIMARY, 61 - DRM_MINOR_CONTROL, 62 - DRM_MINOR_RENDER, 60 + DRM_MINOR_PRIMARY = 0, 61 + DRM_MINOR_CONTROL = 1, 62 + DRM_MINOR_RENDER = 2, 63 63 DRM_MINOR_ACCEL = 32, 64 64 }; 65 65
+79
include/drm/drm_gem.h
··· 36 36 37 37 #include <linux/kref.h> 38 38 #include <linux/dma-resv.h> 39 + #include <linux/list.h> 40 + #include <linux/mutex.h> 39 41 40 42 #include <drm/drm_vma_manager.h> 41 43 ··· 382 380 struct dma_resv _resv; 383 381 384 382 /** 383 + * @gpuva: 384 + * 385 + * Provides the list of GPU VAs attached to this GEM object. 386 + * 387 + * Drivers should lock list accesses with the GEMs &dma_resv lock 388 + * (&drm_gem_object.resv) or a custom lock if one is provided. 389 + */ 390 + struct { 391 + struct list_head list; 392 + 393 + #ifdef CONFIG_LOCKDEP 394 + struct lockdep_map *lock_dep_map; 395 + #endif 396 + } gpuva; 397 + 398 + /** 385 399 * @funcs: 386 400 * 387 401 * Optional GEM object functions. If this is set, it will be used instead of the ··· 543 525 bool (*shrink)(struct drm_gem_object *obj)); 544 526 545 527 int drm_gem_evict(struct drm_gem_object *obj); 528 + 529 + #ifdef CONFIG_LOCKDEP 530 + /** 531 + * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list. 532 + * @obj: the &drm_gem_object 533 + * @lock: the lock used to protect the gpuva list. The locking primitive 534 + * must contain a dep_map field. 535 + * 536 + * Call this if you're not proctecting access to the gpuva list 537 + * with the dma-resv lock, otherwise, drm_gem_gpuva_init() takes care 538 + * of initializing lock_dep_map for you. 539 + */ 540 + #define drm_gem_gpuva_set_lock(obj, lock) \ 541 + if (!(obj)->gpuva.lock_dep_map) \ 542 + (obj)->gpuva.lock_dep_map = &(lock)->dep_map 543 + #define drm_gem_gpuva_assert_lock_held(obj) \ 544 + lockdep_assert(lock_is_held((obj)->gpuva.lock_dep_map)) 545 + #else 546 + #define drm_gem_gpuva_set_lock(obj, lock) do {} while (0) 547 + #define drm_gem_gpuva_assert_lock_held(obj) do {} while (0) 548 + #endif 549 + 550 + /** 551 + * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object 552 + * @obj: the &drm_gem_object 553 + * 554 + * This initializes the &drm_gem_object's &drm_gpuva list. 
555 + * 556 + * Calling this function is only necessary for drivers intending to support the 557 + * &drm_driver_feature DRIVER_GEM_GPUVA. 558 + */ 559 + static inline void drm_gem_gpuva_init(struct drm_gem_object *obj) 560 + { 561 + INIT_LIST_HEAD(&obj->gpuva.list); 562 + drm_gem_gpuva_set_lock(obj, &obj->resv->lock.base); 563 + } 564 + 565 + /** 566 + * drm_gem_for_each_gpuva() - iternator to walk over a list of gpuvas 567 + * @entry__: &drm_gpuva structure to assign to in each iteration step 568 + * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with 569 + * 570 + * This iterator walks over all &drm_gpuva structures associated with the 571 + * &drm_gpuva_manager. 572 + */ 573 + #define drm_gem_for_each_gpuva(entry__, obj__) \ 574 + list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry) 575 + 576 + /** 577 + * drm_gem_for_each_gpuva_safe() - iternator to safely walk over a list of 578 + * gpuvas 579 + * @entry__: &drm_gpuva structure to assign to in each iteration step 580 + * @next__: &next &drm_gpuva to store the next step 581 + * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with 582 + * 583 + * This iterator walks over all &drm_gpuva structures associated with the 584 + * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence 585 + * it is save against removal of elements. 586 + */ 587 + #define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \ 588 + list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry) 546 589 547 590 #endif /* __DRM_GEM_H__ */
+706
include/drm/drm_gpuva_mgr.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + 3 + #ifndef __DRM_GPUVA_MGR_H__ 4 + #define __DRM_GPUVA_MGR_H__ 5 + 6 + /* 7 + * Copyright (c) 2022 Red Hat. 8 + * 9 + * Permission is hereby granted, free of charge, to any person obtaining a 10 + * copy of this software and associated documentation files (the "Software"), 11 + * to deal in the Software without restriction, including without limitation 12 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 13 + * and/or sell copies of the Software, and to permit persons to whom the 14 + * Software is furnished to do so, subject to the following conditions: 15 + * 16 + * The above copyright notice and this permission notice shall be included in 17 + * all copies or substantial portions of the Software. 18 + * 19 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 24 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 25 + * OTHER DEALINGS IN THE SOFTWARE. 26 + */ 27 + 28 + #include <linux/list.h> 29 + #include <linux/rbtree.h> 30 + #include <linux/types.h> 31 + 32 + #include <drm/drm_gem.h> 33 + 34 + struct drm_gpuva_manager; 35 + struct drm_gpuva_fn_ops; 36 + 37 + /** 38 + * enum drm_gpuva_flags - flags for struct drm_gpuva 39 + */ 40 + enum drm_gpuva_flags { 41 + /** 42 + * @DRM_GPUVA_INVALIDATED: 43 + * 44 + * Flag indicating that the &drm_gpuva's backing GEM is invalidated. 45 + */ 46 + DRM_GPUVA_INVALIDATED = (1 << 0), 47 + 48 + /** 49 + * @DRM_GPUVA_SPARSE: 50 + * 51 + * Flag indicating that the &drm_gpuva is a sparse mapping. 
52 + */ 53 + DRM_GPUVA_SPARSE = (1 << 1), 54 + 55 + /** 56 + * @DRM_GPUVA_USERBITS: user defined bits 57 + */ 58 + DRM_GPUVA_USERBITS = (1 << 2), 59 + }; 60 + 61 + /** 62 + * struct drm_gpuva - structure to track a GPU VA mapping 63 + * 64 + * This structure represents a GPU VA mapping and is associated with a 65 + * &drm_gpuva_manager. 66 + * 67 + * Typically, this structure is embedded in bigger driver structures. 68 + */ 69 + struct drm_gpuva { 70 + /** 71 + * @mgr: the &drm_gpuva_manager this object is associated with 72 + */ 73 + struct drm_gpuva_manager *mgr; 74 + 75 + /** 76 + * @flags: the &drm_gpuva_flags for this mapping 77 + */ 78 + enum drm_gpuva_flags flags; 79 + 80 + /** 81 + * @va: structure containing the address and range of the &drm_gpuva 82 + */ 83 + struct { 84 + /** 85 + * @addr: the start address 86 + */ 87 + u64 addr; 88 + 89 + /** 90 + * @range: the range 91 + */ 92 + u64 range; 93 + } va; 94 + 95 + /** 96 + * @gem: structure containing the &drm_gem_object and its offset 97 + */ 98 + struct { 99 + /** 100 + * @offset: the offset within the &drm_gem_object 101 + */ 102 + u64 offset; 103 + 104 + /** 105 + * @obj: the mapped &drm_gem_object 106 + */ 107 + struct drm_gem_object *obj; 108 + 109 + /** 110 + * @entry: the &list_head to attach this object to a &drm_gem_object 111 + */ 112 + struct list_head entry; 113 + } gem; 114 + 115 + /** 116 + * @rb: structure containing data to store &drm_gpuvas in a rb-tree 117 + */ 118 + struct { 119 + /** 120 + * @node: the rb-tree node 121 + */ 122 + struct rb_node node; 123 + 124 + /** 125 + * @entry: The &list_head to additionally connect &drm_gpuvas 126 + * in the same order they appear in the interval tree. This is 127 + * useful to keep iterating &drm_gpuvas from a start node found 128 + * through the rb-tree while doing modifications on the rb-tree 129 + * itself. 
130 + */ 131 + struct list_head entry; 132 + 133 + /** 134 + * @__subtree_last: needed by the interval tree, holding last-in-subtree 135 + */ 136 + u64 __subtree_last; 137 + } rb; 138 + }; 139 + 140 + int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va); 141 + void drm_gpuva_remove(struct drm_gpuva *va); 142 + 143 + void drm_gpuva_link(struct drm_gpuva *va); 144 + void drm_gpuva_unlink(struct drm_gpuva *va); 145 + 146 + struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr, 147 + u64 addr, u64 range); 148 + struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr, 149 + u64 addr, u64 range); 150 + struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start); 151 + struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end); 152 + 153 + bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range); 154 + 155 + static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range, 156 + struct drm_gem_object *obj, u64 offset) 157 + { 158 + va->va.addr = addr; 159 + va->va.range = range; 160 + va->gem.obj = obj; 161 + va->gem.offset = offset; 162 + } 163 + 164 + /** 165 + * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is 166 + * invalidated 167 + * @va: the &drm_gpuva to set the invalidate flag for 168 + * @invalidate: indicates whether the &drm_gpuva is invalidated 169 + */ 170 + static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate) 171 + { 172 + if (invalidate) 173 + va->flags |= DRM_GPUVA_INVALIDATED; 174 + else 175 + va->flags &= ~DRM_GPUVA_INVALIDATED; 176 + } 177 + 178 + /** 179 + * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva 180 + * is invalidated 181 + * @va: the &drm_gpuva to check 182 + */ 183 + static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) 184 + { 185 + return va->flags & DRM_GPUVA_INVALIDATED; 186 + } 187 + 188 + /** 189 + * struct drm_gpuva_manager - 
DRM GPU VA Manager 190 + * 191 + * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using 192 + * &maple_tree structures. Typically, this structure is embedded in bigger 193 + * driver structures. 194 + * 195 + * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or 196 + * pages. 197 + * 198 + * There should be one manager instance per GPU virtual address space. 199 + */ 200 + struct drm_gpuva_manager { 201 + /** 202 + * @name: the name of the DRM GPU VA space 203 + */ 204 + const char *name; 205 + 206 + /** 207 + * @mm_start: start of the VA space 208 + */ 209 + u64 mm_start; 210 + 211 + /** 212 + * @mm_range: length of the VA space 213 + */ 214 + u64 mm_range; 215 + 216 + /** 217 + * @rb: structures to track &drm_gpuva entries 218 + */ 219 + struct { 220 + /** 221 + * @tree: the rb-tree to track GPU VA mappings 222 + */ 223 + struct rb_root_cached tree; 224 + 225 + /** 226 + * @list: the &list_head to track GPU VA mappings 227 + */ 228 + struct list_head list; 229 + } rb; 230 + 231 + /** 232 + * @kernel_alloc_node: 233 + * 234 + * &drm_gpuva representing the address space cutout reserved for 235 + * the kernel 236 + */ 237 + struct drm_gpuva kernel_alloc_node; 238 + 239 + /** 240 + * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers 241 + */ 242 + const struct drm_gpuva_fn_ops *ops; 243 + }; 244 + 245 + void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr, 246 + const char *name, 247 + u64 start_offset, u64 range, 248 + u64 reserve_offset, u64 reserve_range, 249 + const struct drm_gpuva_fn_ops *ops); 250 + void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr); 251 + 252 + static inline struct drm_gpuva * 253 + __drm_gpuva_next(struct drm_gpuva *va) 254 + { 255 + if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list)) 256 + return list_next_entry(va, rb.entry); 257 + 258 + return NULL; 259 + } 260 + 261 + /** 262 + * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas 263 + 
* @va__: &drm_gpuva structure to assign to in each iteration step 264 + * @mgr__: &drm_gpuva_manager to walk over 265 + * @start__: starting offset, the first gpuva will overlap this 266 + * @end__: ending offset, the last gpuva will start before this (but may 267 + * overlap) 268 + * 269 + * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie 270 + * between @start__ and @end__. It is implemented similarly to list_for_each(), 271 + * but is using the &drm_gpuva_manager's internal interval tree to accelerate 272 + * the search for the starting &drm_gpuva, and hence isn't safe against removal 273 + * of elements. It assumes that @end__ is within (or is the upper limit of) the 274 + * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's 275 + * @kernel_alloc_node. 276 + */ 277 + #define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \ 278 + for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \ 279 + va__ && (va__->va.addr < (end__)); \ 280 + va__ = __drm_gpuva_next(va__)) 281 + 282 + /** 283 + * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of 284 + * &drm_gpuvas 285 + * @va__: &drm_gpuva to assign to in each iteration step 286 + * @next__: another &drm_gpuva to use as temporary storage 287 + * @mgr__: &drm_gpuva_manager to walk over 288 + * @start__: starting offset, the first gpuva will overlap this 289 + * @end__: ending offset, the last gpuva will start before this (but may 290 + * overlap) 291 + * 292 + * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie 293 + * between @start__ and @end__. It is implemented similarly to 294 + * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval 295 + * tree to accelerate the search for the starting &drm_gpuva, and hence is safe 296 + * against removal of elements. It assumes that @end__ is within (or is the 297 + * upper limit of) the &drm_gpuva_manager. 
This iterator does not skip over the 298 + * &drm_gpuva_manager's @kernel_alloc_node. 299 + */ 300 + #define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \ 301 + for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \ 302 + next__ = __drm_gpuva_next(va__); \ 303 + va__ && (va__->va.addr < (end__)); \ 304 + va__ = next__, next__ = __drm_gpuva_next(va__)) 305 + 306 + /** 307 + * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas 308 + * @va__: &drm_gpuva to assign to in each iteration step 309 + * @mgr__: &drm_gpuva_manager to walk over 310 + * 311 + * This iterator walks over all &drm_gpuva structures associated with the given 312 + * &drm_gpuva_manager. 313 + */ 314 + #define drm_gpuva_for_each_va(va__, mgr__) \ 315 + list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry) 316 + 317 + /** 318 + * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas 319 + * @va__: &drm_gpuva to assign to in each iteration step 320 + * @next__: another &drm_gpuva to use as temporary storage 321 + * @mgr__: &drm_gpuva_manager to walk over 322 + * 323 + * This iterator walks over all &drm_gpuva structures associated with the given 324 + * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and 325 + * hence safe against the removal of elements. 326 + */ 327 + #define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \ 328 + list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry) 329 + 330 + /** 331 + * enum drm_gpuva_op_type - GPU VA operation type 332 + * 333 + * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager. 
334 + */ 335 + enum drm_gpuva_op_type { 336 + /** 337 + * @DRM_GPUVA_OP_MAP: the map op type 338 + */ 339 + DRM_GPUVA_OP_MAP, 340 + 341 + /** 342 + * @DRM_GPUVA_OP_REMAP: the remap op type 343 + */ 344 + DRM_GPUVA_OP_REMAP, 345 + 346 + /** 347 + * @DRM_GPUVA_OP_UNMAP: the unmap op type 348 + */ 349 + DRM_GPUVA_OP_UNMAP, 350 + 351 + /** 352 + * @DRM_GPUVA_OP_PREFETCH: the prefetch op type 353 + */ 354 + DRM_GPUVA_OP_PREFETCH, 355 + }; 356 + 357 + /** 358 + * struct drm_gpuva_op_map - GPU VA map operation 359 + * 360 + * This structure represents a single map operation generated by the 361 + * DRM GPU VA manager. 362 + */ 363 + struct drm_gpuva_op_map { 364 + /** 365 + * @va: structure containing address and range of a map 366 + * operation 367 + */ 368 + struct { 369 + /** 370 + * @addr: the base address of the new mapping 371 + */ 372 + u64 addr; 373 + 374 + /** 375 + * @range: the range of the new mapping 376 + */ 377 + u64 range; 378 + } va; 379 + 380 + /** 381 + * @gem: structure containing the &drm_gem_object and its offset 382 + */ 383 + struct { 384 + /** 385 + * @offset: the offset within the &drm_gem_object 386 + */ 387 + u64 offset; 388 + 389 + /** 390 + * @obj: the &drm_gem_object to map 391 + */ 392 + struct drm_gem_object *obj; 393 + } gem; 394 + }; 395 + 396 + /** 397 + * struct drm_gpuva_op_unmap - GPU VA unmap operation 398 + * 399 + * This structure represents a single unmap operation generated by the 400 + * DRM GPU VA manager. 401 + */ 402 + struct drm_gpuva_op_unmap { 403 + /** 404 + * @va: the &drm_gpuva to unmap 405 + */ 406 + struct drm_gpuva *va; 407 + 408 + /** 409 + * @keep: 410 + * 411 + * Indicates whether this &drm_gpuva is physically contiguous with the 412 + * original mapping request. 413 + * 414 + * Optionally, if &keep is set, drivers may keep the actual page table 415 + * mappings for this &drm_gpuva, adding the missing page table entries 416 + * only and update the &drm_gpuva_manager accordingly. 
417 + */ 418 + bool keep; 419 + }; 420 + 421 + /** 422 + * struct drm_gpuva_op_remap - GPU VA remap operation 423 + * 424 + * This represents a single remap operation generated by the DRM GPU VA manager. 425 + * 426 + * A remap operation is generated when an existing GPU VA mapping is split up 427 + * by inserting a new GPU VA mapping or by partially unmapping existent 428 + * mapping(s), hence it consists of a maximum of two map and one unmap 429 + * operation. 430 + * 431 + * The @unmap operation takes care of removing the original existing mapping. 432 + * @prev is used to remap the preceding part, @next the subsequent part. 433 + * 434 + * If either a new mapping's start address is aligned with the start address 435 + * of the old mapping or the new mapping's end address is aligned with the 436 + * end address of the old mapping, either @prev or @next is NULL. 437 + * 438 + * Note, the reason for a dedicated remap operation, rather than arbitrary 439 + * unmap and map operations, is to give drivers the chance of extracting driver 440 + * specific data for creating the new mappings from the unmap operation's 441 + * &drm_gpuva structure which typically is embedded in larger driver specific 442 + * structures. 443 + */ 444 + struct drm_gpuva_op_remap { 445 + /** 446 + * @prev: the preceding part of a split mapping 447 + */ 448 + struct drm_gpuva_op_map *prev; 449 + 450 + /** 451 + * @next: the subsequent part of a split mapping 452 + */ 453 + struct drm_gpuva_op_map *next; 454 + 455 + /** 456 + * @unmap: the unmap operation for the original existing mapping 457 + */ 458 + struct drm_gpuva_op_unmap *unmap; 459 + }; 460 + 461 + /** 462 + * struct drm_gpuva_op_prefetch - GPU VA prefetch operation 463 + * 464 + * This structure represents a single prefetch operation generated by the 465 + * DRM GPU VA manager. 
466 + */ 467 + struct drm_gpuva_op_prefetch { 468 + /** 469 + * @va: the &drm_gpuva to prefetch 470 + */ 471 + struct drm_gpuva *va; 472 + }; 473 + 474 + /** 475 + * struct drm_gpuva_op - GPU VA operation 476 + * 477 + * This structure represents a single generic operation. 478 + * 479 + * The particular type of the operation is defined by @op. 480 + */ 481 + struct drm_gpuva_op { 482 + /** 483 + * @entry: 484 + * 485 + * The &list_head used to distribute instances of this struct within 486 + * &drm_gpuva_ops. 487 + */ 488 + struct list_head entry; 489 + 490 + /** 491 + * @op: the type of the operation 492 + */ 493 + enum drm_gpuva_op_type op; 494 + 495 + union { 496 + /** 497 + * @map: the map operation 498 + */ 499 + struct drm_gpuva_op_map map; 500 + 501 + /** 502 + * @remap: the remap operation 503 + */ 504 + struct drm_gpuva_op_remap remap; 505 + 506 + /** 507 + * @unmap: the unmap operation 508 + */ 509 + struct drm_gpuva_op_unmap unmap; 510 + 511 + /** 512 + * @prefetch: the prefetch operation 513 + */ 514 + struct drm_gpuva_op_prefetch prefetch; 515 + }; 516 + }; 517 + 518 + /** 519 + * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op 520 + */ 521 + struct drm_gpuva_ops { 522 + /** 523 + * @list: the &list_head 524 + */ 525 + struct list_head list; 526 + }; 527 + 528 + /** 529 + * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops 530 + * @op: &drm_gpuva_op to assign in each iteration step 531 + * @ops: &drm_gpuva_ops to walk 532 + * 533 + * This iterator walks over all ops within a given list of operations. 534 + */ 535 + #define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry) 536 + 537 + /** 538 + * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops 539 + * @op: &drm_gpuva_op to assign in each iteration step 540 + * @next: &next &drm_gpuva_op to store the next step 541 + * @ops: &drm_gpuva_ops to walk 542 + * 543 + * This iterator walks over all ops within a given list of operations. 
It is 544 + * implemented with list_for_each_safe(), so safe against removal of elements. 545 + */ 546 + #define drm_gpuva_for_each_op_safe(op, next, ops) \ 547 + list_for_each_entry_safe(op, next, &(ops)->list, entry) 548 + 549 + /** 550 + * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point 551 + * @op: &drm_gpuva_op to assign in each iteration step 552 + * @ops: &drm_gpuva_ops to walk 553 + * 554 + * This iterator walks over all ops within a given list of operations beginning 555 + * from the given operation in reverse order. 556 + */ 557 + #define drm_gpuva_for_each_op_from_reverse(op, ops) \ 558 + list_for_each_entry_from_reverse(op, &(ops)->list, entry) 559 + 560 + /** 561 + * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops 562 + * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from 563 + */ 564 + #define drm_gpuva_first_op(ops) \ 565 + list_first_entry(&(ops)->list, struct drm_gpuva_op, entry) 566 + 567 + /** 568 + * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops 569 + * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from 570 + */ 571 + #define drm_gpuva_last_op(ops) \ 572 + list_last_entry(&(ops)->list, struct drm_gpuva_op, entry) 573 + 574 + /** 575 + * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list 576 + * @op: the current &drm_gpuva_op 577 + */ 578 + #define drm_gpuva_prev_op(op) list_prev_entry(op, entry) 579 + 580 + /** 581 + * drm_gpuva_next_op() - next &drm_gpuva_op in the list 582 + * @op: the current &drm_gpuva_op 583 + */ 584 + #define drm_gpuva_next_op(op) list_next_entry(op, entry) 585 + 586 + struct drm_gpuva_ops * 587 + drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr, 588 + u64 addr, u64 range, 589 + struct drm_gem_object *obj, u64 offset); 590 + struct drm_gpuva_ops * 591 + drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr, 592 + u64 addr, u64 range); 593 + 594 + struct drm_gpuva_ops * 595 + 
drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr, 596 + u64 addr, u64 range); 597 + 598 + struct drm_gpuva_ops * 599 + drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr, 600 + struct drm_gem_object *obj); 601 + 602 + void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr, 603 + struct drm_gpuva_ops *ops); 604 + 605 + static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, 606 + struct drm_gpuva_op_map *op) 607 + { 608 + drm_gpuva_init(va, op->va.addr, op->va.range, 609 + op->gem.obj, op->gem.offset); 610 + } 611 + 612 + /** 613 + * struct drm_gpuva_fn_ops - callbacks for split/merge steps 614 + * 615 + * This structure defines the callbacks used by &drm_gpuva_sm_map and 616 + * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap 617 + * operations to drivers. 618 + */ 619 + struct drm_gpuva_fn_ops { 620 + /** 621 + * @op_alloc: called when the &drm_gpuva_manager allocates 622 + * a struct drm_gpuva_op 623 + * 624 + * Some drivers may want to embed struct drm_gpuva_op into driver 625 + * specific structures. By implementing this callback drivers can 626 + * allocate memory accordingly. 627 + * 628 + * This callback is optional. 629 + */ 630 + struct drm_gpuva_op *(*op_alloc)(void); 631 + 632 + /** 633 + * @op_free: called when the &drm_gpuva_manager frees a 634 + * struct drm_gpuva_op 635 + * 636 + * Some drivers may want to embed struct drm_gpuva_op into driver 637 + * specific structures. By implementing this callback drivers can 638 + * free the previously allocated memory accordingly. 639 + * 640 + * This callback is optional. 641 + */ 642 + void (*op_free)(struct drm_gpuva_op *op); 643 + 644 + /** 645 + * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the 646 + * mapping once all previous steps were completed 647 + * 648 + * The &priv pointer matches the one the driver passed to 649 + * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. 650 + * 651 + * Can be NULL if &drm_gpuva_sm_map is used. 
652 + */ 653 + int (*sm_step_map)(struct drm_gpuva_op *op, void *priv); 654 + 655 + /** 656 + * @sm_step_remap: called from &drm_gpuva_sm_map and 657 + * &drm_gpuva_sm_unmap to split up an existent mapping 658 + * 659 + * This callback is called when an existent mapping needs to be split up. 660 + * This is the case when either a newly requested mapping overlaps or 661 + * is enclosed by an existent mapping or a partial unmap of an existent 662 + * mapping is requested. 663 + * 664 + * The &priv pointer matches the one the driver passed to 665 + * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. 666 + * 667 + * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is 668 + * used. 669 + */ 670 + int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv); 671 + 672 + /** 673 + * @sm_step_unmap: called from &drm_gpuva_sm_map and 674 + * &drm_gpuva_sm_unmap to unmap an existent mapping 675 + * 676 + * This callback is called when an existent mapping needs to be unmapped. 677 + * This is the case when either a newly requested mapping encloses an 678 + * existent mapping or an unmap of an existent mapping is requested. 679 + * 680 + * The &priv pointer matches the one the driver passed to 681 + * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. 682 + * 683 + * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is 684 + * used. 
685 + */ 686 + int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv); 687 + }; 688 + 689 + int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv, 690 + u64 addr, u64 range, 691 + struct drm_gem_object *obj, u64 offset); 692 + 693 + int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv, 694 + u64 addr, u64 range); 695 + 696 + void drm_gpuva_map(struct drm_gpuva_manager *mgr, 697 + struct drm_gpuva *va, 698 + struct drm_gpuva_op_map *op); 699 + 700 + void drm_gpuva_remap(struct drm_gpuva *prev, 701 + struct drm_gpuva *next, 702 + struct drm_gpuva_op_remap *op); 703 + 704 + void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op); 705 + 706 + #endif /* __DRM_GPUVA_MGR_H__ */
+5 -1
include/drm/drm_syncobj.h
··· 54 54 */ 55 55 struct list_head cb_list; 56 56 /** 57 - * @lock: Protects &cb_list and write-locks &fence. 57 + * @ev_fd_list: List of registered eventfd. 58 + */ 59 + struct list_head ev_fd_list; 60 + /** 61 + * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence. 58 62 */ 59 63 spinlock_t lock; 60 64 /**
+23
include/uapi/drm/drm.h
··· 909 909 __u32 pad; 910 910 }; 911 911 912 + /** 913 + * struct drm_syncobj_eventfd 914 + * @handle: syncobj handle. 915 + * @flags: Zero to wait for the point to be signalled, or 916 + * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be 917 + * available for the point. 918 + * @point: syncobj timeline point (set to zero for binary syncobjs). 919 + * @fd: Existing eventfd to sent events to. 920 + * @pad: Must be zero. 921 + * 922 + * Register an eventfd to be signalled by a syncobj. The eventfd counter will 923 + * be incremented by one. 924 + */ 925 + struct drm_syncobj_eventfd { 926 + __u32 handle; 927 + __u32 flags; 928 + __u64 point; 929 + __s32 fd; 930 + __u32 pad; 931 + }; 932 + 912 933 913 934 struct drm_syncobj_array { 914 935 __u64 handles; ··· 1189 1168 * double-close handles which are specified multiple times in the array. 1190 1169 */ 1191 1170 #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) 1171 + 1172 + #define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd) 1192 1173 1193 1174 /* 1194 1175 * Device specific ioctls should only be in their respective headers