Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge drm/drm-next into drm-intel-next-queued

We need the AVI infoframe stuff, which got merged via drm-misc

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

+4289 -2288
+2 -1
Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
··· 8 8 9 9 - compatible : Shall contain one of 10 10 - "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders 11 + - "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders 11 12 - "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders 12 13 - "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders 13 14 - "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders ··· 26 25 - clock-names: Name of the clocks. This property is model-dependent. 27 26 - The functional clock, which mandatory for all models, shall be listed 28 27 first, and shall be named "fck". 29 - - On R8A77990 and R8A77995, the LVDS encoder can use the EXTAL or 28 + - On R8A77990, R8A77995 and R8A774C0, the LVDS encoder can use the EXTAL or 30 29 DU_DOTCLKINx clocks. Those clocks are optional. When supplied they must be 31 30 named "extal" and "dclkin.x" respectively, with "x" being the DU_DOTCLKIN 32 31 numerical index.
Documentation/devicetree/bindings/display/panel/auo,g101evn010 Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt
+2
Documentation/devicetree/bindings/display/renesas,du.txt
··· 7 7 - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU 8 8 - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU 9 9 - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU 10 + - "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU 10 11 - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU 11 12 - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU 12 13 - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU ··· 58 57 R8A7744 (RZ/G1N) DPAD 0 LVDS 0 - - 59 58 R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - - 60 59 R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 - 60 + R8A774C0 (RZ/G2E) DPAD 0 LVDS 0 LVDS 1 - 61 61 R8A7779 (R-Car H1) DPAD 0 DPAD 1 - - 62 62 R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 - 63 63 R8A7791 (R-Car M2-W) DPAD 0 LVDS 0 - -
+1
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
··· 10 10 "rockchip,rk3126-vop"; 11 11 "rockchip,px30-vop-lit"; 12 12 "rockchip,px30-vop-big"; 13 + "rockchip,rk3066-vop"; 13 14 "rockchip,rk3188-vop"; 14 15 "rockchip,rk3288-vop"; 15 16 "rockchip,rk3368-vop";
+52
Documentation/gpu/dp-mst/topology-figure-1.dot
··· 1 + digraph T { 2 + /* Make sure our payloads are always drawn below the driver node */ 3 + subgraph cluster_driver { 4 + fillcolor = grey; 5 + style = filled; 6 + driver -> {payload1, payload2} [dir=none]; 7 + } 8 + 9 + /* Driver malloc references */ 10 + edge [style=dashed]; 11 + driver -> port1; 12 + driver -> port2; 13 + driver -> port3:e; 14 + driver -> port4; 15 + 16 + payload1:s -> port1:e; 17 + payload2:s -> port3:e; 18 + edge [style=""]; 19 + 20 + subgraph cluster_topology { 21 + label="Topology Manager"; 22 + labelloc=bottom; 23 + 24 + /* Topology references */ 25 + mstb1 -> {port1, port2}; 26 + port1 -> mstb2; 27 + port2 -> mstb3 -> {port3, port4}; 28 + port3 -> mstb4; 29 + 30 + /* Malloc references */ 31 + edge [style=dashed;dir=back]; 32 + mstb1 -> {port1, port2}; 33 + port1 -> mstb2; 34 + port2 -> mstb3 -> {port3, port4}; 35 + port3 -> mstb4; 36 + } 37 + 38 + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; 39 + 40 + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; 41 + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue]; 42 + 43 + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen;shape=oval]; 44 + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen;shape=oval]; 45 + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;shape=oval]; 46 + mstb4 [label="MSTB #4";style=filled;fillcolor=palegreen;shape=oval]; 47 + 48 + port1 [label="Port #1";shape=oval]; 49 + port2 [label="Port #2";shape=oval]; 50 + port3 [label="Port #3";shape=oval]; 51 + port4 [label="Port #4";shape=oval]; 52 + }
+56
Documentation/gpu/dp-mst/topology-figure-2.dot
··· 1 + digraph T { 2 + /* Make sure our payloads are always drawn below the driver node */ 3 + subgraph cluster_driver { 4 + fillcolor = grey; 5 + style = filled; 6 + driver -> {payload1, payload2} [dir=none]; 7 + } 8 + 9 + /* Driver malloc references */ 10 + edge [style=dashed]; 11 + driver -> port1; 12 + driver -> port2; 13 + driver -> port3:e; 14 + driver -> port4 [color=red]; 15 + 16 + payload1:s -> port1:e; 17 + payload2:s -> port3:e; 18 + edge [style=""]; 19 + 20 + subgraph cluster_topology { 21 + label="Topology Manager"; 22 + labelloc=bottom; 23 + 24 + /* Topology references */ 25 + mstb1 -> {port1, port2}; 26 + port1 -> mstb2; 27 + edge [color=red]; 28 + port2 -> mstb3 -> {port3, port4}; 29 + port3 -> mstb4; 30 + edge [color=""]; 31 + 32 + /* Malloc references */ 33 + edge [style=dashed;dir=back]; 34 + mstb1 -> {port1, port2}; 35 + port1 -> mstb2; 36 + port2 -> mstb3 -> port3; 37 + edge [color=red]; 38 + mstb3 -> port4; 39 + port3 -> mstb4; 40 + } 41 + 42 + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen]; 43 + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen]; 44 + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen]; 45 + mstb4 [label="MSTB #4";style=filled;fillcolor=grey]; 46 + 47 + port1 [label="Port #1"]; 48 + port2 [label="Port #2"]; 49 + port3 [label="Port #3"]; 50 + port4 [label="Port #4";style=filled;fillcolor=grey]; 51 + 52 + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; 53 + 54 + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; 55 + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue]; 56 + }
+59
Documentation/gpu/dp-mst/topology-figure-3.dot
··· 1 + digraph T { 2 + /* Make sure our payloads are always drawn below the driver node */ 3 + subgraph cluster_driver { 4 + fillcolor = grey; 5 + style = filled; 6 + edge [dir=none]; 7 + driver -> payload1; 8 + driver -> payload2 [penwidth=3]; 9 + edge [dir=""]; 10 + } 11 + 12 + /* Driver malloc references */ 13 + edge [style=dashed]; 14 + driver -> port1; 15 + driver -> port2; 16 + driver -> port3:e; 17 + driver -> port4 [color=grey]; 18 + payload1:s -> port1:e; 19 + payload2:s -> port3:e [penwidth=3]; 20 + edge [style=""]; 21 + 22 + subgraph cluster_topology { 23 + label="Topology Manager"; 24 + labelloc=bottom; 25 + 26 + /* Topology references */ 27 + mstb1 -> {port1, port2}; 28 + port1 -> mstb2; 29 + edge [color=grey]; 30 + port2 -> mstb3 -> {port3, port4}; 31 + port3 -> mstb4; 32 + edge [color=""]; 33 + 34 + /* Malloc references */ 35 + edge [style=dashed;dir=back]; 36 + mstb1 -> {port1, port2}; 37 + port1 -> mstb2; 38 + port2 -> mstb3 [penwidth=3]; 39 + mstb3 -> port3 [penwidth=3]; 40 + edge [color=grey]; 41 + mstb3 -> port4; 42 + port3 -> mstb4; 43 + } 44 + 45 + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen]; 46 + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen]; 47 + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;penwidth=3]; 48 + mstb4 [label="MSTB #4";style=filled;fillcolor=grey]; 49 + 50 + port1 [label="Port #1"]; 51 + port2 [label="Port #2";penwidth=5]; 52 + port3 [label="Port #3";penwidth=3]; 53 + port4 [label="Port #4";style=filled;fillcolor=grey]; 54 + 55 + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; 56 + 57 + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; 58 + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue;penwidth=3]; 59 + }
+12
Documentation/gpu/drm-internals.rst
··· 143 143 .. kernel-doc:: drivers/gpu/drm/drm_drv.c 144 144 :doc: driver instance overview 145 145 146 + .. kernel-doc:: include/drm/drm_device.h 147 + :internal: 148 + 146 149 .. kernel-doc:: include/drm/drm_drv.h 147 150 :internal: 148 151 ··· 232 229 233 230 .. kernel-doc:: drivers/gpu/drm/drm_print.c 234 231 :export: 232 + 233 + Utilities 234 + --------- 235 + 236 + .. kernel-doc:: include/drm/drm_util.h 237 + :doc: drm utils 238 + 239 + .. kernel-doc:: include/drm/drm_util.h 240 + :internal: 235 241 236 242 237 243 Legacy Support Code
+26 -4
Documentation/gpu/drm-kms-helpers.rst
··· 116 116 .. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c 117 117 :export: 118 118 119 - .. _drm_bridges: 120 - 121 119 Framebuffer GEM Helper Reference 122 120 ================================ 123 121 ··· 124 126 125 127 .. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c 126 128 :export: 129 + 130 + .. _drm_bridges: 127 131 128 132 Bridges 129 133 ======= ··· 208 208 .. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c 209 209 :export: 210 210 211 - Display Port MST Helper Functions Reference 212 - =========================================== 211 + Display Port MST Helpers 212 + ======================== 213 + 214 + Overview 215 + -------- 213 216 214 217 .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c 215 218 :doc: dp mst helper 219 + 220 + .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c 221 + :doc: Branch device and port refcounting 222 + 223 + Functions Reference 224 + ------------------- 216 225 217 226 .. kernel-doc:: include/drm/drm_dp_mst_helper.h 218 227 :internal: 219 228 220 229 .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c 221 230 :export: 231 + 232 + Topology Lifetime Internals 233 + --------------------------- 234 + 235 + These functions aren't exported to drivers, but are documented here to help make 236 + the MST topology helpers easier to understand 237 + 238 + .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c 239 + :functions: drm_dp_mst_topology_try_get_mstb drm_dp_mst_topology_get_mstb 240 + drm_dp_mst_topology_put_mstb 241 + drm_dp_mst_topology_try_get_port drm_dp_mst_topology_get_port 242 + drm_dp_mst_topology_put_port 243 + drm_dp_mst_get_mstb_malloc drm_dp_mst_put_mstb_malloc 222 244 223 245 MIPI DSI Helper Functions Reference 224 246 ===================================
+30 -3
Documentation/gpu/todo.rst
··· 209 209 210 210 Contact: Daniel Vetter 211 211 212 + Generic fbdev defio support 213 + --------------------------- 214 + 215 + The defio support code in the fbdev core has some very specific requirements, 216 + which means drivers need to have a special framebuffer for fbdev. Which prevents 217 + us from using the generic fbdev emulation code everywhere. The main issue is 218 + that it uses some fields in struct page itself, which breaks shmem gem objects 219 + (and other things). 220 + 221 + Possible solution would be to write our own defio mmap code in the drm fbdev 222 + emulation. It would need to fully wrap the existing mmap ops, forwarding 223 + everything after it has done the write-protect/mkwrite trickery: 224 + 225 + - In the drm_fbdev_fb_mmap helper, if we need defio, change the 226 + default page prots to write-protected with something like this:: 227 + 228 + vma->vm_page_prot = pgprot_wrprotect(vma->vm_page_prot); 229 + 230 + - Set the mkwrite and fsync callbacks with similar implementions to the core 231 + fbdev defio stuff. These should all work on plain ptes, they don't actually 232 + require a struct page. uff. These should all work on plain ptes, they don't 233 + actually require a struct page. 234 + 235 + - Track the dirty pages in a separate structure (bitfield with one bit per page 236 + should work) to avoid clobbering struct page. 237 + 238 + Might be good to also have some igt testcases for this. 239 + 240 + Contact: Daniel Vetter, Noralf Tronnes 241 + 212 242 Put a reservation_object into drm_gem_object 213 243 -------------------------------------------- 214 244 ··· 383 353 ------------ 384 354 385 355 Some of these date from the very introduction of KMS in 2008 ... 386 - 387 - - drm_mode_config.crtc_idr is misnamed, since it contains all KMS object. Should 388 - be renamed to drm_mode_config.object_idr. 389 356 390 357 - drm_display_mode doesn't need to be derived from drm_mode_object. 
That's 391 358 leftovers from older (never merged into upstream) KMS designs where modes
+18 -1
MAINTAINERS
··· 4873 4873 M: Dave Airlie <airlied@redhat.com> 4874 4874 M: Gerd Hoffmann <kraxel@redhat.com> 4875 4875 L: virtualization@lists.linux-foundation.org 4876 + L: spice-devel@lists.freedesktop.org 4876 4877 T: git git://anongit.freedesktop.org/drm/drm-misc 4877 4878 S: Maintained 4878 4879 F: drivers/gpu/drm/qxl/ ··· 4910 4909 S: Orphan / Obsolete 4911 4910 F: drivers/gpu/drm/tdfx/ 4912 4911 4912 + DRM DRIVER FOR TPO TPG110 PANELS 4913 + M: Linus Walleij <linus.walleij@linaro.org> 4914 + T: git git://anongit.freedesktop.org/drm/drm-misc 4915 + S: Maintained 4916 + F: drivers/gpu/drm/panel/panel-tpo-tpg110.c 4917 + F: Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt 4918 + 4913 4919 DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS 4914 4920 M: Dave Airlie <airlied@redhat.com> 4915 4921 R: Sean Paul <sean@poorly.run> ··· 4924 4916 S: Odd Fixes 4925 4917 F: drivers/gpu/drm/udl/ 4926 4918 T: git git://anongit.freedesktop.org/drm/drm-misc 4919 + 4920 + DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS) 4921 + M: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com> 4922 + R: Haneen Mohammed <hamohammed.sa@gmail.com> 4923 + R: Daniel Vetter <daniel@ffwll.ch> 4924 + T: git git://anongit.freedesktop.org/drm/drm-misc 4925 + S: Maintained 4926 + L: dri-devel@lists.freedesktop.org 4927 + F: drivers/gpu/drm/vkms/ 4928 + F: Documentation/gpu/vkms.rst 4927 4929 4928 4930 DRM DRIVER FOR VMWARE VIRTUAL GPU 4929 4931 M: "VMware Graphics" <linux-graphics-maintainer@vmware.com> ··· 5004 4986 T: git git://anongit.freedesktop.org/drm/drm-misc 5005 4987 5006 4988 DRM DRIVERS FOR BRIDGE CHIPS 5007 - M: Archit Taneja <architt@codeaurora.org> 5008 4989 M: Andrzej Hajda <a.hajda@samsung.com> 5009 4990 R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com> 5010 4991 S: Maintained
+1 -11
drivers/dma-buf/dma-buf.c
··· 1093 1093 return 0; 1094 1094 } 1095 1095 1096 - static int dma_buf_debug_open(struct inode *inode, struct file *file) 1097 - { 1098 - return single_open(file, dma_buf_debug_show, NULL); 1099 - } 1100 - 1101 - static const struct file_operations dma_buf_debug_fops = { 1102 - .open = dma_buf_debug_open, 1103 - .read = seq_read, 1104 - .llseek = seq_lseek, 1105 - .release = single_release, 1106 - }; 1096 + DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); 1107 1097 1108 1098 static struct dentry *dma_buf_debugfs_dir; 1109 1099
+1 -1
drivers/dma-buf/dma-fence.c
··· 649 649 */ 650 650 void 651 651 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, 652 - spinlock_t *lock, u64 context, unsigned seqno) 652 + spinlock_t *lock, u64 context, u64 seqno) 653 653 { 654 654 BUG_ON(!lock); 655 655 BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+1 -1
drivers/dma-buf/sw_sync.c
··· 172 172 static void timeline_fence_value_str(struct dma_fence *fence, 173 173 char *str, int size) 174 174 { 175 - snprintf(str, size, "%d", fence->seqno); 175 + snprintf(str, size, "%lld", fence->seqno); 176 176 } 177 177 178 178 static void timeline_fence_timeline_value_str(struct dma_fence *fence,
+3 -13
drivers/dma-buf/sync_debug.c
··· 147 147 } 148 148 } 149 149 150 - static int sync_debugfs_show(struct seq_file *s, void *unused) 150 + static int sync_info_debugfs_show(struct seq_file *s, void *unused) 151 151 { 152 152 struct list_head *pos; 153 153 ··· 178 178 return 0; 179 179 } 180 180 181 - static int sync_info_debugfs_open(struct inode *inode, struct file *file) 182 - { 183 - return single_open(file, sync_debugfs_show, inode->i_private); 184 - } 185 - 186 - static const struct file_operations sync_info_debugfs_fops = { 187 - .open = sync_info_debugfs_open, 188 - .read = seq_read, 189 - .llseek = seq_lseek, 190 - .release = single_release, 191 - }; 181 + DEFINE_SHOW_ATTRIBUTE(sync_info_debugfs); 192 182 193 183 static __init int sync_debugfs_init(void) 194 184 { ··· 208 218 }; 209 219 int i; 210 220 211 - sync_debugfs_show(&s, NULL); 221 + sync_info_debugfs_show(&s, NULL); 212 222 213 223 for (i = 0; i < s.count; i += DUMP_CHUNK) { 214 224 if ((s.count - i) > DUMP_CHUNK) {
+2 -2
drivers/dma-buf/sync_file.c
··· 144 144 } else { 145 145 struct dma_fence *fence = sync_file->fence; 146 146 147 - snprintf(buf, len, "%s-%s%llu-%d", 147 + snprintf(buf, len, "%s-%s%llu-%lld", 148 148 fence->ops->get_driver_name(fence), 149 149 fence->ops->get_timeline_name(fence), 150 150 fence->context, ··· 258 258 259 259 i_b++; 260 260 } else { 261 - if (pt_a->seqno - pt_b->seqno <= INT_MAX) 261 + if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno)) 262 262 add_fence(fences, &i, pt_a); 263 263 else 264 264 add_fence(fences, &i, pt_b);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2708 2708 amdgpu_irq_disable_all(adev); 2709 2709 if (adev->mode_info.mode_config_initialized){ 2710 2710 if (!amdgpu_device_has_dc_support(adev)) 2711 - drm_crtc_force_disable_all(adev->ddev); 2711 + drm_helper_force_disable_all(adev->ddev); 2712 2712 else 2713 2713 drm_atomic_helper_shutdown(adev->ddev); 2714 2714 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
··· 388 388 soffset, eoffset, eoffset - soffset); 389 389 390 390 if (i->fence) 391 - seq_printf(m, " protected by 0x%08x on context %llu", 391 + seq_printf(m, " protected by 0x%016llx on context %llu", 392 392 i->fence->seqno, i->fence->context); 393 393 394 394 seq_printf(m, "\n");
+2
drivers/gpu/drm/amd/amdgpu/atom.c
··· 27 27 #include <linux/slab.h> 28 28 #include <asm/unaligned.h> 29 29 30 + #include <drm/drm_util.h> 31 + 30 32 #define ATOM_DEBUG 31 33 32 34 #include "atom.h"
+1 -1
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 1682 1682 dce_v10_0_audio_write_sad_regs(encoder); 1683 1683 dce_v10_0_audio_write_latency_fields(encoder, mode); 1684 1684 1685 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 1685 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); 1686 1686 if (err < 0) { 1687 1687 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 1688 1688 return;
+1 -1
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 1724 1724 dce_v11_0_audio_write_sad_regs(encoder); 1725 1725 dce_v11_0_audio_write_latency_fields(encoder, mode); 1726 1726 1727 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 1727 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); 1728 1728 if (err < 0) { 1729 1729 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 1730 1730 return;
+2 -1
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 1423 1423 struct amdgpu_device *adev = dev->dev_private; 1424 1424 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1425 1425 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1426 + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 1426 1427 struct hdmi_avi_infoframe frame; 1427 1428 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 1428 1429 uint8_t *payload = buffer + 3; ··· 1431 1430 ssize_t err; 1432 1431 u32 tmp; 1433 1432 1434 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 1433 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); 1435 1434 if (err < 0) { 1436 1435 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 1437 1436 return;
+1 -1
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 1616 1616 dce_v8_0_audio_write_sad_regs(encoder); 1617 1617 dce_v8_0_audio_write_latency_fields(encoder, mode); 1618 1618 1619 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 1619 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); 1620 1620 if (err < 0) { 1621 1621 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 1622 1622 return;
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1692 1692 1693 1693 dc_resource_state_copy_construct_current(adev->dm.dc, state->context); 1694 1694 1695 - drm_atomic_private_obj_init(&adev->dm.atomic_obj, 1695 + drm_atomic_private_obj_init(adev->ddev, 1696 + &adev->dm.atomic_obj, 1696 1697 &state->base, 1697 1698 &dm_atomic_state_funcs); 1698 1699
+7 -13
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 191 191 drm_encoder_cleanup(&amdgpu_encoder->base); 192 192 kfree(amdgpu_encoder); 193 193 drm_connector_cleanup(connector); 194 + drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port); 194 195 kfree(amdgpu_dm_connector); 195 196 } 196 197 ··· 364 363 amdgpu_dm_connector_funcs_reset(connector); 365 364 366 365 DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", 367 - aconnector, connector->base.id, aconnector->mst_port); 366 + aconnector, connector->base.id, aconnector->mst_port); 367 + 368 + drm_dp_mst_get_port_malloc(port); 368 369 369 370 DRM_DEBUG_KMS(":%d\n", connector->base.id); 370 371 ··· 382 379 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 383 380 384 381 DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", 385 - aconnector, connector->base.id, aconnector->mst_port); 382 + aconnector, connector->base.id, aconnector->mst_port); 386 383 387 - aconnector->port = NULL; 388 384 if (aconnector->dc_sink) { 389 385 amdgpu_dm_update_freesync_caps(connector, NULL); 390 - dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); 386 + dc_link_remove_remote_sink(aconnector->dc_link, 387 + aconnector->dc_sink); 391 388 dc_sink_release(aconnector->dc_sink); 392 389 aconnector->dc_sink = NULL; 393 390 } ··· 396 393 if (adev->mode_info.rfbdev) 397 394 drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector); 398 395 drm_connector_put(connector); 399 - } 400 - 401 - static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) 402 - { 403 - struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); 404 - struct drm_device *dev = master->base.dev; 405 - 406 - drm_kms_helper_hotplug_event(dev); 407 396 } 408 397 409 398 static void dm_dp_mst_register_connector(struct drm_connector *connector) ··· 414 419 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { 415 420 .add_connector = dm_dp_add_mst_connector, 416 421 .destroy_connector = 
dm_dp_destroy_mst_connector, 417 - .hotplug = dm_dp_mst_hotplug, 418 422 .register_connector = dm_dp_mst_register_connector 419 423 }; 420 424
+2
drivers/gpu/drm/arc/arcpgu_crtc.c
··· 16 16 17 17 #include <drm/drm_atomic_helper.h> 18 18 #include <drm/drm_crtc_helper.h> 19 + #include <drm/drm_device.h> 19 20 #include <drm/drm_fb_cma_helper.h> 20 21 #include <drm/drm_gem_cma_helper.h> 22 + #include <drm/drm_vblank.h> 21 23 #include <drm/drm_plane_helper.h> 22 24 #include <linux/clk.h> 23 25 #include <linux/platform_data/simplefb.h>
+6
drivers/gpu/drm/arc/arcpgu_drv.c
··· 16 16 17 17 #include <linux/clk.h> 18 18 #include <drm/drm_crtc_helper.h> 19 + #include <drm/drm_device.h> 20 + #include <drm/drm_debugfs.h> 21 + #include <drm/drm_drv.h> 19 22 #include <drm/drm_fb_cma_helper.h> 20 23 #include <drm/drm_fb_helper.h> 21 24 #include <drm/drm_gem_cma_helper.h> 22 25 #include <drm/drm_gem_framebuffer_helper.h> 23 26 #include <drm/drm_atomic_helper.h> 27 + #include <linux/dma-mapping.h> 28 + #include <linux/module.h> 24 29 #include <linux/of_reserved_mem.h> 30 + #include <linux/platform_device.h> 25 31 26 32 #include "arcpgu.h" 27 33 #include "arcpgu_regs.h"
-1
drivers/gpu/drm/arc/arcpgu_sim.c
··· 51 51 }; 52 52 53 53 static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { 54 - .dpms = drm_helper_connector_dpms, 55 54 .reset = drm_atomic_helper_connector_reset, 56 55 .fill_modes = drm_helper_probe_single_connector_modes, 57 56 .destroy = arcpgu_drm_connector_destroy,
+1 -7
drivers/gpu/drm/armada/armada_crtc.c
··· 270 270 tm = adj->crtc_vtotal - adj->crtc_vsync_end; 271 271 272 272 DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n", 273 - crtc->base.id, crtc->name, 274 - adj->base.id, adj->name, adj->vrefresh, adj->clock, 275 - adj->crtc_hdisplay, adj->crtc_hsync_start, 276 - adj->crtc_hsync_end, adj->crtc_htotal, 277 - adj->crtc_vdisplay, adj->crtc_vsync_start, 278 - adj->crtc_vsync_end, adj->crtc_vtotal, 279 - adj->type, adj->flags); 273 + crtc->base.id, crtc->name, DRM_MODE_ARG(adj)); 280 274 DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm); 281 275 282 276 /* Now compute the divider for real */
+3 -3
drivers/gpu/drm/ast/ast_fb.c
··· 39 39 #include <drm/drmP.h> 40 40 #include <drm/drm_crtc.h> 41 41 #include <drm/drm_fb_helper.h> 42 + #include <drm/drm_util.h> 42 43 #include <drm/drm_crtc_helper.h> 44 + 43 45 #include "ast_drv.h" 44 46 45 47 static void ast_dirty_update(struct ast_fbdev *afbdev, ··· 193 191 int size, ret; 194 192 void *sysram; 195 193 struct drm_gem_object *gobj = NULL; 196 - struct ast_bo *bo = NULL; 197 194 mode_cmd.width = sizes->surface_width; 198 195 mode_cmd.height = sizes->surface_height; 199 196 mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8); ··· 207 206 DRM_ERROR("failed to create fbcon backing object %d\n", ret); 208 207 return ret; 209 208 } 210 - bo = gem_to_ast_bo(gobj); 211 209 212 210 sysram = vmalloc(size); 213 211 if (!sysram) ··· 263 263 { 264 264 struct ast_framebuffer *afb = &afbdev->afb; 265 265 266 - drm_crtc_force_disable_all(dev); 266 + drm_helper_force_disable_all(dev); 267 267 drm_fb_helper_unregister_fbi(&afbdev->helper); 268 268 269 269 if (afb->obj) {
+4 -3
drivers/gpu/drm/ati_pcigart.c
··· 103 103 unsigned long pages; 104 104 u32 *pci_gart = NULL, page_base, gart_idx; 105 105 dma_addr_t bus_address = 0; 106 - int i, j, ret = 0; 106 + int i, j, ret = -ENOMEM; 107 107 int max_ati_pages, max_real_pages; 108 108 109 109 if (!entry) { ··· 117 117 if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { 118 118 DRM_ERROR("fail to set dma mask to 0x%Lx\n", 119 119 (unsigned long long)gart_info->table_mask); 120 - ret = 1; 120 + ret = -EFAULT; 121 121 goto done; 122 122 } 123 123 ··· 160 160 drm_ati_pcigart_cleanup(dev, gart_info); 161 161 address = NULL; 162 162 bus_address = 0; 163 + ret = -ENOMEM; 163 164 goto done; 164 165 } 165 166 page_base = (u32) entry->busaddr[i]; ··· 189 188 page_base += ATI_PCIGART_PAGE_SIZE; 190 189 } 191 190 } 192 - ret = 1; 191 + ret = 0; 193 192 194 193 #if defined(__i386__) || defined(__x86_64__) 195 194 wbinvd();
+1 -1
drivers/gpu/drm/bochs/Makefile
··· 1 - bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o 1 + bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_hw.o 2 2 3 3 obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o
+11 -12
drivers/gpu/drm/bochs/bochs.h
··· 80 80 struct ttm_bo_device bdev; 81 81 bool initialized; 82 82 } ttm; 83 - 84 - /* fbdev */ 85 - struct { 86 - struct drm_framebuffer *fb; 87 - struct drm_fb_helper helper; 88 - } fb; 89 83 }; 90 84 91 85 struct bochs_bo { ··· 115 121 void bochs_hw_fini(struct drm_device *dev); 116 122 117 123 void bochs_hw_setmode(struct bochs_device *bochs, 118 - struct drm_display_mode *mode, 119 - const struct drm_format_info *format); 124 + struct drm_display_mode *mode); 125 + void bochs_hw_setformat(struct bochs_device *bochs, 126 + const struct drm_format_info *format); 120 127 void bochs_hw_setbase(struct bochs_device *bochs, 121 128 int x, int y, u64 addr); 122 129 int bochs_hw_load_edid(struct bochs_device *bochs); ··· 136 141 int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, 137 142 uint32_t handle, uint64_t *offset); 138 143 139 - int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr); 144 + int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag); 140 145 int bochs_bo_unpin(struct bochs_bo *bo); 146 + 147 + int bochs_gem_prime_pin(struct drm_gem_object *obj); 148 + void bochs_gem_prime_unpin(struct drm_gem_object *obj); 149 + void *bochs_gem_prime_vmap(struct drm_gem_object *obj); 150 + void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 151 + int bochs_gem_prime_mmap(struct drm_gem_object *obj, 152 + struct vm_area_struct *vma); 141 153 142 154 /* bochs_kms.c */ 143 155 int bochs_kms_init(struct bochs_device *bochs); 144 156 void bochs_kms_fini(struct bochs_device *bochs); 145 157 146 158 /* bochs_fbdev.c */ 147 - int bochs_fbdev_init(struct bochs_device *bochs); 148 - void bochs_fbdev_fini(struct bochs_device *bochs); 149 - 150 159 extern const struct drm_mode_config_funcs bochs_mode_funcs;
+13 -22
drivers/gpu/drm/bochs/bochs_drv.c
··· 16 16 module_param_named(modeset, bochs_modeset, int, 0444); 17 17 MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting"); 18 18 19 - static bool enable_fbdev = true; 20 - module_param_named(fbdev, enable_fbdev, bool, 0444); 21 - MODULE_PARM_DESC(fbdev, "register fbdev device"); 22 - 23 19 /* ---------------------------------------------------------------------- */ 24 20 /* drm interface */ 25 21 ··· 23 27 { 24 28 struct bochs_device *bochs = dev->dev_private; 25 29 26 - bochs_fbdev_fini(bochs); 27 30 bochs_kms_fini(bochs); 28 31 bochs_mm_fini(bochs); 29 32 bochs_hw_fini(dev); ··· 53 58 if (ret) 54 59 goto err; 55 60 56 - if (enable_fbdev) 57 - bochs_fbdev_init(bochs); 58 - 59 61 return 0; 60 62 61 63 err: ··· 73 81 }; 74 82 75 83 static struct drm_driver bochs_driver = { 76 - .driver_features = DRIVER_GEM | DRIVER_MODESET, 84 + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | 85 + DRIVER_PRIME, 77 86 .fops = &bochs_fops, 78 87 .name = "bochs-drm", 79 88 .desc = "bochs dispi vga interface (qemu stdvga)", ··· 84 91 .gem_free_object_unlocked = bochs_gem_free_object, 85 92 .dumb_create = bochs_dumb_create, 86 93 .dumb_map_offset = bochs_dumb_mmap_offset, 94 + 95 + .gem_prime_export = drm_gem_prime_export, 96 + .gem_prime_import = drm_gem_prime_import, 97 + .gem_prime_pin = bochs_gem_prime_pin, 98 + .gem_prime_unpin = bochs_gem_prime_unpin, 99 + .gem_prime_vmap = bochs_gem_prime_vmap, 100 + .gem_prime_vunmap = bochs_gem_prime_vunmap, 101 + .gem_prime_mmap = bochs_gem_prime_mmap, 87 102 }; 88 103 89 104 /* ---------------------------------------------------------------------- */ ··· 102 101 { 103 102 struct pci_dev *pdev = to_pci_dev(dev); 104 103 struct drm_device *drm_dev = pci_get_drvdata(pdev); 105 - struct bochs_device *bochs = drm_dev->dev_private; 106 104 107 - drm_kms_helper_poll_disable(drm_dev); 108 - 109 - drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 1); 110 - 111 - return 0; 105 + return 
drm_mode_config_helper_suspend(drm_dev); 112 106 } 113 107 114 108 static int bochs_pm_resume(struct device *dev) 115 109 { 116 110 struct pci_dev *pdev = to_pci_dev(dev); 117 111 struct drm_device *drm_dev = pci_get_drvdata(pdev); 118 - struct bochs_device *bochs = drm_dev->dev_private; 119 112 120 - drm_helper_resume_force_mode(drm_dev); 121 - 122 - drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 0); 123 - 124 - drm_kms_helper_poll_enable(drm_dev); 125 - return 0; 113 + return drm_mode_config_helper_resume(drm_dev); 126 114 } 127 115 #endif 128 116 ··· 155 165 if (ret) 156 166 goto err_unload; 157 167 168 + drm_fbdev_generic_setup(dev, 32); 158 169 return ret; 159 170 160 171 err_unload:
-163
drivers/gpu/drm/bochs/bochs_fbdev.c
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify 3 - * it under the terms of the GNU General Public License as published by 4 - * the Free Software Foundation; either version 2 of the License, or 5 - * (at your option) any later version. 6 - */ 7 - 8 - #include "bochs.h" 9 - #include <drm/drm_gem_framebuffer_helper.h> 10 - 11 - /* ---------------------------------------------------------------------- */ 12 - 13 - static int bochsfb_mmap(struct fb_info *info, 14 - struct vm_area_struct *vma) 15 - { 16 - struct drm_fb_helper *fb_helper = info->par; 17 - struct bochs_bo *bo = gem_to_bochs_bo(fb_helper->fb->obj[0]); 18 - 19 - return ttm_fbdev_mmap(vma, &bo->bo); 20 - } 21 - 22 - static struct fb_ops bochsfb_ops = { 23 - .owner = THIS_MODULE, 24 - DRM_FB_HELPER_DEFAULT_OPS, 25 - .fb_fillrect = drm_fb_helper_cfb_fillrect, 26 - .fb_copyarea = drm_fb_helper_cfb_copyarea, 27 - .fb_imageblit = drm_fb_helper_cfb_imageblit, 28 - .fb_mmap = bochsfb_mmap, 29 - }; 30 - 31 - static int bochsfb_create_object(struct bochs_device *bochs, 32 - const struct drm_mode_fb_cmd2 *mode_cmd, 33 - struct drm_gem_object **gobj_p) 34 - { 35 - struct drm_device *dev = bochs->dev; 36 - struct drm_gem_object *gobj; 37 - u32 size; 38 - int ret = 0; 39 - 40 - size = mode_cmd->pitches[0] * mode_cmd->height; 41 - ret = bochs_gem_create(dev, size, true, &gobj); 42 - if (ret) 43 - return ret; 44 - 45 - *gobj_p = gobj; 46 - return ret; 47 - } 48 - 49 - static int bochsfb_create(struct drm_fb_helper *helper, 50 - struct drm_fb_helper_surface_size *sizes) 51 - { 52 - struct bochs_device *bochs = 53 - container_of(helper, struct bochs_device, fb.helper); 54 - struct fb_info *info; 55 - struct drm_framebuffer *fb; 56 - struct drm_mode_fb_cmd2 mode_cmd; 57 - struct drm_gem_object *gobj = NULL; 58 - struct bochs_bo *bo = NULL; 59 - int size, ret; 60 - 61 - if (sizes->surface_bpp != 32) 62 - return -EINVAL; 63 - 64 - mode_cmd.width = sizes->surface_width; 65 - mode_cmd.height 
= sizes->surface_height; 66 - mode_cmd.pitches[0] = sizes->surface_width * 4; 67 - mode_cmd.pixel_format = DRM_FORMAT_HOST_XRGB8888; 68 - size = mode_cmd.pitches[0] * mode_cmd.height; 69 - 70 - /* alloc, pin & map bo */ 71 - ret = bochsfb_create_object(bochs, &mode_cmd, &gobj); 72 - if (ret) { 73 - DRM_ERROR("failed to create fbcon backing object %d\n", ret); 74 - return ret; 75 - } 76 - 77 - bo = gem_to_bochs_bo(gobj); 78 - 79 - ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 80 - if (ret) 81 - return ret; 82 - 83 - ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL); 84 - if (ret) { 85 - DRM_ERROR("failed to pin fbcon\n"); 86 - ttm_bo_unreserve(&bo->bo); 87 - return ret; 88 - } 89 - 90 - ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, 91 - &bo->kmap); 92 - if (ret) { 93 - DRM_ERROR("failed to kmap fbcon\n"); 94 - ttm_bo_unreserve(&bo->bo); 95 - return ret; 96 - } 97 - 98 - ttm_bo_unreserve(&bo->bo); 99 - 100 - /* init fb device */ 101 - info = drm_fb_helper_alloc_fbi(helper); 102 - if (IS_ERR(info)) { 103 - DRM_ERROR("Failed to allocate fbi: %ld\n", PTR_ERR(info)); 104 - return PTR_ERR(info); 105 - } 106 - 107 - info->par = &bochs->fb.helper; 108 - 109 - fb = drm_gem_fbdev_fb_create(bochs->dev, sizes, 0, gobj, NULL); 110 - if (IS_ERR(fb)) { 111 - DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb)); 112 - return PTR_ERR(fb); 113 - } 114 - 115 - /* setup helper */ 116 - bochs->fb.helper.fb = fb; 117 - 118 - strcpy(info->fix.id, "bochsdrmfb"); 119 - 120 - info->fbops = &bochsfb_ops; 121 - 122 - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); 123 - drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width, 124 - sizes->fb_height); 125 - 126 - info->screen_base = bo->kmap.virtual; 127 - info->screen_size = size; 128 - 129 - drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node); 130 - info->fix.smem_start = 0; 131 - info->fix.smem_len = size; 132 - return 0; 133 - } 134 - 135 - static const struct drm_fb_helper_funcs 
bochs_fb_helper_funcs = { 136 - .fb_probe = bochsfb_create, 137 - }; 138 - 139 - static struct drm_framebuffer * 140 - bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file, 141 - const struct drm_mode_fb_cmd2 *mode_cmd) 142 - { 143 - if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 && 144 - mode_cmd->pixel_format != DRM_FORMAT_BGRX8888) 145 - return ERR_PTR(-EINVAL); 146 - 147 - return drm_gem_fb_create(dev, file, mode_cmd); 148 - } 149 - 150 - const struct drm_mode_config_funcs bochs_mode_funcs = { 151 - .fb_create = bochs_gem_fb_create, 152 - }; 153 - 154 - int bochs_fbdev_init(struct bochs_device *bochs) 155 - { 156 - return drm_fb_helper_fbdev_setup(bochs->dev, &bochs->fb.helper, 157 - &bochs_fb_helper_funcs, 32, 1); 158 - } 159 - 160 - void bochs_fbdev_fini(struct bochs_device *bochs) 161 - { 162 - drm_fb_helper_fbdev_teardown(bochs->dev); 163 - }
+19 -7
drivers/gpu/drm/bochs/bochs_hw.c
··· 86 86 87 87 int bochs_hw_load_edid(struct bochs_device *bochs) 88 88 { 89 + u8 header[8]; 90 + 89 91 if (!bochs->mmio) 92 + return -1; 93 + 94 + /* check header to detect whenever edid support is enabled in qemu */ 95 + bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header)); 96 + if (drm_edid_header_is_valid(header) != 8) 90 97 return -1; 91 98 92 99 kfree(bochs->edid); ··· 204 197 } 205 198 206 199 void bochs_hw_setmode(struct bochs_device *bochs, 207 - struct drm_display_mode *mode, 208 - const struct drm_format_info *format) 200 + struct drm_display_mode *mode) 209 201 { 210 202 bochs->xres = mode->hdisplay; 211 203 bochs->yres = mode->vdisplay; ··· 212 206 bochs->stride = mode->hdisplay * (bochs->bpp / 8); 213 207 bochs->yres_virtual = bochs->fb_size / bochs->stride; 214 208 215 - DRM_DEBUG_DRIVER("%dx%d @ %d bpp, format %c%c%c%c, vy %d\n", 209 + DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n", 216 210 bochs->xres, bochs->yres, bochs->bpp, 217 - (format->format >> 0) & 0xff, 218 - (format->format >> 8) & 0xff, 219 - (format->format >> 16) & 0xff, 220 - (format->format >> 24) & 0xff, 221 211 bochs->yres_virtual); 222 212 223 213 bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */ ··· 231 229 232 230 bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 233 231 VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED); 232 + } 233 + 234 + void bochs_hw_setformat(struct bochs_device *bochs, 235 + const struct drm_format_info *format) 236 + { 237 + DRM_DEBUG_DRIVER("format %c%c%c%c\n", 238 + (format->format >> 0) & 0xff, 239 + (format->format >> 8) & 0xff, 240 + (format->format >> 16) & 0xff, 241 + (format->format >> 24) & 0xff); 234 242 235 243 switch (format->format) { 236 244 case DRM_FORMAT_XRGB8888:
+103 -115
drivers/gpu/drm/bochs/bochs_kms.c
··· 6 6 */ 7 7 8 8 #include "bochs.h" 9 + #include <drm/drm_atomic_helper.h> 9 10 #include <drm/drm_plane_helper.h> 11 + #include <drm/drm_atomic_uapi.h> 12 + #include <drm/drm_gem_framebuffer_helper.h> 10 13 11 14 static int defx = 1024; 12 15 static int defy = 768; ··· 21 18 22 19 /* ---------------------------------------------------------------------- */ 23 20 24 - static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode) 25 - { 26 - switch (mode) { 27 - case DRM_MODE_DPMS_ON: 28 - case DRM_MODE_DPMS_STANDBY: 29 - case DRM_MODE_DPMS_SUSPEND: 30 - case DRM_MODE_DPMS_OFF: 31 - default: 32 - return; 33 - } 34 - } 35 - 36 - static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 37 - struct drm_framebuffer *old_fb) 38 - { 39 - struct bochs_device *bochs = 40 - container_of(crtc, struct bochs_device, crtc); 41 - struct bochs_bo *bo; 42 - u64 gpu_addr = 0; 43 - int ret; 44 - 45 - if (old_fb) { 46 - bo = gem_to_bochs_bo(old_fb->obj[0]); 47 - ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 48 - if (ret) { 49 - DRM_ERROR("failed to reserve old_fb bo\n"); 50 - } else { 51 - bochs_bo_unpin(bo); 52 - ttm_bo_unreserve(&bo->bo); 53 - } 54 - } 55 - 56 - if (WARN_ON(crtc->primary->fb == NULL)) 57 - return -EINVAL; 58 - 59 - bo = gem_to_bochs_bo(crtc->primary->fb->obj[0]); 60 - ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 61 - if (ret) 62 - return ret; 63 - 64 - ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); 65 - if (ret) { 66 - ttm_bo_unreserve(&bo->bo); 67 - return ret; 68 - } 69 - 70 - ttm_bo_unreserve(&bo->bo); 71 - bochs_hw_setbase(bochs, x, y, gpu_addr); 72 - return 0; 73 - } 74 - 75 - static int bochs_crtc_mode_set(struct drm_crtc *crtc, 76 - struct drm_display_mode *mode, 77 - struct drm_display_mode *adjusted_mode, 78 - int x, int y, struct drm_framebuffer *old_fb) 21 + static void bochs_crtc_mode_set_nofb(struct drm_crtc *crtc) 79 22 { 80 23 struct bochs_device *bochs = 81 24 container_of(crtc, struct bochs_device, crtc); 82 25 83 - if 
(WARN_ON(crtc->primary->fb == NULL)) 84 - return -EINVAL; 85 - 86 - bochs_hw_setmode(bochs, mode, crtc->primary->fb->format); 87 - bochs_crtc_mode_set_base(crtc, x, y, old_fb); 88 - return 0; 26 + bochs_hw_setmode(bochs, &crtc->mode); 89 27 } 90 28 91 - static void bochs_crtc_prepare(struct drm_crtc *crtc) 29 + static void bochs_crtc_atomic_enable(struct drm_crtc *crtc, 30 + struct drm_crtc_state *old_crtc_state) 92 31 { 93 32 } 94 33 95 - static void bochs_crtc_commit(struct drm_crtc *crtc) 34 + static void bochs_crtc_atomic_flush(struct drm_crtc *crtc, 35 + struct drm_crtc_state *old_crtc_state) 96 36 { 97 - } 37 + struct drm_device *dev = crtc->dev; 38 + struct drm_pending_vblank_event *event; 98 39 99 - static int bochs_crtc_page_flip(struct drm_crtc *crtc, 100 - struct drm_framebuffer *fb, 101 - struct drm_pending_vblank_event *event, 102 - uint32_t page_flip_flags, 103 - struct drm_modeset_acquire_ctx *ctx) 104 - { 105 - struct bochs_device *bochs = 106 - container_of(crtc, struct bochs_device, crtc); 107 - struct drm_framebuffer *old_fb = crtc->primary->fb; 108 - unsigned long irqflags; 40 + if (crtc->state && crtc->state->event) { 41 + unsigned long irqflags; 109 42 110 - crtc->primary->fb = fb; 111 - bochs_crtc_mode_set_base(crtc, 0, 0, old_fb); 112 - if (event) { 113 - spin_lock_irqsave(&bochs->dev->event_lock, irqflags); 43 + spin_lock_irqsave(&dev->event_lock, irqflags); 44 + event = crtc->state->event; 45 + crtc->state->event = NULL; 114 46 drm_crtc_send_vblank_event(crtc, event); 115 - spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags); 47 + spin_unlock_irqrestore(&dev->event_lock, irqflags); 116 48 } 117 - return 0; 118 49 } 50 + 119 51 120 52 /* These provide the minimum set of functions required to handle a CRTC */ 121 53 static const struct drm_crtc_funcs bochs_crtc_funcs = { 122 - .set_config = drm_crtc_helper_set_config, 54 + .set_config = drm_atomic_helper_set_config, 123 55 .destroy = drm_crtc_cleanup, 124 - .page_flip = 
bochs_crtc_page_flip, 56 + .page_flip = drm_atomic_helper_page_flip, 57 + .reset = drm_atomic_helper_crtc_reset, 58 + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 59 + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 125 60 }; 126 61 127 62 static const struct drm_crtc_helper_funcs bochs_helper_funcs = { 128 - .dpms = bochs_crtc_dpms, 129 - .mode_set = bochs_crtc_mode_set, 130 - .mode_set_base = bochs_crtc_mode_set_base, 131 - .prepare = bochs_crtc_prepare, 132 - .commit = bochs_crtc_commit, 63 + .mode_set_nofb = bochs_crtc_mode_set_nofb, 64 + .atomic_enable = bochs_crtc_atomic_enable, 65 + .atomic_flush = bochs_crtc_atomic_flush, 133 66 }; 134 67 135 68 static const uint32_t bochs_formats[] = { 136 69 DRM_FORMAT_XRGB8888, 137 70 DRM_FORMAT_BGRX8888, 71 + }; 72 + 73 + static void bochs_plane_atomic_update(struct drm_plane *plane, 74 + struct drm_plane_state *old_state) 75 + { 76 + struct bochs_device *bochs = plane->dev->dev_private; 77 + struct bochs_bo *bo; 78 + 79 + if (!plane->state->fb) 80 + return; 81 + bo = gem_to_bochs_bo(plane->state->fb->obj[0]); 82 + bochs_hw_setbase(bochs, 83 + plane->state->crtc_x, 84 + plane->state->crtc_y, 85 + bo->bo.offset); 86 + bochs_hw_setformat(bochs, plane->state->fb->format); 87 + } 88 + 89 + static int bochs_plane_prepare_fb(struct drm_plane *plane, 90 + struct drm_plane_state *new_state) 91 + { 92 + struct bochs_bo *bo; 93 + 94 + if (!new_state->fb) 95 + return 0; 96 + bo = gem_to_bochs_bo(new_state->fb->obj[0]); 97 + return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM); 98 + } 99 + 100 + static void bochs_plane_cleanup_fb(struct drm_plane *plane, 101 + struct drm_plane_state *old_state) 102 + { 103 + struct bochs_bo *bo; 104 + 105 + if (!old_state->fb) 106 + return; 107 + bo = gem_to_bochs_bo(old_state->fb->obj[0]); 108 + bochs_bo_unpin(bo); 109 + } 110 + 111 + static const struct drm_plane_helper_funcs bochs_plane_helper_funcs = { 112 + .atomic_update = bochs_plane_atomic_update, 113 + .prepare_fb = 
bochs_plane_prepare_fb, 114 + .cleanup_fb = bochs_plane_cleanup_fb, 115 + }; 116 + 117 + static const struct drm_plane_funcs bochs_plane_funcs = { 118 + .update_plane = drm_atomic_helper_update_plane, 119 + .disable_plane = drm_atomic_helper_disable_plane, 120 + .destroy = drm_primary_helper_destroy, 121 + .reset = drm_atomic_helper_plane_reset, 122 + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 123 + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 138 124 }; 139 125 140 126 static struct drm_plane *bochs_primary_plane(struct drm_device *dev) ··· 138 146 } 139 147 140 148 ret = drm_universal_plane_init(dev, primary, 0, 141 - &drm_primary_helper_funcs, 149 + &bochs_plane_funcs, 142 150 bochs_formats, 143 151 ARRAY_SIZE(bochs_formats), 144 152 NULL, 145 153 DRM_PLANE_TYPE_PRIMARY, NULL); 146 154 if (ret) { 147 155 kfree(primary); 148 - primary = NULL; 156 + return NULL; 149 157 } 150 158 159 + drm_plane_helper_add(primary, &bochs_plane_helper_funcs); 151 160 return primary; 152 161 } 153 162 ··· 163 170 drm_crtc_helper_add(crtc, &bochs_helper_funcs); 164 171 } 165 172 166 - static void bochs_encoder_mode_set(struct drm_encoder *encoder, 167 - struct drm_display_mode *mode, 168 - struct drm_display_mode *adjusted_mode) 169 - { 170 - } 171 - 172 - static void bochs_encoder_dpms(struct drm_encoder *encoder, int state) 173 - { 174 - } 175 - 176 - static void bochs_encoder_prepare(struct drm_encoder *encoder) 177 - { 178 - } 179 - 180 - static void bochs_encoder_commit(struct drm_encoder *encoder) 181 - { 182 - } 183 - 184 - static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = { 185 - .dpms = bochs_encoder_dpms, 186 - .mode_set = bochs_encoder_mode_set, 187 - .prepare = bochs_encoder_prepare, 188 - .commit = bochs_encoder_commit, 189 - }; 190 - 191 173 static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = { 192 174 .destroy = drm_encoder_cleanup, 193 175 }; ··· 175 207 encoder->possible_crtcs = 0x1; 
176 208 drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs, 177 209 DRM_MODE_ENCODER_DAC, NULL); 178 - drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs); 179 210 } 180 211 181 212 ··· 233 266 .dpms = drm_helper_connector_dpms, 234 267 .fill_modes = drm_helper_probe_single_connector_modes, 235 268 .destroy = drm_connector_cleanup, 269 + .reset = drm_atomic_helper_connector_reset, 270 + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 271 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 236 272 }; 237 273 238 274 static void bochs_connector_init(struct drm_device *dev) ··· 257 287 } 258 288 } 259 289 290 + static struct drm_framebuffer * 291 + bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file, 292 + const struct drm_mode_fb_cmd2 *mode_cmd) 293 + { 294 + if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 && 295 + mode_cmd->pixel_format != DRM_FORMAT_BGRX8888) 296 + return ERR_PTR(-EINVAL); 297 + 298 + return drm_gem_fb_create(dev, file, mode_cmd); 299 + } 300 + 301 + const struct drm_mode_config_funcs bochs_mode_funcs = { 302 + .fb_create = bochs_gem_fb_create, 303 + .atomic_check = drm_atomic_helper_check, 304 + .atomic_commit = drm_atomic_helper_commit, 305 + }; 260 306 261 307 int bochs_kms_init(struct bochs_device *bochs) 262 308 { ··· 294 308 bochs_connector_init(bochs->dev); 295 309 drm_connector_attach_encoder(&bochs->connector, 296 310 &bochs->encoder); 311 + 312 + drm_mode_config_reset(bochs->dev); 297 313 298 314 return 0; 299 315 }
+58 -10
drivers/gpu/drm/bochs/bochs_mm.c
··· 210 210 bo->placement.num_busy_placement = c; 211 211 } 212 212 213 - static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo) 214 - { 215 - return bo->bo.offset; 216 - } 217 - 218 - int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr) 213 + int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag) 219 214 { 220 215 struct ttm_operation_ctx ctx = { false, false }; 221 216 int i, ret; 222 217 223 218 if (bo->pin_count) { 224 219 bo->pin_count++; 225 - if (gpu_addr) 226 - *gpu_addr = bochs_bo_gpu_offset(bo); 227 220 return 0; 228 221 } 229 222 230 223 bochs_ttm_placement(bo, pl_flag); 231 224 for (i = 0; i < bo->placement.num_placement; i++) 232 225 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 226 + ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 227 + if (ret) 228 + return ret; 233 229 ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 230 + ttm_bo_unreserve(&bo->bo); 234 231 if (ret) 235 232 return ret; 236 233 237 234 bo->pin_count = 1; 238 - if (gpu_addr) 239 - *gpu_addr = bochs_bo_gpu_offset(bo); 240 235 return 0; 241 236 } 242 237 ··· 251 256 252 257 for (i = 0; i < bo->placement.num_placement; i++) 253 258 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 259 + ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 260 + if (ret) 261 + return ret; 254 262 ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 263 + ttm_bo_unreserve(&bo->bo); 255 264 if (ret) 256 265 return ret; 257 266 ··· 394 395 395 396 drm_gem_object_put_unlocked(obj); 396 397 return 0; 398 + } 399 + 400 + /* ---------------------------------------------------------------------- */ 401 + 402 + int bochs_gem_prime_pin(struct drm_gem_object *obj) 403 + { 404 + struct bochs_bo *bo = gem_to_bochs_bo(obj); 405 + 406 + return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM); 407 + } 408 + 409 + void bochs_gem_prime_unpin(struct drm_gem_object *obj) 410 + { 411 + struct bochs_bo *bo = gem_to_bochs_bo(obj); 412 + 413 + bochs_bo_unpin(bo); 414 + } 415 + 416 + void *bochs_gem_prime_vmap(struct 
drm_gem_object *obj) 417 + { 418 + struct bochs_bo *bo = gem_to_bochs_bo(obj); 419 + bool is_iomem; 420 + int ret; 421 + 422 + ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM); 423 + if (ret) 424 + return NULL; 425 + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); 426 + if (ret) { 427 + bochs_bo_unpin(bo); 428 + return NULL; 429 + } 430 + return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); 431 + } 432 + 433 + void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 434 + { 435 + struct bochs_bo *bo = gem_to_bochs_bo(obj); 436 + 437 + ttm_bo_kunmap(&bo->kmap); 438 + bochs_bo_unpin(bo); 439 + } 440 + 441 + int bochs_gem_prime_mmap(struct drm_gem_object *obj, 442 + struct vm_area_struct *vma) 443 + { 444 + struct bochs_bo *bo = gem_to_bochs_bo(obj); 445 + 446 + return ttm_fbdev_mmap(vma, &bo->bo); 397 447 }
+2 -2
drivers/gpu/drm/bridge/adv7511/adv7511.h
··· 395 395 #ifdef CONFIG_DRM_I2C_ADV7533 396 396 void adv7533_dsi_power_on(struct adv7511 *adv); 397 397 void adv7533_dsi_power_off(struct adv7511 *adv); 398 - void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode); 398 + void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode); 399 399 int adv7533_patch_registers(struct adv7511 *adv); 400 400 int adv7533_patch_cec_registers(struct adv7511 *adv); 401 401 int adv7533_attach_dsi(struct adv7511 *adv); ··· 411 411 } 412 412 413 413 static inline void adv7533_mode_set(struct adv7511 *adv, 414 - struct drm_display_mode *mode) 414 + const struct drm_display_mode *mode) 415 415 { 416 416 } 417 417
+4 -4
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 676 676 } 677 677 678 678 static void adv7511_mode_set(struct adv7511 *adv7511, 679 - struct drm_display_mode *mode, 680 - struct drm_display_mode *adj_mode) 679 + const struct drm_display_mode *mode, 680 + const struct drm_display_mode *adj_mode) 681 681 { 682 682 unsigned int low_refresh_rate; 683 683 unsigned int hsync_polarity = 0; ··· 839 839 } 840 840 841 841 static void adv7511_bridge_mode_set(struct drm_bridge *bridge, 842 - struct drm_display_mode *mode, 843 - struct drm_display_mode *adj_mode) 842 + const struct drm_display_mode *mode, 843 + const struct drm_display_mode *adj_mode) 844 844 { 845 845 struct adv7511 *adv = bridge_to_adv7511(bridge); 846 846
+1 -1
drivers/gpu/drm/bridge/adv7511/adv7533.c
··· 108 108 regmap_write(adv->regmap_cec, 0x27, 0x0b); 109 109 } 110 110 111 - void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode) 111 + void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode) 112 112 { 113 113 struct mipi_dsi_device *dsi = adv->dsi; 114 114 int lanes, ret;
+5 -4
drivers/gpu/drm/bridge/analogix-anx78xx.c
··· 1082 1082 } 1083 1083 1084 1084 static void anx78xx_bridge_mode_set(struct drm_bridge *bridge, 1085 - struct drm_display_mode *mode, 1086 - struct drm_display_mode *adjusted_mode) 1085 + const struct drm_display_mode *mode, 1086 + const struct drm_display_mode *adjusted_mode) 1087 1087 { 1088 1088 struct anx78xx *anx78xx = bridge_to_anx78xx(bridge); 1089 1089 struct hdmi_avi_infoframe frame; ··· 1094 1094 1095 1095 mutex_lock(&anx78xx->lock); 1096 1096 1097 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode, 1098 - false); 1097 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, 1098 + &anx78xx->connector, 1099 + adjusted_mode); 1099 1100 if (err) { 1100 1101 DRM_ERROR("Failed to setup AVI infoframe: %d\n", err); 1101 1102 goto unlock;
+2 -2
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
··· 1361 1361 } 1362 1362 1363 1363 static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, 1364 - struct drm_display_mode *orig_mode, 1365 - struct drm_display_mode *mode) 1364 + const struct drm_display_mode *orig_mode, 1365 + const struct drm_display_mode *mode) 1366 1366 { 1367 1367 struct analogix_dp_device *dp = bridge->driver_private; 1368 1368 struct drm_display_info *display_info = &dp->connector.display_info;
+18 -4
drivers/gpu/drm/bridge/panel.c
··· 134 134 }; 135 135 136 136 /** 137 - * drm_panel_bridge_add - Creates a drm_bridge and drm_connector that 138 - * just calls the appropriate functions from drm_panel. 137 + * drm_panel_bridge_add - Creates a &drm_bridge and &drm_connector that 138 + * just calls the appropriate functions from &drm_panel. 139 139 * 140 140 * @panel: The drm_panel being wrapped. Must be non-NULL. 141 141 * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be ··· 149 149 * passed to drm_bridge_attach(). The drm_panel_prepare() and related 150 150 * functions can be dropped from the encoder driver (they're now 151 151 * called by the KMS helpers before calling into the encoder), along 152 - * with connector creation. When done with the bridge, 153 - * drm_bridge_detach() should be called as normal, then 152 + * with connector creation. When done with the bridge (after 153 + * drm_mode_config_cleanup() if the bridge has already been attached), then 154 154 * drm_panel_bridge_remove() to free it. 155 + * 156 + * See devm_drm_panel_bridge_add() for an automatically manged version of this 157 + * function. 155 158 */ 156 159 struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, 157 160 u32 connector_type) ··· 213 210 drm_panel_bridge_remove(*bridge); 214 211 } 215 212 213 + /** 214 + * devm_drm_panel_bridge_add - Creates a managed &drm_bridge and &drm_connector 215 + * that just calls the appropriate functions from &drm_panel. 216 + * @dev: device to tie the bridge lifetime to 217 + * @panel: The drm_panel being wrapped. Must be non-NULL. 218 + * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be 219 + * created. 220 + * 221 + * This is the managed version of drm_panel_bridge_add() which automatically 222 + * calls drm_panel_bridge_remove() when @dev is unbound. 223 + */ 216 224 struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, 217 225 struct drm_panel *panel, 218 226 u32 connector_type)
+4 -3
drivers/gpu/drm/bridge/sii902x.c
··· 232 232 } 233 233 234 234 static void sii902x_bridge_mode_set(struct drm_bridge *bridge, 235 - struct drm_display_mode *mode, 236 - struct drm_display_mode *adj) 235 + const struct drm_display_mode *mode, 236 + const struct drm_display_mode *adj) 237 237 { 238 238 struct sii902x *sii902x = bridge_to_sii902x(bridge); 239 239 struct regmap *regmap = sii902x->regmap; ··· 258 258 if (ret) 259 259 return; 260 260 261 - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj, false); 261 + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, 262 + &sii902x->connector, adj); 262 263 if (ret < 0) { 263 264 DRM_ERROR("couldn't fill AVI infoframe\n"); 264 265 return;
+1 -2
drivers/gpu/drm/bridge/sil-sii8620.c
··· 1104 1104 int ret; 1105 1105 1106 1106 ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, 1107 - mode, 1108 - true); 1107 + NULL, mode); 1109 1108 if (ctx->use_packed_pixel) 1110 1109 frm.avi.colorspace = HDMI_COLORSPACE_YUV422; 1111 1110
+5 -4
drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * dw-hdmi-i2s-audio.c 3 4 * 4 5 * Copyright (c) 2017 Renesas Solutions Corp. 5 6 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License version 2 as 9 - * published by the Free Software Foundation. 10 7 */ 8 + 9 + #include <linux/dma-mapping.h> 10 + #include <linux/module.h> 11 + 11 12 #include <drm/bridge/dw_hdmi.h> 12 13 13 14 #include <sound/hdmi-codec.h>
+4 -3
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
··· 1344 1344 u8 val; 1345 1345 1346 1346 /* Initialise info frame from DRM mode */ 1347 - drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 1347 + drm_hdmi_avi_infoframe_from_display_mode(&frame, 1348 + &hdmi->connector, mode); 1348 1349 1349 1350 if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) 1350 1351 frame.colorspace = HDMI_COLORSPACE_YUV444; ··· 1999 1998 } 2000 1999 2001 2000 static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, 2002 - struct drm_display_mode *orig_mode, 2003 - struct drm_display_mode *mode) 2001 + const struct drm_display_mode *orig_mode, 2002 + const struct drm_display_mode *mode) 2004 2003 { 2005 2004 struct dw_hdmi *hdmi = bridge->driver_private; 2006 2005
+9 -9
drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
··· 248 248 * The controller should generate 2 frames before 249 249 * preparing the peripheral. 250 250 */ 251 - static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode) 251 + static void dw_mipi_dsi_wait_for_two_frames(const struct drm_display_mode *mode) 252 252 { 253 253 int refresh, two_frames; 254 254 ··· 564 564 } 565 565 566 566 static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi, 567 - struct drm_display_mode *mode) 567 + const struct drm_display_mode *mode) 568 568 { 569 569 u32 val = 0, color = 0; 570 570 ··· 607 607 } 608 608 609 609 static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi, 610 - struct drm_display_mode *mode) 610 + const struct drm_display_mode *mode) 611 611 { 612 612 /* 613 613 * TODO dw drv improvements ··· 642 642 643 643 /* Get lane byte clock cycles. */ 644 644 static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi, 645 - struct drm_display_mode *mode, 645 + const struct drm_display_mode *mode, 646 646 u32 hcomponent) 647 647 { 648 648 u32 frac, lbcc; ··· 658 658 } 659 659 660 660 static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi, 661 - struct drm_display_mode *mode) 661 + const struct drm_display_mode *mode) 662 662 { 663 663 u32 htotal, hsa, hbp, lbcc; 664 664 ··· 681 681 } 682 682 683 683 static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi, 684 - struct drm_display_mode *mode) 684 + const struct drm_display_mode *mode) 685 685 { 686 686 u32 vactive, vsa, vfp, vbp; 687 687 ··· 818 818 } 819 819 820 820 static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi, 821 - struct drm_display_mode *adjusted_mode) 821 + const struct drm_display_mode *adjusted_mode) 822 822 { 823 823 const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; 824 824 void *priv_data = dsi->plat_data->priv_data; ··· 861 861 } 862 862 863 863 static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge, 864 - struct drm_display_mode *mode, 865 - struct 
drm_display_mode *adjusted_mode) 864 + const struct drm_display_mode *mode, 865 + const struct drm_display_mode *adjusted_mode) 866 866 { 867 867 struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); 868 868
+5 -4
drivers/gpu/drm/bridge/tc358767.c
··· 203 203 /* display edid */ 204 204 struct edid *edid; 205 205 /* current mode */ 206 - struct drm_display_mode *mode; 206 + const struct drm_display_mode *mode; 207 207 208 208 u32 rev; 209 209 u8 assr; ··· 648 648 return ret; 649 649 } 650 650 651 - static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) 651 + static int tc_set_video_mode(struct tc_data *tc, 652 + const struct drm_display_mode *mode) 652 653 { 653 654 int ret; 654 655 int vid_sync_dly; ··· 1114 1113 } 1115 1114 1116 1115 static void tc_bridge_mode_set(struct drm_bridge *bridge, 1117 - struct drm_display_mode *mode, 1118 - struct drm_display_mode *adj) 1116 + const struct drm_display_mode *mode, 1117 + const struct drm_display_mode *adj) 1119 1118 { 1120 1119 struct tc_data *tc = bridge_to_tc(bridge); 1121 1120
+3
drivers/gpu/drm/cirrus/cirrus_fbdev.c
··· 10 10 */ 11 11 #include <linux/module.h> 12 12 #include <drm/drmP.h> 13 + #include <drm/drm_util.h> 13 14 #include <drm/drm_fb_helper.h> 14 15 #include <drm/drm_crtc_helper.h> 15 16 ··· 256 255 struct cirrus_fbdev *gfbdev) 257 256 { 258 257 struct drm_framebuffer *gfb = gfbdev->gfb; 258 + 259 + drm_helper_force_disable_all(dev); 259 260 260 261 drm_fb_helper_unregister_fbi(&gfbdev->helper); 261 262
+15 -4
drivers/gpu/drm/drm_atomic.c
··· 698 698 699 699 /** 700 700 * drm_atomic_private_obj_init - initialize private object 701 + * @dev: DRM device this object will be attached to 701 702 * @obj: private object 702 703 * @state: initial private object state 703 704 * @funcs: pointer to the struct of function pointers that identify the object ··· 708 707 * driver private object that needs its own atomic state. 709 708 */ 710 709 void 711 - drm_atomic_private_obj_init(struct drm_private_obj *obj, 710 + drm_atomic_private_obj_init(struct drm_device *dev, 711 + struct drm_private_obj *obj, 712 712 struct drm_private_state *state, 713 713 const struct drm_private_state_funcs *funcs) 714 714 { 715 715 memset(obj, 0, sizeof(*obj)); 716 716 717 + drm_modeset_lock_init(&obj->lock); 718 + 717 719 obj->state = state; 718 720 obj->funcs = funcs; 721 + list_add_tail(&obj->head, &dev->mode_config.privobj_list); 719 722 } 720 723 EXPORT_SYMBOL(drm_atomic_private_obj_init); 721 724 ··· 732 727 void 733 728 drm_atomic_private_obj_fini(struct drm_private_obj *obj) 734 729 { 730 + list_del(&obj->head); 735 731 obj->funcs->atomic_destroy_state(obj, obj->state); 732 + drm_modeset_lock_fini(&obj->lock); 736 733 } 737 734 EXPORT_SYMBOL(drm_atomic_private_obj_fini); 738 735 ··· 744 737 * @obj: private object to get the state for 745 738 * 746 739 * This function returns the private object state for the given private object, 747 - * allocating the state if needed. It does not grab any locks as the caller is 748 - * expected to care of any required locking. 740 + * allocating the state if needed. It will also grab the relevant private 741 + * object lock to make sure that the state is consistent. 
749 742 * 750 743 * RETURNS: 751 744 * ··· 755 748 drm_atomic_get_private_obj_state(struct drm_atomic_state *state, 756 749 struct drm_private_obj *obj) 757 750 { 758 - int index, num_objs, i; 751 + int index, num_objs, i, ret; 759 752 size_t size; 760 753 struct __drm_private_objs_state *arr; 761 754 struct drm_private_state *obj_state; ··· 763 756 for (i = 0; i < state->num_private_objs; i++) 764 757 if (obj == state->private_objs[i].ptr) 765 758 return state->private_objs[i].state; 759 + 760 + ret = drm_modeset_lock(&obj->lock, state->acquire_ctx); 761 + if (ret) 762 + return ERR_PTR(ret); 766 763 767 764 num_objs = state->num_private_objs + 1; 768 765 size = sizeof(*state->private_objs) * num_objs;
+2 -2
drivers/gpu/drm/drm_bridge.c
··· 294 294 * Note: the bridge passed should be the one closest to the encoder 295 295 */ 296 296 void drm_bridge_mode_set(struct drm_bridge *bridge, 297 - struct drm_display_mode *mode, 298 - struct drm_display_mode *adjusted_mode) 297 + const struct drm_display_mode *mode, 298 + const struct drm_display_mode *adjusted_mode) 299 299 { 300 300 if (!bridge) 301 301 return;
+69 -22
drivers/gpu/drm/drm_connector.c
··· 1138 1138 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type); 1139 1139 1140 1140 /** 1141 - * drm_create_tv_properties - create TV specific connector properties 1141 + * drm_mode_attach_tv_margin_properties - attach TV connector margin properties 1142 + * @connector: DRM connector 1143 + * 1144 + * Called by a driver when it needs to attach TV margin props to a connector. 1145 + * Typically used on SDTV and HDMI connectors. 1146 + */ 1147 + void drm_connector_attach_tv_margin_properties(struct drm_connector *connector) 1148 + { 1149 + struct drm_device *dev = connector->dev; 1150 + 1151 + drm_object_attach_property(&connector->base, 1152 + dev->mode_config.tv_left_margin_property, 1153 + 0); 1154 + drm_object_attach_property(&connector->base, 1155 + dev->mode_config.tv_right_margin_property, 1156 + 0); 1157 + drm_object_attach_property(&connector->base, 1158 + dev->mode_config.tv_top_margin_property, 1159 + 0); 1160 + drm_object_attach_property(&connector->base, 1161 + dev->mode_config.tv_bottom_margin_property, 1162 + 0); 1163 + } 1164 + EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties); 1165 + 1166 + /** 1167 + * drm_mode_create_tv_margin_properties - create TV connector margin properties 1168 + * @dev: DRM device 1169 + * 1170 + * Called by a driver's HDMI connector initialization routine, this function 1171 + * creates the TV margin properties for a given device. No need to call this 1172 + * function for an SDTV connector, it's already called from 1173 + * drm_mode_create_tv_properties(). 
1174 + */ 1175 + int drm_mode_create_tv_margin_properties(struct drm_device *dev) 1176 + { 1177 + if (dev->mode_config.tv_left_margin_property) 1178 + return 0; 1179 + 1180 + dev->mode_config.tv_left_margin_property = 1181 + drm_property_create_range(dev, 0, "left margin", 0, 100); 1182 + if (!dev->mode_config.tv_left_margin_property) 1183 + return -ENOMEM; 1184 + 1185 + dev->mode_config.tv_right_margin_property = 1186 + drm_property_create_range(dev, 0, "right margin", 0, 100); 1187 + if (!dev->mode_config.tv_right_margin_property) 1188 + return -ENOMEM; 1189 + 1190 + dev->mode_config.tv_top_margin_property = 1191 + drm_property_create_range(dev, 0, "top margin", 0, 100); 1192 + if (!dev->mode_config.tv_top_margin_property) 1193 + return -ENOMEM; 1194 + 1195 + dev->mode_config.tv_bottom_margin_property = 1196 + drm_property_create_range(dev, 0, "bottom margin", 0, 100); 1197 + if (!dev->mode_config.tv_bottom_margin_property) 1198 + return -ENOMEM; 1199 + 1200 + return 0; 1201 + } 1202 + EXPORT_SYMBOL(drm_mode_create_tv_margin_properties); 1203 + 1204 + /** 1205 + * drm_mode_create_tv_properties - create TV specific connector properties 1142 1206 * @dev: DRM device 1143 1207 * @num_modes: number of different TV formats (modes) supported 1144 1208 * @modes: array of pointers to strings containing name of each format ··· 1247 1183 /* 1248 1184 * Other, TV specific properties: margins & TV modes. 
1249 1185 */ 1250 - dev->mode_config.tv_left_margin_property = 1251 - drm_property_create_range(dev, 0, "left margin", 0, 100); 1252 - if (!dev->mode_config.tv_left_margin_property) 1253 - goto nomem; 1254 - 1255 - dev->mode_config.tv_right_margin_property = 1256 - drm_property_create_range(dev, 0, "right margin", 0, 100); 1257 - if (!dev->mode_config.tv_right_margin_property) 1258 - goto nomem; 1259 - 1260 - dev->mode_config.tv_top_margin_property = 1261 - drm_property_create_range(dev, 0, "top margin", 0, 100); 1262 - if (!dev->mode_config.tv_top_margin_property) 1263 - goto nomem; 1264 - 1265 - dev->mode_config.tv_bottom_margin_property = 1266 - drm_property_create_range(dev, 0, "bottom margin", 0, 100); 1267 - if (!dev->mode_config.tv_bottom_margin_property) 1186 + if (drm_mode_create_tv_margin_properties(dev)) 1268 1187 goto nomem; 1269 1188 1270 1189 dev->mode_config.tv_mode_property = ··· 2124 2077 * identifier for the tile group. 2125 2078 * 2126 2079 * RETURNS: 2127 - * new tile group or error. 2080 + * new tile group or NULL. 2128 2081 */ 2129 2082 struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, 2130 2083 char topology[8]) ··· 2134 2087 2135 2088 tg = kzalloc(sizeof(*tg), GFP_KERNEL); 2136 2089 if (!tg) 2137 - return ERR_PTR(-ENOMEM); 2090 + return NULL; 2138 2091 2139 2092 kref_init(&tg->refcount); 2140 2093 memcpy(tg->group_data, topology, 8); ··· 2146 2099 tg->id = ret; 2147 2100 } else { 2148 2101 kfree(tg); 2149 - tg = ERR_PTR(ret); 2102 + tg = NULL; 2150 2103 } 2151 2104 2152 2105 mutex_unlock(&dev->mode_config.idr_mutex);
+9 -6
drivers/gpu/drm/drm_context.c
··· 361 361 { 362 362 struct drm_ctx_list *ctx_entry; 363 363 struct drm_ctx *ctx = data; 364 + int tmp_handle; 364 365 365 366 if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && 366 367 !drm_core_check_feature(dev, DRIVER_LEGACY)) 367 368 return -EOPNOTSUPP; 368 369 369 - ctx->handle = drm_legacy_ctxbitmap_next(dev); 370 - if (ctx->handle == DRM_KERNEL_CONTEXT) { 370 + tmp_handle = drm_legacy_ctxbitmap_next(dev); 371 + if (tmp_handle == DRM_KERNEL_CONTEXT) { 371 372 /* Skip kernel's context and get a new one. */ 372 - ctx->handle = drm_legacy_ctxbitmap_next(dev); 373 + tmp_handle = drm_legacy_ctxbitmap_next(dev); 373 374 } 374 - DRM_DEBUG("%d\n", ctx->handle); 375 - if (ctx->handle < 0) { 375 + DRM_DEBUG("%d\n", tmp_handle); 376 + if (tmp_handle < 0) { 376 377 DRM_DEBUG("Not enough free contexts.\n"); 377 378 /* Should this return -EBUSY instead? */ 378 - return -ENOMEM; 379 + return tmp_handle; 379 380 } 381 + 382 + ctx->handle = tmp_handle; 380 383 381 384 ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); 382 385 if (!ctx_entry) {
-41
drivers/gpu/drm/drm_crtc.c
··· 93 93 } 94 94 EXPORT_SYMBOL(drm_crtc_from_index); 95 95 96 - /** 97 - * drm_crtc_force_disable - Forcibly turn off a CRTC 98 - * @crtc: CRTC to turn off 99 - * 100 - * Note: This should only be used by non-atomic legacy drivers. 101 - * 102 - * Returns: 103 - * Zero on success, error code on failure. 104 - */ 105 96 int drm_crtc_force_disable(struct drm_crtc *crtc) 106 97 { 107 98 struct drm_mode_set set = { ··· 103 112 104 113 return drm_mode_set_config_internal(&set); 105 114 } 106 - EXPORT_SYMBOL(drm_crtc_force_disable); 107 - 108 - /** 109 - * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs 110 - * @dev: DRM device whose CRTCs to turn off 111 - * 112 - * Drivers may want to call this on unload to ensure that all displays are 113 - * unlit and the GPU is in a consistent, low power state. Takes modeset locks. 114 - * 115 - * Note: This should only be used by non-atomic legacy drivers. For an atomic 116 - * version look at drm_atomic_helper_shutdown(). 117 - * 118 - * Returns: 119 - * Zero on success, error code on failure. 120 - */ 121 - int drm_crtc_force_disable_all(struct drm_device *dev) 122 - { 123 - struct drm_crtc *crtc; 124 - int ret = 0; 125 - 126 - drm_modeset_lock_all(dev); 127 - drm_for_each_crtc(crtc, dev) 128 - if (crtc->enabled) { 129 - ret = drm_crtc_force_disable(crtc); 130 - if (ret) 131 - goto out; 132 - } 133 - out: 134 - drm_modeset_unlock_all(dev); 135 - return ret; 136 - } 137 - EXPORT_SYMBOL(drm_crtc_force_disable_all); 138 115 139 116 static unsigned int drm_num_crtcs(struct drm_device *dev) 140 117 {
+51 -7
drivers/gpu/drm/drm_crtc_helper.c
··· 93 93 struct drm_connector_list_iter conn_iter; 94 94 struct drm_device *dev = encoder->dev; 95 95 96 + WARN_ON(drm_drv_uses_atomic_modeset(dev)); 97 + 96 98 /* 97 99 * We can expect this mutex to be locked if we are not panicking. 98 100 * Locking is currently fubar in the panic handler. ··· 132 130 { 133 131 struct drm_encoder *encoder; 134 132 struct drm_device *dev = crtc->dev; 133 + 134 + WARN_ON(drm_drv_uses_atomic_modeset(dev)); 135 135 136 136 /* 137 137 * We can expect this mutex to be locked if we are not panicking. ··· 216 212 */ 217 213 void drm_helper_disable_unused_functions(struct drm_device *dev) 218 214 { 219 - if (drm_core_check_feature(dev, DRIVER_ATOMIC)) 220 - DRM_ERROR("Called for atomic driver, this is not what you want.\n"); 215 + WARN_ON(drm_drv_uses_atomic_modeset(dev)); 221 216 222 217 drm_modeset_lock_all(dev); 223 218 __drm_helper_disable_unused_functions(dev); ··· 283 280 bool saved_enabled; 284 281 struct drm_encoder *encoder; 285 282 bool ret = true; 283 + 284 + WARN_ON(drm_drv_uses_atomic_modeset(dev)); 286 285 287 286 drm_warn_on_modeset_not_all_locked(dev); 288 287 ··· 391 386 if (!encoder_funcs) 392 387 continue; 393 388 394 - DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", 395 - encoder->base.id, encoder->name, 396 - mode->base.id, mode->name); 389 + DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n", 390 + encoder->base.id, encoder->name, mode->name); 397 391 if (encoder_funcs->mode_set) 398 392 encoder_funcs->mode_set(encoder, mode, adjusted_mode); 399 393 ··· 544 540 545 541 crtc_funcs = set->crtc->helper_private; 546 542 543 + dev = set->crtc->dev; 544 + WARN_ON(drm_drv_uses_atomic_modeset(dev)); 545 + 547 546 if (!set->mode) 548 547 set->fb = NULL; 549 548 ··· 561 554 drm_crtc_helper_disable(set->crtc); 562 555 return 0; 563 556 } 564 - 565 - dev = set->crtc->dev; 566 557 567 558 drm_warn_on_modeset_not_all_locked(dev); 568 559 ··· 880 875 struct drm_crtc *crtc = encoder ? 
encoder->crtc : NULL; 881 876 int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF; 882 877 878 + WARN_ON(drm_drv_uses_atomic_modeset(connector->dev)); 879 + 883 880 if (mode == connector->dpms) 884 881 return 0; 885 882 ··· 953 946 int encoder_dpms; 954 947 bool ret; 955 948 949 + WARN_ON(drm_drv_uses_atomic_modeset(dev)); 950 + 956 951 drm_modeset_lock_all(dev); 957 952 drm_for_each_crtc(crtc, dev) { 958 953 ··· 993 984 drm_modeset_unlock_all(dev); 994 985 } 995 986 EXPORT_SYMBOL(drm_helper_resume_force_mode); 987 + 988 + /** 989 + * drm_helper_force_disable_all - Forcibly turn off all enabled CRTCs 990 + * @dev: DRM device whose CRTCs to turn off 991 + * 992 + * Drivers may want to call this on unload to ensure that all displays are 993 + * unlit and the GPU is in a consistent, low power state. Takes modeset locks. 994 + * 995 + * Note: This should only be used by non-atomic legacy drivers. For an atomic 996 + * version look at drm_atomic_helper_shutdown(). 997 + * 998 + * Returns: 999 + * Zero on success, error code on failure. 1000 + */ 1001 + int drm_helper_force_disable_all(struct drm_device *dev) 1002 + { 1003 + struct drm_crtc *crtc; 1004 + int ret = 0; 1005 + 1006 + drm_modeset_lock_all(dev); 1007 + drm_for_each_crtc(crtc, dev) 1008 + if (crtc->enabled) { 1009 + struct drm_mode_set set = { 1010 + .crtc = crtc, 1011 + }; 1012 + 1013 + ret = drm_mode_set_config_internal(&set); 1014 + if (ret) 1015 + goto out; 1016 + } 1017 + out: 1018 + drm_modeset_unlock_all(dev); 1019 + return ret; 1020 + } 1021 + EXPORT_SYMBOL(drm_helper_force_disable_all);
+1
drivers/gpu/drm/drm_crtc_internal.h
··· 50 50 const struct drm_framebuffer *fb); 51 51 int drm_crtc_register_all(struct drm_device *dev); 52 52 void drm_crtc_unregister_all(struct drm_device *dev); 53 + int drm_crtc_force_disable(struct drm_crtc *crtc); 53 54 54 55 struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc); 55 56
+3
drivers/gpu/drm/drm_dp_helper.c
··· 154 154 default: 155 155 WARN(1, "unknown DP link rate %d, using %x\n", link_rate, 156 156 DP_LINK_BW_1_62); 157 + /* fall through */ 157 158 case 162000: 158 159 return DP_LINK_BW_1_62; 159 160 case 270000: ··· 172 171 switch (link_bw) { 173 172 default: 174 173 WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw); 174 + /* fall through */ 175 175 case DP_LINK_BW_1_62: 176 176 return 162000; 177 177 case DP_LINK_BW_2_7: ··· 554 552 case DP_DS_16BPC: 555 553 return 16; 556 554 } 555 + /* fall through */ 557 556 default: 558 557 return 0; 559 558 }
+824 -235
drivers/gpu/drm/drm_dp_mst_topology.c
··· 33 33 #include <drm/drm_fixed.h> 34 34 #include <drm/drm_atomic.h> 35 35 #include <drm/drm_atomic_helper.h> 36 + #include <drm/drm_crtc_helper.h> 36 37 37 38 /** 38 39 * DOC: dp mst helper ··· 46 45 char *buf); 47 46 static int test_calc_pbn_mode(void); 48 47 49 - static void drm_dp_put_port(struct drm_dp_mst_port *port); 48 + static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port); 50 49 51 50 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, 52 51 int id, ··· 850 849 if (lct > 1) 851 850 memcpy(mstb->rad, rad, lct / 2); 852 851 INIT_LIST_HEAD(&mstb->ports); 853 - kref_init(&mstb->kref); 852 + kref_init(&mstb->topology_kref); 853 + kref_init(&mstb->malloc_kref); 854 854 return mstb; 855 855 } 856 856 857 - static void drm_dp_free_mst_port(struct kref *kref); 858 - 859 857 static void drm_dp_free_mst_branch_device(struct kref *kref) 860 858 { 861 - struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 862 - if (mstb->port_parent) { 863 - if (list_empty(&mstb->port_parent->next)) 864 - kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port); 865 - } 859 + struct drm_dp_mst_branch *mstb = 860 + container_of(kref, struct drm_dp_mst_branch, malloc_kref); 861 + 862 + if (mstb->port_parent) 863 + drm_dp_mst_put_port_malloc(mstb->port_parent); 864 + 866 865 kfree(mstb); 867 866 } 868 867 868 + /** 869 + * DOC: Branch device and port refcounting 870 + * 871 + * Topology refcount overview 872 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~ 873 + * 874 + * The refcounting schemes for &struct drm_dp_mst_branch and &struct 875 + * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have 876 + * two different kinds of refcounts: topology refcounts, and malloc refcounts. 877 + * 878 + * Topology refcounts are not exposed to drivers, and are handled internally 879 + * by the DP MST helpers. 
The helpers use them in order to prevent the 880 + * in-memory topology state from being changed in the middle of critical 881 + * operations like changing the internal state of payload allocations. This 882 + * means each branch and port will be considered to be connected to the rest 883 + * of the topology until it's topology refcount reaches zero. Additionally, 884 + * for ports this means that their associated &struct drm_connector will stay 885 + * registered with userspace until the port's refcount reaches 0. 886 + * 887 + * Malloc refcount overview 888 + * ~~~~~~~~~~~~~~~~~~~~~~~~ 889 + * 890 + * Malloc references are used to keep a &struct drm_dp_mst_port or &struct 891 + * drm_dp_mst_branch allocated even after all of its topology references have 892 + * been dropped, so that the driver or MST helpers can safely access each 893 + * branch's last known state before it was disconnected from the topology. 894 + * When the malloc refcount of a port or branch reaches 0, the memory 895 + * allocation containing the &struct drm_dp_mst_branch or &struct 896 + * drm_dp_mst_port respectively will be freed. 897 + * 898 + * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed 899 + * to drivers. As of writing this documentation, there are no drivers that 900 + * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST 901 + * helpers. Exposing this API to drivers in a race-free manner would take more 902 + * tweaking of the refcounting scheme, however patches are welcome provided 903 + * there is a legitimate driver usecase for this. 904 + * 905 + * Refcount relationships in a topology 906 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 907 + * 908 + * Let's take a look at why the relationship between topology and malloc 909 + * refcounts is designed the way it is. 910 + * 911 + * .. kernel-figure:: dp-mst/topology-figure-1.dot 912 + * 913 + * An example of topology and malloc refs in a DP MST topology with two 914 + * active payloads. 
Topology refcount increments are indicated by solid 915 + * lines, and malloc refcount increments are indicated by dashed lines. 916 + * Each starts from the branch which incremented the refcount, and ends at 917 + * the branch to which the refcount belongs to, i.e. the arrow points the 918 + * same way as the C pointers used to reference a structure. 919 + * 920 + * As you can see in the above figure, every branch increments the topology 921 + * refcount of it's children, and increments the malloc refcount of it's 922 + * parent. Additionally, every payload increments the malloc refcount of it's 923 + * assigned port by 1. 924 + * 925 + * So, what would happen if MSTB #3 from the above figure was unplugged from 926 + * the system, but the driver hadn't yet removed payload #2 from port #3? The 927 + * topology would start to look like the figure below. 928 + * 929 + * .. kernel-figure:: dp-mst/topology-figure-2.dot 930 + * 931 + * Ports and branch devices which have been released from memory are 932 + * colored grey, and references which have been removed are colored red. 933 + * 934 + * Whenever a port or branch device's topology refcount reaches zero, it will 935 + * decrement the topology refcounts of all its children, the malloc refcount 936 + * of its parent, and finally its own malloc refcount. For MSTB #4 and port 937 + * #4, this means they both have been disconnected from the topology and freed 938 + * from memory. But, because payload #2 is still holding a reference to port 939 + * #3, port #3 is removed from the topology but it's &struct drm_dp_mst_port 940 + * is still accessible from memory. This also means port #3 has not yet 941 + * decremented the malloc refcount of MSTB #3, so it's &struct 942 + * drm_dp_mst_branch will also stay allocated in memory until port #3's 943 + * malloc refcount reaches 0. 
944 + * 945 + * This relationship is necessary because in order to release payload #2, we 946 + * need to be able to figure out the last relative of port #3 that's still 947 + * connected to the topology. In this case, we would travel up the topology as 948 + * shown below. 949 + * 950 + * .. kernel-figure:: dp-mst/topology-figure-3.dot 951 + * 952 + * And finally, remove payload #2 by communicating with port #2 through 953 + * sideband transactions. 954 + */ 955 + 956 + /** 957 + * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch 958 + * device 959 + * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of 960 + * 961 + * Increments &drm_dp_mst_branch.malloc_kref. When 962 + * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb 963 + * will be released and @mstb may no longer be used. 964 + * 965 + * See also: drm_dp_mst_put_mstb_malloc() 966 + */ 967 + static void 968 + drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb) 969 + { 970 + kref_get(&mstb->malloc_kref); 971 + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)); 972 + } 973 + 974 + /** 975 + * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch 976 + * device 977 + * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of 978 + * 979 + * Decrements &drm_dp_mst_branch.malloc_kref. When 980 + * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb 981 + * will be released and @mstb may no longer be used. 
 982 + * 983 + * See also: drm_dp_mst_get_mstb_malloc() 984 + */ 985 + static void 986 + drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb) 987 + { 988 + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1); 989 + kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device); 990 + } 991 + 992 + static void drm_dp_free_mst_port(struct kref *kref) 993 + { 994 + struct drm_dp_mst_port *port = 995 + container_of(kref, struct drm_dp_mst_port, malloc_kref); 996 + 997 + drm_dp_mst_put_mstb_malloc(port->parent); 998 + kfree(port); 999 + } 1000 + 1001 + /** 1002 + * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port 1003 + * @port: The &struct drm_dp_mst_port to increment the malloc refcount of 1004 + * 1005 + * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref 1006 + * reaches 0, the memory allocation for @port will be released and @port may 1007 + * no longer be used. 1008 + * 1009 + * Because @port could potentially be freed at any time by the DP MST helpers 1010 + * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this 1011 + * function, drivers that wish to make use of &struct drm_dp_mst_port should 1012 + * ensure that they grab at least one main malloc reference to their MST ports 1013 + * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before 1014 + * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0. 
1015 + * 1016 + * See also: drm_dp_mst_put_port_malloc() 1017 + */ 1018 + void 1019 + drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port) 1020 + { 1021 + kref_get(&port->malloc_kref); 1022 + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref)); 1023 + } 1024 + EXPORT_SYMBOL(drm_dp_mst_get_port_malloc); 1025 + 1026 + /** 1027 + * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port 1028 + * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of 1029 + * 1030 + * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref 1031 + * reaches 0, the memory allocation for @port will be released and @port may 1032 + * no longer be used. 1033 + * 1034 + * See also: drm_dp_mst_get_port_malloc() 1035 + */ 1036 + void 1037 + drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) 1038 + { 1039 + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1); 1040 + kref_put(&port->malloc_kref, drm_dp_free_mst_port); 1041 + } 1042 + EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); 1043 + 869 1044 static void drm_dp_destroy_mst_branch_device(struct kref *kref) 870 1045 { 871 - struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 1046 + struct drm_dp_mst_branch *mstb = 1047 + container_of(kref, struct drm_dp_mst_branch, topology_kref); 1048 + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 872 1049 struct drm_dp_mst_port *port, *tmp; 873 1050 bool wake_tx = false; 874 1051 875 - /* 876 - * init kref again to be used by ports to remove mst branch when it is 877 - * not needed anymore 878 - */ 879 - kref_init(kref); 880 - 881 - if (mstb->port_parent && list_empty(&mstb->port_parent->next)) 882 - kref_get(&mstb->port_parent->kref); 883 - 884 - /* 885 - * destroy all ports - don't need lock 886 - * as there are no more references to the mst branch 887 - * device at this point. 
 888 - */ 1052 + mutex_lock(&mgr->lock); 889 1053 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { 890 1054 list_del(&port->next); 891 - drm_dp_put_port(port); 1055 + drm_dp_mst_topology_put_port(port); 892 1056 } 1057 + mutex_unlock(&mgr->lock); 893 1058 894 1059 /* drop any tx slots msg */ 895 1060 mutex_lock(&mstb->mgr->qlock); ··· 1074 907 if (wake_tx) 1075 908 wake_up_all(&mstb->mgr->tx_waitq); 1076 909 1077 - kref_put(kref, drm_dp_free_mst_branch_device); 910 + drm_dp_mst_put_mstb_malloc(mstb); 1078 911 } 1079 912 1080 - static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) 913 + /** 914 + * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a 915 + * branch device unless it's zero 916 + * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of 917 + * 918 + * Attempts to grab a topology reference to @mstb, if it hasn't yet been 919 + * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has 920 + * reached 0). Holding a topology reference implies that a malloc reference 921 + * will be held to @mstb as long as the user holds the topology reference. 
 926 + * 927 + * See also: 928 + * drm_dp_mst_topology_get_mstb() 929 + * drm_dp_mst_topology_put_mstb() 930 + * 931 + * Returns: 932 + * * 1: A topology reference was grabbed successfully 933 + * * 0: @mstb is no longer in the topology, no reference was grabbed 934 + */ 935 + static int __must_check 936 + drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) 1081 937 { 1082 - kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); 938 + int ret = kref_get_unless_zero(&mstb->topology_kref); 939 + 940 + if (ret) 941 + DRM_DEBUG("mstb %p (%d)\n", mstb, 942 + kref_read(&mstb->topology_kref)); 943 + 944 + return ret; 1083 945 } 1084 946 947 + /** 948 + * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a 949 + * branch device 950 + * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of 951 + * 952 + * Increments &drm_dp_mst_branch.topology_kref without checking whether or 953 + * not it's already reached 0. This is only valid to use in scenarios where 954 + * you are already guaranteed to have at least one active topology reference 955 + * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used. 956 + * 957 + * See also: 958 + * drm_dp_mst_topology_try_get_mstb() 959 + * drm_dp_mst_topology_put_mstb() 960 + */ 961 + static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) 962 + { 963 + WARN_ON(kref_read(&mstb->topology_kref) == 0); 964 + kref_get(&mstb->topology_kref); 965 + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); 966 + } 967 + 968 + /** 969 + * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch 970 + * device 971 + * @mstb: The &struct drm_dp_mst_branch to release the topology reference from 972 + * 973 + * Releases a topology reference from @mstb by decrementing 974 + * &drm_dp_mst_branch.topology_kref. 
975 + * 976 + * See also: 977 + * drm_dp_mst_topology_try_get_mstb() 978 + * drm_dp_mst_topology_get_mstb() 979 + */ 980 + static void 981 + drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) 982 + { 983 + DRM_DEBUG("mstb %p (%d)\n", 984 + mstb, kref_read(&mstb->topology_kref) - 1); 985 + kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); 986 + } 1085 987 1086 988 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) 1087 989 { ··· 1165 929 case DP_PEER_DEVICE_MST_BRANCHING: 1166 930 mstb = port->mstb; 1167 931 port->mstb = NULL; 1168 - drm_dp_put_mst_branch_device(mstb); 932 + drm_dp_mst_topology_put_mstb(mstb); 1169 933 break; 1170 934 } 1171 935 } 1172 936 1173 937 static void drm_dp_destroy_port(struct kref *kref) 1174 938 { 1175 - struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 939 + struct drm_dp_mst_port *port = 940 + container_of(kref, struct drm_dp_mst_port, topology_kref); 1176 941 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 1177 942 1178 943 if (!port->input) { 1179 - port->vcpi.num_slots = 0; 1180 - 1181 944 kfree(port->cached_edid); 1182 945 1183 946 /* ··· 1190 955 * from an EDID retrieval */ 1191 956 1192 957 mutex_lock(&mgr->destroy_connector_lock); 1193 - kref_get(&port->parent->kref); 1194 958 list_add(&port->next, &mgr->destroy_connector_list); 1195 959 mutex_unlock(&mgr->destroy_connector_lock); 1196 960 schedule_work(&mgr->destroy_connector_work); ··· 1200 966 drm_dp_port_teardown_pdt(port, port->pdt); 1201 967 port->pdt = DP_PEER_DEVICE_NONE; 1202 968 } 1203 - kfree(port); 969 + drm_dp_mst_put_port_malloc(port); 1204 970 } 1205 971 1206 - static void drm_dp_put_port(struct drm_dp_mst_port *port) 972 + /** 973 + * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a 974 + * port unless its zero 975 + * @port: &struct drm_dp_mst_port to increment the topology refcount of 976 + * 977 + * Attempts to grab a topology reference to @port, if 
it hasn't yet been 978 + * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached 979 + * 0). Holding a topology reference implies that a malloc reference will be 980 + * held to @port as long as the user holds the topology reference. 981 + * 982 + * Care should be taken to ensure that the user has at least one malloc 983 + * reference to @port. If you already have a topology reference to @port, you 984 + * should use drm_dp_mst_topology_get_port() instead. 985 + * 986 + * See also: 987 + * drm_dp_mst_topology_get_port() 988 + * drm_dp_mst_topology_put_port() 989 + * 990 + * Returns: 991 + * * 1: A topology reference was grabbed successfully 992 + * * 0: @port is no longer in the topology, no reference was grabbed 993 + */ 994 + static int __must_check 995 + drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) 1207 996 { 1208 - kref_put(&port->kref, drm_dp_destroy_port); 997 + int ret = kref_get_unless_zero(&port->topology_kref); 998 + 999 + if (ret) 1000 + DRM_DEBUG("port %p (%d)\n", port, 1001 + kref_read(&port->topology_kref)); 1002 + 1003 + return ret; 1209 1004 } 1210 1005 1211 - static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find) 1006 + /** 1007 + * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port 1008 + * @port: The &struct drm_dp_mst_port to increment the topology refcount of 1009 + * 1010 + * Increments &drm_dp_mst_port.topology_refcount without checking whether or 1011 + * not it's already reached 0. This is only valid to use in scenarios where 1012 + * you are already guaranteed to have at least one active topology reference 1013 + * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used. 
1014 + * 1015 + * See also: 1016 + * drm_dp_mst_topology_try_get_port() 1017 + * drm_dp_mst_topology_put_port() 1018 + */ 1019 + static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) 1020 + { 1021 + WARN_ON(kref_read(&port->topology_kref) == 0); 1022 + kref_get(&port->topology_kref); 1023 + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); 1024 + } 1025 + 1026 + /** 1027 + * drm_dp_mst_topology_put_port() - release a topology reference to a port 1028 + * @port: The &struct drm_dp_mst_port to release the topology reference from 1029 + * 1030 + * Releases a topology reference from @port by decrementing 1031 + * &drm_dp_mst_port.topology_kref. 1032 + * 1033 + * See also: 1034 + * drm_dp_mst_topology_try_get_port() 1035 + * drm_dp_mst_topology_get_port() 1036 + */ 1037 + static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) 1038 + { 1039 + DRM_DEBUG("port %p (%d)\n", 1040 + port, kref_read(&port->topology_kref) - 1); 1041 + kref_put(&port->topology_kref, drm_dp_destroy_port); 1042 + } 1043 + 1044 + static struct drm_dp_mst_branch * 1045 + drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb, 1046 + struct drm_dp_mst_branch *to_find) 1212 1047 { 1213 1048 struct drm_dp_mst_port *port; 1214 1049 struct drm_dp_mst_branch *rmstb; 1215 - if (to_find == mstb) { 1216 - kref_get(&mstb->kref); 1050 + 1051 + if (to_find == mstb) 1217 1052 return mstb; 1218 - } 1053 + 1219 1054 list_for_each_entry(port, &mstb->ports, next) { 1220 1055 if (port->mstb) { 1221 - rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find); 1056 + rmstb = drm_dp_mst_topology_get_mstb_validated_locked( 1057 + port->mstb, to_find); 1222 1058 if (rmstb) 1223 1059 return rmstb; 1224 1060 } ··· 1296 992 return NULL; 1297 993 } 1298 994 1299 - static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) 995 + static struct drm_dp_mst_branch * 996 + 
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr, 997 + struct drm_dp_mst_branch *mstb) 1300 998 { 1301 999 struct drm_dp_mst_branch *rmstb = NULL; 1000 + 1302 1001 mutex_lock(&mgr->lock); 1303 - if (mgr->mst_primary) 1304 - rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); 1002 + if (mgr->mst_primary) { 1003 + rmstb = drm_dp_mst_topology_get_mstb_validated_locked( 1004 + mgr->mst_primary, mstb); 1005 + 1006 + if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb)) 1007 + rmstb = NULL; 1008 + } 1305 1009 mutex_unlock(&mgr->lock); 1306 1010 return rmstb; 1307 1011 } 1308 1012 1309 - static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find) 1013 + static struct drm_dp_mst_port * 1014 + drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb, 1015 + struct drm_dp_mst_port *to_find) 1310 1016 { 1311 1017 struct drm_dp_mst_port *port, *mport; 1312 1018 1313 1019 list_for_each_entry(port, &mstb->ports, next) { 1314 - if (port == to_find) { 1315 - kref_get(&port->kref); 1020 + if (port == to_find) 1316 1021 return port; 1317 - } 1022 + 1318 1023 if (port->mstb) { 1319 - mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find); 1024 + mport = drm_dp_mst_topology_get_port_validated_locked( 1025 + port->mstb, to_find); 1320 1026 if (mport) 1321 1027 return mport; 1322 1028 } ··· 1334 1020 return NULL; 1335 1021 } 1336 1022 1337 - static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 1023 + static struct drm_dp_mst_port * 1024 + drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, 1025 + struct drm_dp_mst_port *port) 1338 1026 { 1339 1027 struct drm_dp_mst_port *rport = NULL; 1028 + 1340 1029 mutex_lock(&mgr->lock); 1341 - if (mgr->mst_primary) 1342 - rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port); 1030 + if 
(mgr->mst_primary) { 1031 + rport = drm_dp_mst_topology_get_port_validated_locked( 1032 + mgr->mst_primary, port); 1033 + 1034 + if (rport && !drm_dp_mst_topology_try_get_port(rport)) 1035 + rport = NULL; 1036 + } 1343 1037 mutex_unlock(&mgr->lock); 1344 1038 return rport; 1345 1039 } ··· 1355 1033 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) 1356 1034 { 1357 1035 struct drm_dp_mst_port *port; 1036 + int ret; 1358 1037 1359 1038 list_for_each_entry(port, &mstb->ports, next) { 1360 1039 if (port->port_num == port_num) { 1361 - kref_get(&port->kref); 1362 - return port; 1040 + ret = drm_dp_mst_topology_try_get_port(port); 1041 + return ret ? port : NULL; 1363 1042 } 1364 1043 } 1365 1044 ··· 1409 1086 if (port->mstb) { 1410 1087 port->mstb->mgr = port->mgr; 1411 1088 port->mstb->port_parent = port; 1089 + /* 1090 + * Make sure this port's memory allocation stays 1091 + * around until it's child MSTB releases it 1092 + */ 1093 + drm_dp_mst_get_port_malloc(port); 1412 1094 1413 1095 send_link = true; 1414 1096 } ··· 1474 1146 bool created = false; 1475 1147 int old_pdt = 0; 1476 1148 int old_ddps = 0; 1149 + 1477 1150 port = drm_dp_get_port(mstb, port_msg->port_number); 1478 1151 if (!port) { 1479 1152 port = kzalloc(sizeof(*port), GFP_KERNEL); 1480 1153 if (!port) 1481 1154 return; 1482 - kref_init(&port->kref); 1155 + kref_init(&port->topology_kref); 1156 + kref_init(&port->malloc_kref); 1483 1157 port->parent = mstb; 1484 1158 port->port_num = port_msg->port_number; 1485 1159 port->mgr = mstb->mgr; 1486 1160 port->aux.name = "DPMST"; 1487 1161 port->aux.dev = dev->dev; 1162 + 1163 + /* 1164 + * Make sure the memory allocation for our parent branch stays 1165 + * around until our own memory allocation is released 1166 + */ 1167 + drm_dp_mst_get_mstb_malloc(mstb); 1168 + 1488 1169 created = true; 1489 1170 } else { 1490 1171 old_pdt = port->pdt; ··· 1513 1176 for this list */ 1514 1177 if (created) { 1515 1178 
mutex_lock(&mstb->mgr->lock); 1516 - kref_get(&port->kref); 1179 + drm_dp_mst_topology_get_port(port); 1517 1180 list_add(&port->next, &mstb->ports); 1518 1181 mutex_unlock(&mstb->mgr->lock); 1519 1182 } 1520 1183 1521 1184 if (old_ddps != port->ddps) { 1522 1185 if (port->ddps) { 1523 - if (!port->input) 1524 - drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); 1186 + if (!port->input) { 1187 + drm_dp_send_enum_path_resources(mstb->mgr, 1188 + mstb, port); 1189 + } 1525 1190 } else { 1526 1191 port->available_pbn = 0; 1527 - } 1192 + } 1528 1193 } 1529 1194 1530 1195 if (old_pdt != port->pdt && !port->input) { ··· 1540 1201 if (created && !port->input) { 1541 1202 char proppath[255]; 1542 1203 1543 - build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); 1544 - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); 1204 + build_mst_prop_path(mstb, port->port_num, proppath, 1205 + sizeof(proppath)); 1206 + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, 1207 + port, 1208 + proppath); 1545 1209 if (!port->connector) { 1546 1210 /* remove it from the port list */ 1547 1211 mutex_lock(&mstb->mgr->lock); 1548 1212 list_del(&port->next); 1549 1213 mutex_unlock(&mstb->mgr->lock); 1550 1214 /* drop port list reference */ 1551 - drm_dp_put_port(port); 1215 + drm_dp_mst_topology_put_port(port); 1552 1216 goto out; 1553 1217 } 1554 1218 if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || 1555 1219 port->pdt == DP_PEER_DEVICE_SST_SINK) && 1556 1220 port->port_num >= DP_MST_LOGICAL_PORT_0) { 1557 - port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1221 + port->cached_edid = drm_get_edid(port->connector, 1222 + &port->aux.ddc); 1558 1223 drm_connector_set_tile_property(port->connector); 1559 1224 } 1560 1225 (*mstb->mgr->cbs->register_connector)(port->connector); ··· 1566 1223 1567 1224 out: 1568 1225 /* put reference to this port */ 1569 - drm_dp_put_port(port); 1226 + 
drm_dp_mst_topology_put_port(port); 1570 1227 } 1571 1228 1572 1229 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, ··· 1601 1258 dowork = true; 1602 1259 } 1603 1260 1604 - drm_dp_put_port(port); 1261 + drm_dp_mst_topology_put_port(port); 1605 1262 if (dowork) 1606 1263 queue_work(system_long_wq, &mstb->mgr->work); 1607 1264 ··· 1612 1269 { 1613 1270 struct drm_dp_mst_branch *mstb; 1614 1271 struct drm_dp_mst_port *port; 1615 - int i; 1272 + int i, ret; 1616 1273 /* find the port by iterating down */ 1617 1274 1618 1275 mutex_lock(&mgr->lock); ··· 1637 1294 } 1638 1295 } 1639 1296 } 1640 - kref_get(&mstb->kref); 1297 + ret = drm_dp_mst_topology_try_get_mstb(mstb); 1298 + if (!ret) 1299 + mstb = NULL; 1641 1300 out: 1642 1301 mutex_unlock(&mgr->lock); 1643 1302 return mstb; ··· 1669 1324 return NULL; 1670 1325 } 1671 1326 1672 - static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( 1673 - struct drm_dp_mst_topology_mgr *mgr, 1674 - uint8_t *guid) 1327 + static struct drm_dp_mst_branch * 1328 + drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, 1329 + uint8_t *guid) 1675 1330 { 1676 1331 struct drm_dp_mst_branch *mstb; 1332 + int ret; 1677 1333 1678 1334 /* find the port by iterating down */ 1679 1335 mutex_lock(&mgr->lock); 1680 1336 1681 1337 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); 1682 - 1683 - if (mstb) 1684 - kref_get(&mstb->kref); 1338 + if (mstb) { 1339 + ret = drm_dp_mst_topology_try_get_mstb(mstb); 1340 + if (!ret) 1341 + mstb = NULL; 1342 + } 1685 1343 1686 1344 mutex_unlock(&mgr->lock); 1687 1345 return mstb; ··· 1709 1361 drm_dp_send_enum_path_resources(mgr, mstb, port); 1710 1362 1711 1363 if (port->mstb) { 1712 - mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb); 1364 + mstb_child = drm_dp_mst_topology_get_mstb_validated( 1365 + mgr, port->mstb); 1713 1366 if (mstb_child) { 1714 1367 drm_dp_check_and_send_link_address(mgr, mstb_child); 1715 - 
drm_dp_put_mst_branch_device(mstb_child); 1368 + drm_dp_mst_topology_put_mstb(mstb_child); 1716 1369 } 1717 1370 } 1718 1371 } ··· 1723 1374 { 1724 1375 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); 1725 1376 struct drm_dp_mst_branch *mstb; 1377 + int ret; 1726 1378 1727 1379 mutex_lock(&mgr->lock); 1728 1380 mstb = mgr->mst_primary; 1729 1381 if (mstb) { 1730 - kref_get(&mstb->kref); 1382 + ret = drm_dp_mst_topology_try_get_mstb(mstb); 1383 + if (!ret) 1384 + mstb = NULL; 1731 1385 } 1732 1386 mutex_unlock(&mgr->lock); 1733 1387 if (mstb) { 1734 1388 drm_dp_check_and_send_link_address(mgr, mstb); 1735 - drm_dp_put_mst_branch_device(mstb); 1389 + drm_dp_mst_topology_put_mstb(mstb); 1736 1390 } 1737 1391 } 1738 1392 ··· 1991 1639 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1992 1640 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1993 1641 } 1994 - (*mgr->cbs->hotplug)(mgr); 1642 + drm_kms_helper_hotplug_event(mgr->dev); 1995 1643 } 1996 1644 } else { 1997 1645 mstb->link_address_sent = false; ··· 2046 1694 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); 2047 1695 } 2048 1696 2049 - static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, 2050 - struct drm_dp_mst_branch *mstb, 2051 - int *port_num) 1697 + /* 1698 + * Searches upwards in the topology starting from mstb to try to find the 1699 + * closest available parent of mstb that's still connected to the rest of the 1700 + * topology. This can be used in order to perform operations like releasing 1701 + * payloads, where the branch device which owned the payload may no longer be 1702 + * around and thus would require that the payload on the last living relative 1703 + * be freed instead. 
1704 + */ 1705 + static struct drm_dp_mst_branch * 1706 + drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, 1707 + struct drm_dp_mst_branch *mstb, 1708 + int *port_num) 2052 1709 { 2053 1710 struct drm_dp_mst_branch *rmstb = NULL; 2054 1711 struct drm_dp_mst_port *found_port; 2055 - mutex_lock(&mgr->lock); 2056 - if (mgr->mst_primary) { 2057 - found_port = drm_dp_get_last_connected_port_to_mstb(mstb); 2058 1712 2059 - if (found_port) { 1713 + mutex_lock(&mgr->lock); 1714 + if (!mgr->mst_primary) 1715 + goto out; 1716 + 1717 + do { 1718 + found_port = drm_dp_get_last_connected_port_to_mstb(mstb); 1719 + if (!found_port) 1720 + break; 1721 + 1722 + if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { 2060 1723 rmstb = found_port->parent; 2061 - kref_get(&rmstb->kref); 2062 1724 *port_num = found_port->port_num; 1725 + } else { 1726 + /* Search again, starting from this parent */ 1727 + mstb = found_port->parent; 2063 1728 } 2064 - } 1729 + } while (!rmstb); 1730 + out: 2065 1731 mutex_unlock(&mgr->lock); 2066 1732 return rmstb; 2067 1733 } ··· 2095 1725 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 2096 1726 int i; 2097 1727 2098 - port = drm_dp_get_validated_port_ref(mgr, port); 2099 - if (!port) 2100 - return -EINVAL; 2101 - 2102 1728 port_num = port->port_num; 2103 - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1729 + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 2104 1730 if (!mstb) { 2105 - mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); 1731 + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, 1732 + port->parent, 1733 + &port_num); 2106 1734 2107 - if (!mstb) { 2108 - drm_dp_put_port(port); 1735 + if (!mstb) 2109 1736 return -EINVAL; 2110 - } 2111 1737 } 2112 1738 2113 1739 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); ··· 2122 1756 2123 1757 drm_dp_queue_down_tx(mgr, txmsg); 2124 1758 1759 + /* 1760 + * FIXME: there is a small chance that between getting the last 1761 + * 
connected mstb and sending the payload message, the last connected 1762 + * mstb could also be removed from the topology. In the future, this 1763 + * needs to be fixed by restarting the 1764 + * drm_dp_get_last_connected_port_and_mstb() search in the event of a 1765 + * timeout if the topology is still connected to the system. 1766 + */ 2125 1767 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 2126 1768 if (ret > 0) { 2127 - if (txmsg->reply.reply_type == 1) { 1769 + if (txmsg->reply.reply_type == 1) 2128 1770 ret = -EINVAL; 2129 - } else 1771 + else 2130 1772 ret = 0; 2131 1773 } 2132 1774 kfree(txmsg); 2133 1775 fail_put: 2134 - drm_dp_put_mst_branch_device(mstb); 2135 - drm_dp_put_port(port); 1776 + drm_dp_mst_topology_put_mstb(mstb); 2136 1777 return ret; 2137 1778 } 2138 1779 ··· 2149 1776 struct drm_dp_sideband_msg_tx *txmsg; 2150 1777 int len, ret; 2151 1778 2152 - port = drm_dp_get_validated_port_ref(mgr, port); 1779 + port = drm_dp_mst_topology_get_port_validated(mgr, port); 2153 1780 if (!port) 2154 1781 return -EINVAL; 2155 1782 2156 1783 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 2157 1784 if (!txmsg) { 2158 - drm_dp_put_port(port); 1785 + drm_dp_mst_topology_put_port(port); 2159 1786 return -ENOMEM; 2160 1787 } 2161 1788 ··· 2171 1798 ret = 0; 2172 1799 } 2173 1800 kfree(txmsg); 2174 - drm_dp_put_port(port); 1801 + drm_dp_mst_topology_put_port(port); 2175 1802 2176 1803 return ret; 2177 1804 } ··· 2244 1871 */ 2245 1872 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) 2246 1873 { 2247 - int i, j; 2248 - int cur_slots = 1; 2249 1874 struct drm_dp_payload req_payload; 2250 1875 struct drm_dp_mst_port *port; 1876 + int i, j; 1877 + int cur_slots = 1; 2251 1878 2252 1879 mutex_lock(&mgr->payload_lock); 2253 1880 for (i = 0; i < mgr->max_payloads; i++) { 1881 + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; 1882 + struct drm_dp_payload *payload = &mgr->payloads[i]; 1883 + bool put_port = false; 1884 + 2254 1885 /* solve the current 
payloads - compare to the hw ones 2255 1886 - update the hw view */ 2256 1887 req_payload.start_slot = cur_slots; 2257 - if (mgr->proposed_vcpis[i]) { 2258 - port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 2259 - port = drm_dp_get_validated_port_ref(mgr, port); 2260 - if (!port) { 2261 - mutex_unlock(&mgr->payload_lock); 2262 - return -EINVAL; 1888 + if (vcpi) { 1889 + port = container_of(vcpi, struct drm_dp_mst_port, 1890 + vcpi); 1891 + 1892 + /* Validated ports don't matter if we're releasing 1893 + * VCPI 1894 + */ 1895 + if (vcpi->num_slots) { 1896 + port = drm_dp_mst_topology_get_port_validated( 1897 + mgr, port); 1898 + if (!port) { 1899 + mutex_unlock(&mgr->payload_lock); 1900 + return -EINVAL; 1901 + } 1902 + put_port = true; 2263 1903 } 2264 - req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 2265 - req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; 1904 + 1905 + req_payload.num_slots = vcpi->num_slots; 1906 + req_payload.vcpi = vcpi->vcpi; 2266 1907 } else { 2267 1908 port = NULL; 2268 1909 req_payload.num_slots = 0; 2269 1910 } 2270 1911 2271 - if (mgr->payloads[i].start_slot != req_payload.start_slot) { 2272 - mgr->payloads[i].start_slot = req_payload.start_slot; 2273 - } 1912 + payload->start_slot = req_payload.start_slot; 2274 1913 /* work out what is required to happen with this payload */ 2275 - if (mgr->payloads[i].num_slots != req_payload.num_slots) { 1914 + if (payload->num_slots != req_payload.num_slots) { 2276 1915 2277 1916 /* need to push an update for this payload */ 2278 1917 if (req_payload.num_slots) { 2279 - drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload); 2280 - mgr->payloads[i].num_slots = req_payload.num_slots; 2281 - mgr->payloads[i].vcpi = req_payload.vcpi; 2282 - } else if (mgr->payloads[i].num_slots) { 2283 - mgr->payloads[i].num_slots = 0; 2284 - drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]); 2285 - req_payload.payload_state = 
mgr->payloads[i].payload_state; 2286 - mgr->payloads[i].start_slot = 0; 1918 + drm_dp_create_payload_step1(mgr, vcpi->vcpi, 1919 + &req_payload); 1920 + payload->num_slots = req_payload.num_slots; 1921 + payload->vcpi = req_payload.vcpi; 1922 + 1923 + } else if (payload->num_slots) { 1924 + payload->num_slots = 0; 1925 + drm_dp_destroy_payload_step1(mgr, port, 1926 + payload->vcpi, 1927 + payload); 1928 + req_payload.payload_state = 1929 + payload->payload_state; 1930 + payload->start_slot = 0; 2287 1931 } 2288 - mgr->payloads[i].payload_state = req_payload.payload_state; 1932 + payload->payload_state = req_payload.payload_state; 2289 1933 } 2290 1934 cur_slots += req_payload.num_slots; 2291 1935 2292 - if (port) 2293 - drm_dp_put_port(port); 1936 + if (put_port) 1937 + drm_dp_mst_topology_put_port(port); 2294 1938 } 2295 1939 2296 1940 for (i = 0; i < mgr->max_payloads; i++) { 2297 - if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 2298 - DRM_DEBUG_KMS("removing payload %d\n", i); 2299 - for (j = i; j < mgr->max_payloads - 1; j++) { 2300 - memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload)); 2301 - mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; 2302 - if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) { 2303 - set_bit(j + 1, &mgr->payload_mask); 2304 - } else { 2305 - clear_bit(j + 1, &mgr->payload_mask); 2306 - } 2307 - } 2308 - memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload)); 2309 - mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; 2310 - clear_bit(mgr->max_payloads, &mgr->payload_mask); 1941 + if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) 1942 + continue; 2311 1943 1944 + DRM_DEBUG_KMS("removing payload %d\n", i); 1945 + for (j = i; j < mgr->max_payloads - 1; j++) { 1946 + mgr->payloads[j] = mgr->payloads[j + 1]; 1947 + mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; 1948 + 1949 + if (mgr->proposed_vcpis[j] && 1950 + 
mgr->proposed_vcpis[j]->num_slots) { 1951 + set_bit(j + 1, &mgr->payload_mask); 1952 + } else { 1953 + clear_bit(j + 1, &mgr->payload_mask); 1954 + } 2312 1955 } 1956 + 1957 + memset(&mgr->payloads[mgr->max_payloads - 1], 0, 1958 + sizeof(struct drm_dp_payload)); 1959 + mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; 1960 + clear_bit(mgr->max_payloads, &mgr->payload_mask); 2313 1961 } 2314 1962 mutex_unlock(&mgr->payload_lock); 2315 1963 ··· 2406 2012 struct drm_dp_sideband_msg_tx *txmsg; 2407 2013 struct drm_dp_mst_branch *mstb; 2408 2014 2409 - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 2015 + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 2410 2016 if (!mstb) 2411 2017 return -EINVAL; 2412 2018 ··· 2430 2036 } 2431 2037 kfree(txmsg); 2432 2038 fail_put: 2433 - drm_dp_put_mst_branch_device(mstb); 2039 + drm_dp_mst_topology_put_mstb(mstb); 2434 2040 return ret; 2435 2041 } 2436 2042 ··· 2540 2146 2541 2147 /* give this the main reference */ 2542 2148 mgr->mst_primary = mstb; 2543 - kref_get(&mgr->mst_primary->kref); 2149 + drm_dp_mst_topology_get_mstb(mgr->mst_primary); 2544 2150 2545 2151 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2546 2152 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); ··· 2574 2180 out_unlock: 2575 2181 mutex_unlock(&mgr->lock); 2576 2182 if (mstb) 2577 - drm_dp_put_mst_branch_device(mstb); 2183 + drm_dp_mst_topology_put_mstb(mstb); 2578 2184 return ret; 2579 2185 2580 2186 } ··· 2739 2345 mgr->down_rep_recv.initial_hdr.lct, 2740 2346 mgr->down_rep_recv.initial_hdr.rad[0], 2741 2347 mgr->down_rep_recv.msg[0]); 2742 - drm_dp_put_mst_branch_device(mstb); 2348 + drm_dp_mst_topology_put_mstb(mstb); 2743 2349 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2744 2350 return 0; 2745 2351 } ··· 2750 2356 } 2751 2357 2752 2358 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2753 - drm_dp_put_mst_branch_device(mstb); 2359 + drm_dp_mst_topology_put_mstb(mstb); 2754 
2360 2755 2361 mutex_lock(&mgr->qlock); 2756 2362 txmsg->state = DRM_DP_SIDEBAND_TX_RX; ··· 2806 2412 drm_dp_update_port(mstb, &msg.u.conn_stat); 2807 2413 2808 2414 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2809 - (*mgr->cbs->hotplug)(mgr); 2415 + drm_kms_helper_hotplug_event(mgr->dev); 2810 2416 2811 2417 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2812 2418 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); ··· 2823 2429 } 2824 2430 2825 2431 if (mstb) 2826 - drm_dp_put_mst_branch_device(mstb); 2432 + drm_dp_mst_topology_put_mstb(mstb); 2827 2433 2828 2434 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2829 2435 } ··· 2883 2489 enum drm_connector_status status = connector_status_disconnected; 2884 2490 2885 2491 /* we need to search for the port in the mgr in case its gone */ 2886 - port = drm_dp_get_validated_port_ref(mgr, port); 2492 + port = drm_dp_mst_topology_get_port_validated(mgr, port); 2887 2493 if (!port) 2888 2494 return connector_status_disconnected; 2889 2495 ··· 2908 2514 break; 2909 2515 } 2910 2516 out: 2911 - drm_dp_put_port(port); 2517 + drm_dp_mst_topology_put_port(port); 2912 2518 return status; 2913 2519 } 2914 2520 EXPORT_SYMBOL(drm_dp_mst_detect_port); ··· 2925 2531 { 2926 2532 bool ret = false; 2927 2533 2928 - port = drm_dp_get_validated_port_ref(mgr, port); 2534 + port = drm_dp_mst_topology_get_port_validated(mgr, port); 2929 2535 if (!port) 2930 2536 return ret; 2931 2537 ret = port->has_audio; 2932 - drm_dp_put_port(port); 2538 + drm_dp_mst_topology_put_port(port); 2933 2539 return ret; 2934 2540 } 2935 2541 EXPORT_SYMBOL(drm_dp_mst_port_has_audio); ··· 2949 2555 struct edid *edid = NULL; 2950 2556 2951 2557 /* we need to 
search for the port in the mgr in case its gone */ 2952 - port = drm_dp_get_validated_port_ref(mgr, port); 2558 + port = drm_dp_mst_topology_get_port_validated(mgr, port); 2953 2559 if (!port) 2954 2560 return NULL; 2955 2561 ··· 2960 2566 drm_connector_set_tile_property(connector); 2961 2567 } 2962 2568 port->has_audio = drm_detect_monitor_audio(edid); 2963 - drm_dp_put_port(port); 2569 + drm_dp_mst_topology_put_port(port); 2964 2570 return edid; 2965 2571 } 2966 2572 EXPORT_SYMBOL(drm_dp_mst_get_edid); ··· 3011 2617 } 3012 2618 3013 2619 /** 3014 - * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state 2620 + * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state 3015 2621 * @state: global atomic state 3016 2622 * @mgr: MST topology manager for the port 3017 2623 * @port: port to find vcpi slots for 3018 2624 * @pbn: bandwidth required for the mode in PBN 3019 2625 * 3020 - * RETURNS: 3021 - * Total slots in the atomic state assigned for this port or error 2626 + * Allocates VCPI slots to @port, replacing any previous VCPI allocations it 2627 + * may have had. Any atomic drivers which support MST must call this function 2628 + * in their &drm_encoder_helper_funcs.atomic_check() callback to change the 2629 + * current VCPI allocation for the new state, but only when 2630 + * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set 2631 + * to ensure compatibility with userspace applications that still use the 2632 + * legacy modesetting UAPI. 2633 + * 2634 + * Allocations set by this function are not checked against the bandwidth 2635 + * restraints of @mgr until the driver calls drm_dp_mst_atomic_check(). 2636 + * 2637 + * Additionally, it is OK to call this function multiple times on the same 2638 + * @port as needed. It is not OK however, to call this function and 2639 + * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase. 
2640 + * 2641 + * See also: 2642 + * drm_dp_atomic_release_vcpi_slots() 2643 + * drm_dp_mst_atomic_check() 2644 + * 2645 + * Returns: 2646 + * Total slots in the atomic state assigned for this port, or a negative error 2647 + * code if the port no longer exists 3022 2648 */ 3023 2649 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, 3024 2650 struct drm_dp_mst_topology_mgr *mgr, 3025 2651 struct drm_dp_mst_port *port, int pbn) 3026 2652 { 3027 2653 struct drm_dp_mst_topology_state *topology_state; 3028 - int req_slots; 2654 + struct drm_dp_vcpi_allocation *pos, *vcpi = NULL; 2655 + int prev_slots, req_slots, ret; 3029 2656 3030 2657 topology_state = drm_atomic_get_mst_topology_state(state, mgr); 3031 2658 if (IS_ERR(topology_state)) 3032 2659 return PTR_ERR(topology_state); 3033 2660 3034 - port = drm_dp_get_validated_port_ref(mgr, port); 2661 + port = drm_dp_mst_topology_get_port_validated(mgr, port); 3035 2662 if (port == NULL) 3036 2663 return -EINVAL; 3037 - req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 3038 - DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n", 3039 - req_slots, topology_state->avail_slots); 3040 2664 3041 - if (req_slots > topology_state->avail_slots) { 3042 - drm_dp_put_port(port); 3043 - return -ENOSPC; 2665 + /* Find the current allocation for this port, if any */ 2666 + list_for_each_entry(pos, &topology_state->vcpis, next) { 2667 + if (pos->port == port) { 2668 + vcpi = pos; 2669 + prev_slots = vcpi->vcpi; 2670 + 2671 + /* 2672 + * This should never happen, unless the driver tries 2673 + * releasing and allocating the same VCPI allocation, 2674 + * which is an error 2675 + */ 2676 + if (WARN_ON(!prev_slots)) { 2677 + DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n", 2678 + port); 2679 + return -EINVAL; 2680 + } 2681 + 2682 + break; 2683 + } 3044 2684 } 2685 + if (!vcpi) 2686 + prev_slots = 0; 3045 2687 3046 - topology_state->avail_slots -= req_slots; 3047 - DRM_DEBUG_KMS("vcpi slots avail=%d", 
topology_state->avail_slots); 2688 + req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 3048 2689 3049 - drm_dp_put_port(port); 3050 - return req_slots; 2690 + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", 2691 + port->connector->base.id, port->connector->name, 2692 + port, prev_slots, req_slots); 2693 + 2694 + /* Add the new allocation to the state */ 2695 + if (!vcpi) { 2696 + vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL); 2697 + if (!vcpi) { 2698 + ret = -ENOMEM; 2699 + goto out; 2700 + } 2701 + 2702 + drm_dp_mst_get_port_malloc(port); 2703 + vcpi->port = port; 2704 + list_add(&vcpi->next, &topology_state->vcpis); 2705 + } 2706 + vcpi->vcpi = req_slots; 2707 + 2708 + ret = req_slots; 2709 + out: 2710 + drm_dp_mst_topology_put_port(port); 2711 + return ret; 3051 2712 } 3052 2713 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); 3053 2714 ··· 3110 2661 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots 3111 2662 * @state: global atomic state 3112 2663 * @mgr: MST topology manager for the port 3113 - * @slots: number of vcpi slots to release 2664 + * @port: The port to release the VCPI slots from 3114 2665 * 3115 - * RETURNS: 3116 - * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or 3117 - * negative error code 2666 + * Releases any VCPI slots that have been allocated to a port in the atomic 2667 + * state. Any atomic drivers which support MST must call this function in 2668 + * their &drm_connector_helper_funcs.atomic_check() callback when the 2669 + * connector will no longer have VCPI allocated (e.g. because it's CRTC was 2670 + * removed) when it had VCPI allocated in the previous atomic state. 2671 + * 2672 + * It is OK to call this even if @port has been removed from the system. 2673 + * Additionally, it is OK to call this function multiple times on the same 2674 + * @port as needed. 
It is not OK however, to call this function and 2675 + * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check 2676 + * phase. 2677 + * 2678 + * See also: 2679 + * drm_dp_atomic_find_vcpi_slots() 2680 + * drm_dp_mst_atomic_check() 2681 + * 2682 + * Returns: 2683 + * 0 if all slots for this port were added back to 2684 + * &drm_dp_mst_topology_state.avail_slots or negative error code 3118 2685 */ 3119 2686 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, 3120 2687 struct drm_dp_mst_topology_mgr *mgr, 3121 - int slots) 2688 + struct drm_dp_mst_port *port) 3122 2689 { 3123 2690 struct drm_dp_mst_topology_state *topology_state; 2691 + struct drm_dp_vcpi_allocation *pos; 2692 + bool found = false; 3124 2693 3125 2694 topology_state = drm_atomic_get_mst_topology_state(state, mgr); 3126 2695 if (IS_ERR(topology_state)) 3127 2696 return PTR_ERR(topology_state); 3128 2697 3129 - /* We cannot rely on port->vcpi.num_slots to update 3130 - * topology_state->avail_slots as the port may not exist if the parent 3131 - * branch device was unplugged. This should be fixed by tracking 3132 - * per-port slot allocation in drm_dp_mst_topology_state instead of 3133 - * depending on the caller to tell us how many slots to release. 
3134 - */ 3135 - topology_state->avail_slots += slots; 3136 - DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n", 3137 - slots, topology_state->avail_slots); 2698 + list_for_each_entry(pos, &topology_state->vcpis, next) { 2699 + if (pos->port == port) { 2700 + found = true; 2701 + break; 2702 + } 2703 + } 2704 + if (WARN_ON(!found)) { 2705 + DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n", 2706 + port, &topology_state->base); 2707 + return -EINVAL; 2708 + } 2709 + 2710 + DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi); 2711 + if (pos->vcpi) { 2712 + drm_dp_mst_put_port_malloc(port); 2713 + pos->vcpi = 0; 2714 + } 3138 2715 3139 2716 return 0; 3140 2717 } ··· 3178 2703 { 3179 2704 int ret; 3180 2705 3181 - port = drm_dp_get_validated_port_ref(mgr, port); 2706 + port = drm_dp_mst_topology_get_port_validated(mgr, port); 3182 2707 if (!port) 3183 2708 return false; 3184 2709 ··· 3186 2711 return false; 3187 2712 3188 2713 if (port->vcpi.vcpi > 0) { 3189 - DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); 2714 + DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", 2715 + port->vcpi.vcpi, port->vcpi.pbn, pbn); 3190 2716 if (pbn == port->vcpi.pbn) { 3191 - drm_dp_put_port(port); 2717 + drm_dp_mst_topology_put_port(port); 3192 2718 return true; 3193 2719 } 3194 2720 } ··· 3197 2721 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); 3198 2722 if (ret) { 3199 2723 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", 3200 - DIV_ROUND_UP(pbn, mgr->pbn_div), ret); 2724 + DIV_ROUND_UP(pbn, mgr->pbn_div), ret); 3201 2725 goto out; 3202 2726 } 3203 2727 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", 3204 - pbn, port->vcpi.num_slots); 2728 + pbn, port->vcpi.num_slots); 3205 2729 3206 - drm_dp_put_port(port); 2730 + /* Keep port allocated until it's payload has been removed */ 2731 + drm_dp_mst_get_port_malloc(port); 2732 + 
drm_dp_mst_topology_put_port(port); 3207 2733 return true; 3208 2734 out: 3209 2735 return false; ··· 3215 2737 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 3216 2738 { 3217 2739 int slots = 0; 3218 - port = drm_dp_get_validated_port_ref(mgr, port); 2740 + port = drm_dp_mst_topology_get_port_validated(mgr, port); 3219 2741 if (!port) 3220 2742 return slots; 3221 2743 3222 2744 slots = port->vcpi.num_slots; 3223 - drm_dp_put_port(port); 2745 + drm_dp_mst_topology_put_port(port); 3224 2746 return slots; 3225 2747 } 3226 2748 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); ··· 3234 2756 */ 3235 2757 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 3236 2758 { 3237 - port = drm_dp_get_validated_port_ref(mgr, port); 3238 - if (!port) 3239 - return; 2759 + /* 2760 + * A port with VCPI will remain allocated until it's VCPI is 2761 + * released, no verified ref needed 2762 + */ 2763 + 3240 2764 port->vcpi.num_slots = 0; 3241 - drm_dp_put_port(port); 3242 2765 } 3243 2766 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); 3244 2767 ··· 3248 2769 * @mgr: manager for this port 3249 2770 * @port: unverified port to deallocate vcpi for 3250 2771 */ 3251 - void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2772 + void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, 2773 + struct drm_dp_mst_port *port) 3252 2774 { 3253 - port = drm_dp_get_validated_port_ref(mgr, port); 3254 - if (!port) 3255 - return; 2775 + /* 2776 + * A port with VCPI will remain allocated until it's VCPI is 2777 + * released, no verified ref needed 2778 + */ 3256 2779 3257 2780 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 3258 2781 port->vcpi.num_slots = 0; 3259 2782 port->vcpi.pbn = 0; 3260 2783 port->vcpi.aligned_pbn = 0; 3261 2784 port->vcpi.vcpi = 0; 3262 - drm_dp_put_port(port); 2785 + drm_dp_mst_put_port_malloc(port); 3263 2786 } 3264 2787 
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); 3265 2788 ··· 3545 3064 mutex_unlock(&mgr->qlock); 3546 3065 } 3547 3066 3548 - static void drm_dp_free_mst_port(struct kref *kref) 3549 - { 3550 - struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 3551 - kref_put(&port->parent->kref, drm_dp_free_mst_branch_device); 3552 - kfree(port); 3553 - } 3554 - 3555 3067 static void drm_dp_destroy_connector_work(struct work_struct *work) 3556 3068 { 3557 3069 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); ··· 3565 3091 list_del(&port->next); 3566 3092 mutex_unlock(&mgr->destroy_connector_lock); 3567 3093 3568 - kref_init(&port->kref); 3569 3094 INIT_LIST_HEAD(&port->next); 3570 3095 3571 3096 mgr->cbs->destroy_connector(mgr, port->connector); ··· 3572 3099 drm_dp_port_teardown_pdt(port, port->pdt); 3573 3100 port->pdt = DP_PEER_DEVICE_NONE; 3574 3101 3575 - if (!port->input && port->vcpi.vcpi > 0) { 3576 - drm_dp_mst_reset_vcpi_slots(mgr, port); 3577 - drm_dp_update_payload_part1(mgr); 3578 - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 3579 - } 3580 - 3581 - kref_put(&port->kref, drm_dp_free_mst_port); 3102 + drm_dp_mst_put_port_malloc(port); 3582 3103 send_hotplug = true; 3583 3104 } 3584 3105 if (send_hotplug) 3585 - (*mgr->cbs->hotplug)(mgr); 3106 + drm_kms_helper_hotplug_event(mgr->dev); 3586 3107 } 3587 3108 3588 3109 static struct drm_private_state * 3589 3110 drm_dp_mst_duplicate_state(struct drm_private_obj *obj) 3590 3111 { 3591 - struct drm_dp_mst_topology_state *state; 3112 + struct drm_dp_mst_topology_state *state, *old_state = 3113 + to_dp_mst_topology_state(obj->state); 3114 + struct drm_dp_vcpi_allocation *pos, *vcpi; 3592 3115 3593 - state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); 3116 + state = kmemdup(old_state, sizeof(*state), GFP_KERNEL); 3594 3117 if (!state) 3595 3118 return NULL; 3596 3119 3597 3120 
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 3598 3121 3122 + INIT_LIST_HEAD(&state->vcpis); 3123 + 3124 + list_for_each_entry(pos, &old_state->vcpis, next) { 3125 + /* Prune leftover freed VCPI allocations */ 3126 + if (!pos->vcpi) 3127 + continue; 3128 + 3129 + vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL); 3130 + if (!vcpi) 3131 + goto fail; 3132 + 3133 + drm_dp_mst_get_port_malloc(vcpi->port); 3134 + list_add(&vcpi->next, &state->vcpis); 3135 + } 3136 + 3599 3137 return &state->base; 3138 + 3139 + fail: 3140 + list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) { 3141 + drm_dp_mst_put_port_malloc(pos->port); 3142 + kfree(pos); 3143 + } 3144 + kfree(state); 3145 + 3146 + return NULL; 3600 3147 } 3601 3148 3602 3149 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, ··· 3624 3131 { 3625 3132 struct drm_dp_mst_topology_state *mst_state = 3626 3133 to_dp_mst_topology_state(state); 3134 + struct drm_dp_vcpi_allocation *pos, *tmp; 3135 + 3136 + list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) { 3137 + /* We only keep references to ports with non-zero VCPIs */ 3138 + if (pos->vcpi) 3139 + drm_dp_mst_put_port_malloc(pos->port); 3140 + kfree(pos); 3141 + } 3627 3142 3628 3143 kfree(mst_state); 3629 3144 } 3630 3145 3631 - static const struct drm_private_state_funcs mst_state_funcs = { 3146 + static inline int 3147 + drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, 3148 + struct drm_dp_mst_topology_state *mst_state) 3149 + { 3150 + struct drm_dp_vcpi_allocation *vcpi; 3151 + int avail_slots = 63, payload_count = 0; 3152 + 3153 + list_for_each_entry(vcpi, &mst_state->vcpis, next) { 3154 + /* Releasing VCPI is always OK-even if the port is gone */ 3155 + if (!vcpi->vcpi) { 3156 + DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n", 3157 + vcpi->port); 3158 + continue; 3159 + } 3160 + 3161 + DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n", 3162 + vcpi->port, vcpi->vcpi); 3163 
+ 3164 + avail_slots -= vcpi->vcpi; 3165 + if (avail_slots < 0) { 3166 + DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n", 3167 + vcpi->port, mst_state, 3168 + avail_slots + vcpi->vcpi); 3169 + return -ENOSPC; 3170 + } 3171 + 3172 + if (++payload_count > mgr->max_payloads) { 3173 + DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n", 3174 + mgr, mst_state, mgr->max_payloads); 3175 + return -EINVAL; 3176 + } 3177 + } 3178 + DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", 3179 + mgr, mst_state, avail_slots, 3180 + 63 - avail_slots); 3181 + 3182 + return 0; 3183 + } 3184 + 3185 + /** 3186 + * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an 3187 + * atomic update is valid 3188 + * @state: Pointer to the new &struct drm_dp_mst_topology_state 3189 + * 3190 + * Checks the given topology state for an atomic update to ensure that it's 3191 + * valid. This includes checking whether there's enough bandwidth to support 3192 + * the new VCPI allocations in the atomic update. 3193 + * 3194 + * Any atomic drivers supporting DP MST must make sure to call this after 3195 + * checking the rest of their state in their 3196 + * &drm_mode_config_funcs.atomic_check() callback. 3197 + * 3198 + * See also: 3199 + * drm_dp_atomic_find_vcpi_slots() 3200 + * drm_dp_atomic_release_vcpi_slots() 3201 + * 3202 + * Returns: 3203 + * 3204 + * 0 if the new state is valid, negative error code otherwise. 
3205 + */ 3206 + int drm_dp_mst_atomic_check(struct drm_atomic_state *state) 3207 + { 3208 + struct drm_dp_mst_topology_mgr *mgr; 3209 + struct drm_dp_mst_topology_state *mst_state; 3210 + int i, ret = 0; 3211 + 3212 + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 3213 + ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state); 3214 + if (ret) 3215 + break; 3216 + } 3217 + 3218 + return ret; 3219 + } 3220 + EXPORT_SYMBOL(drm_dp_mst_atomic_check); 3221 + 3222 + const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = { 3632 3223 .atomic_duplicate_state = drm_dp_mst_duplicate_state, 3633 3224 .atomic_destroy_state = drm_dp_mst_destroy_state, 3634 3225 }; 3226 + EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); 3635 3227 3636 3228 /** 3637 3229 * drm_atomic_get_mst_topology_state: get MST topology state ··· 3794 3216 return -ENOMEM; 3795 3217 3796 3218 mst_state->mgr = mgr; 3219 + INIT_LIST_HEAD(&mst_state->vcpis); 3797 3220 3798 - /* max. time slots - one slot for MTP header */ 3799 - mst_state->avail_slots = 63; 3800 - 3801 - drm_atomic_private_obj_init(&mgr->base, 3221 + drm_atomic_private_obj_init(dev, &mgr->base, 3802 3222 &mst_state->base, 3803 - &mst_state_funcs); 3223 + &drm_dp_mst_topology_state_funcs); 3804 3224 3805 3225 return 0; 3806 3226 } ··· 3810 3234 */ 3811 3235 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 3812 3236 { 3237 + drm_dp_mst_topology_mgr_set_mst(mgr, false); 3813 3238 flush_work(&mgr->work); 3814 3239 flush_work(&mgr->destroy_connector_work); 3815 3240 mutex_lock(&mgr->payload_lock); ··· 3826 3249 } 3827 3250 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); 3828 3251 3252 + static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num) 3253 + { 3254 + int i; 3255 + 3256 + if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS) 3257 + return false; 3258 + 3259 + for (i = 0; i < num - 1; i++) { 3260 + if (msgs[i].flags & I2C_M_RD || 3261 + msgs[i].len > 0xff) 3262 + return false; 3263 
+ } 3264 + 3265 + return msgs[num - 1].flags & I2C_M_RD && 3266 + msgs[num - 1].len <= 0xff; 3267 + } 3268 + 3829 3269 /* I2C device */ 3830 3270 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, 3831 3271 int num) ··· 3852 3258 struct drm_dp_mst_branch *mstb; 3853 3259 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 3854 3260 unsigned int i; 3855 - bool reading = false; 3856 3261 struct drm_dp_sideband_msg_req_body msg; 3857 3262 struct drm_dp_sideband_msg_tx *txmsg = NULL; 3858 3263 int ret; 3859 3264 3860 - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 3265 + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3861 3266 if (!mstb) 3862 3267 return -EREMOTEIO; 3863 3268 3864 - /* construct i2c msg */ 3865 - /* see if last msg is a read */ 3866 - if (msgs[num - 1].flags & I2C_M_RD) 3867 - reading = true; 3868 - 3869 - if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) { 3269 + if (!remote_i2c_read_ok(msgs, num)) { 3870 3270 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); 3871 3271 ret = -EIO; 3872 3272 goto out; ··· 3874 3286 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; 3875 3287 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; 3876 3288 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; 3289 + msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP); 3877 3290 } 3878 3291 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; 3879 3292 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; ··· 3906 3317 } 3907 3318 out: 3908 3319 kfree(txmsg); 3909 - drm_dp_put_mst_branch_device(mstb); 3320 + drm_dp_mst_topology_put_mstb(mstb); 3910 3321 return ret; 3911 3322 } 3912 3323
+11 -13
drivers/gpu/drm/drm_drv.c
··· 41 41 #include "drm_crtc_internal.h" 42 42 #include "drm_legacy.h" 43 43 #include "drm_internal.h" 44 - #include "drm_crtc_internal.h" 45 44 46 45 /* 47 46 * drm_debug: Enable debug output. ··· 264 265 * DOC: driver instance overview 265 266 * 266 267 * A device instance for a drm driver is represented by &struct drm_device. This 267 - * is allocated with drm_dev_alloc(), usually from bus-specific ->probe() 268 + * is initialized with drm_dev_init(), usually from bus-specific ->probe() 268 269 * callbacks implemented by the driver. The driver then needs to initialize all 269 270 * the various subsystems for the drm device like memory management, vblank 270 271 * handling, modesetting support and intial output configuration plus obviously 271 - * initialize all the corresponding hardware bits. An important part of this is 272 - * also calling drm_dev_set_unique() to set the userspace-visible unique name of 273 - * this device instance. Finally when everything is up and running and ready for 274 - * userspace the device instance can be published using drm_dev_register(). 272 + * initialize all the corresponding hardware bits. Finally when everything is up 273 + * and running and ready for userspace the device instance can be published 274 + * using drm_dev_register(). 275 275 * 276 276 * There is also deprecated support for initalizing device instances using 277 277 * bus-specific helpers and the &drm_driver.load callback. But due to ··· 286 288 * Note that the lifetime rules for &drm_device instance has still a lot of 287 289 * historical baggage. Hence use the reference counting provided by 288 290 * drm_dev_get() and drm_dev_put() only carefully. 289 - * 290 - * It is recommended that drivers embed &struct drm_device into their own device 291 - * structure, which is supported through drm_dev_init(). 292 291 */ 293 292 294 293 /** ··· 470 475 * 471 476 * The initial ref-count of the object is 1. 
Use drm_dev_get() and 472 477 * drm_dev_put() to take and drop further ref-counts. 478 + * 479 + * It is recommended that drivers embed &struct drm_device into their own device 480 + * structure. 473 481 * 474 482 * Drivers that do not want to allocate their own device struct 475 483 * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers ··· 764 766 * @flags: Flags passed to the driver's .load() function 765 767 * 766 768 * Register the DRM device @dev with the system, advertise device to user-space 767 - * and start normal device operation. @dev must be allocated via drm_dev_alloc() 769 + * and start normal device operation. @dev must be initialized via drm_dev_init() 768 770 * previously. 769 771 * 770 772 * Never call this twice on any device! ··· 876 878 * @dev: device of which to set the unique name 877 879 * @name: unique name 878 880 * 879 - * Sets the unique name of a DRM device using the specified string. Drivers 880 - * can use this at driver probe time if the unique name of the devices they 881 - * drive is static. 881 + * Sets the unique name of a DRM device using the specified string. This is 882 + * already done by drm_dev_init(), drivers should only override the default 883 + * unique name for backwards compatibility reasons. 882 884 * 883 885 * Return: 0 on success or a negative error code on failure. 884 886 */
+51 -50
drivers/gpu/drm/drm_edid.c
··· 3641 3641 return oui == HDMI_FORUM_IEEE_OUI; 3642 3642 } 3643 3643 3644 + static bool cea_db_is_vcdb(const u8 *db) 3645 + { 3646 + if (cea_db_tag(db) != USE_EXTENDED_TAG) 3647 + return false; 3648 + 3649 + if (cea_db_payload_len(db) != 2) 3650 + return false; 3651 + 3652 + if (cea_db_extended_tag(db) != EXT_VIDEO_CAPABILITY_BLOCK) 3653 + return false; 3654 + 3655 + return true; 3656 + } 3657 + 3644 3658 static bool cea_db_is_y420cmdb(const u8 *db) 3645 3659 { 3646 3660 if (cea_db_tag(db) != USE_EXTENDED_TAG) ··· 4237 4223 } 4238 4224 EXPORT_SYMBOL(drm_detect_monitor_audio); 4239 4225 4240 - /** 4241 - * drm_rgb_quant_range_selectable - is RGB quantization range selectable? 4242 - * @edid: EDID block to scan 4243 - * 4244 - * Check whether the monitor reports the RGB quantization range selection 4245 - * as supported. The AVI infoframe can then be used to inform the monitor 4246 - * which quantization range (full or limited) is used. 4247 - * 4248 - * Return: True if the RGB quantization range is selectable, false otherwise. 
4249 - */ 4250 - bool drm_rgb_quant_range_selectable(struct edid *edid) 4251 - { 4252 - u8 *edid_ext; 4253 - int i, start, end; 4254 - 4255 - edid_ext = drm_find_cea_extension(edid); 4256 - if (!edid_ext) 4257 - return false; 4258 - 4259 - if (cea_db_offsets(edid_ext, &start, &end)) 4260 - return false; 4261 - 4262 - for_each_cea_db(edid_ext, i, start, end) { 4263 - if (cea_db_tag(&edid_ext[i]) == USE_EXTENDED_TAG && 4264 - cea_db_payload_len(&edid_ext[i]) == 2 && 4265 - cea_db_extended_tag(&edid_ext[i]) == 4266 - EXT_VIDEO_CAPABILITY_BLOCK) { 4267 - DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]); 4268 - return edid_ext[i + 2] & EDID_CEA_VCDB_QS; 4269 - } 4270 - } 4271 - 4272 - return false; 4273 - } 4274 - EXPORT_SYMBOL(drm_rgb_quant_range_selectable); 4275 4226 4276 4227 /** 4277 4228 * drm_default_rgb_quant_range - default RGB quantization range ··· 4256 4277 HDMI_QUANTIZATION_RANGE_FULL; 4257 4278 } 4258 4279 EXPORT_SYMBOL(drm_default_rgb_quant_range); 4280 + 4281 + static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db) 4282 + { 4283 + struct drm_display_info *info = &connector->display_info; 4284 + 4285 + DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", db[2]); 4286 + 4287 + if (db[2] & EDID_CEA_VCDB_QS) 4288 + info->rgb_quant_range_selectable = true; 4289 + } 4259 4290 4260 4291 static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector, 4261 4292 const u8 *db) ··· 4441 4452 drm_parse_hdmi_forum_vsdb(connector, db); 4442 4453 if (cea_db_is_y420cmdb(db)) 4443 4454 drm_parse_y420cmdb_bitmap(connector, db); 4455 + if (cea_db_is_vcdb(db)) 4456 + drm_parse_vcdb(connector, db); 4444 4457 } 4445 4458 } 4446 4459 ··· 4463 4472 info->max_tmds_clock = 0; 4464 4473 info->dvi_dual = false; 4465 4474 info->has_hdmi_infoframe = false; 4475 + info->rgb_quant_range_selectable = false; 4466 4476 memset(&info->hdmi, 0, sizeof(info->hdmi)); 4467 4477 4468 4478 info->non_desktop = 0; ··· 4822 4830 } 4823 4831 EXPORT_SYMBOL(drm_set_preferred_mode); 
4824 4832 4833 + static bool is_hdmi2_sink(struct drm_connector *connector) 4834 + { 4835 + /* 4836 + * FIXME: sil-sii8620 doesn't have a connector around when 4837 + * we need one, so we have to be prepared for a NULL connector. 4838 + */ 4839 + if (!connector) 4840 + return true; 4841 + 4842 + return connector->display_info.hdmi.scdc.supported || 4843 + connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420; 4844 + } 4845 + 4825 4846 /** 4826 4847 * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with 4827 4848 * data from a DRM display mode 4828 4849 * @frame: HDMI AVI infoframe 4850 + * @connector: the connector 4829 4851 * @mode: DRM display mode 4830 - * @is_hdmi2_sink: Sink is HDMI 2.0 compliant 4831 4852 * 4832 4853 * Return: 0 on success or a negative error code on failure. 4833 4854 */ 4834 4855 int 4835 4856 drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, 4836 - const struct drm_display_mode *mode, 4837 - bool is_hdmi2_sink) 4857 + struct drm_connector *connector, 4858 + const struct drm_display_mode *mode) 4838 4859 { 4839 4860 enum hdmi_picture_aspect picture_aspect; 4840 4861 int err; ··· 4869 4864 * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we 4870 4865 * have to make sure we dont break HDMI 1.4 sinks. 
4871 4866 */ 4872 - if (!is_hdmi2_sink && frame->video_code > 64) 4867 + if (!is_hdmi2_sink(connector) && frame->video_code > 64) 4873 4868 frame->video_code = 0; 4874 4869 4875 4870 /* ··· 4928 4923 * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe 4929 4924 * quantization range information 4930 4925 * @frame: HDMI AVI infoframe 4926 + * @connector: the connector 4931 4927 * @mode: DRM display mode 4932 4928 * @rgb_quant_range: RGB quantization range (Q) 4933 - * @rgb_quant_range_selectable: Sink support selectable RGB quantization range (QS) 4934 - * @is_hdmi2_sink: HDMI 2.0 sink, which has different default recommendations 4935 - * 4936 - * Note that @is_hdmi2_sink can be derived by looking at the 4937 - * &drm_scdc.supported flag stored in &drm_hdmi_info.scdc, 4938 - * &drm_display_info.hdmi, which can be found in &drm_connector.display_info. 4939 4929 */ 4940 4930 void 4941 4931 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, 4932 + struct drm_connector *connector, 4942 4933 const struct drm_display_mode *mode, 4943 - enum hdmi_quantization_range rgb_quant_range, 4944 - bool rgb_quant_range_selectable, 4945 - bool is_hdmi2_sink) 4934 + enum hdmi_quantization_range rgb_quant_range) 4946 4935 { 4936 + const struct drm_display_info *info = &connector->display_info; 4937 + 4947 4938 /* 4948 4939 * CEA-861: 4949 4940 * "A Source shall not send a non-zero Q value that does not correspond ··· 4950 4949 * HDMI 2.0 recommends sending non-zero Q when it does match the 4951 4950 * default RGB quantization range for the mode, even when QS=0. 4952 4951 */ 4953 - if (rgb_quant_range_selectable || 4952 + if (info->rgb_quant_range_selectable || 4954 4953 rgb_quant_range == drm_default_rgb_quant_range(mode)) 4955 4954 frame->quantization_range = rgb_quant_range; 4956 4955 else ··· 4969 4968 * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based 4970 4969 * on on CEA-861-F. 
4971 4970 */ 4972 - if (!is_hdmi2_sink || 4971 + if (!is_hdmi2_sink(connector) || 4973 4972 rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED) 4974 4973 frame->ycc_quantization_range = 4975 4974 HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+113 -42
drivers/gpu/drm/drm_fb_helper.c
··· 1797 1797 int i; 1798 1798 struct drm_fb_helper_surface_size sizes; 1799 1799 int gamma_size = 0; 1800 + int best_depth = 0; 1800 1801 1801 1802 memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); 1802 1803 sizes.surface_depth = 24; ··· 1805 1804 sizes.fb_width = (u32)-1; 1806 1805 sizes.fb_height = (u32)-1; 1807 1806 1808 - /* if driver picks 8 or 16 by default use that for both depth/bpp */ 1807 + /* 1808 + * If driver picks 8 or 16 by default use that for both depth/bpp 1809 + * to begin with 1810 + */ 1809 1811 if (preferred_bpp != sizes.surface_bpp) 1810 1812 sizes.surface_depth = sizes.surface_bpp = preferred_bpp; 1811 1813 ··· 1841 1837 } 1842 1838 break; 1843 1839 } 1840 + } 1841 + 1842 + /* 1843 + * If we run into a situation where, for example, the primary plane 1844 + * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth 1845 + * 16) we need to scale down the depth of the sizes we request. 1846 + */ 1847 + for (i = 0; i < fb_helper->crtc_count; i++) { 1848 + struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; 1849 + struct drm_crtc *crtc = mode_set->crtc; 1850 + struct drm_plane *plane = crtc->primary; 1851 + int j; 1852 + 1853 + DRM_DEBUG("test CRTC %d primary plane\n", i); 1854 + 1855 + for (j = 0; j < plane->format_count; j++) { 1856 + const struct drm_format_info *fmt; 1857 + 1858 + fmt = drm_format_info(plane->format_types[j]); 1859 + 1860 + /* 1861 + * Do not consider YUV or other complicated formats 1862 + * for framebuffers. This means only legacy formats 1863 + * are supported (fmt->depth is a legacy field) but 1864 + * the framebuffer emulation can only deal with such 1865 + * formats, specifically RGB/BGA formats. 
1866 + */ 1867 + if (fmt->depth == 0) 1868 + continue; 1869 + 1870 + /* We found a perfect fit, great */ 1871 + if (fmt->depth == sizes.surface_depth) { 1872 + best_depth = fmt->depth; 1873 + break; 1874 + } 1875 + 1876 + /* Skip depths above what we're looking for */ 1877 + if (fmt->depth > sizes.surface_depth) 1878 + continue; 1879 + 1880 + /* Best depth found so far */ 1881 + if (fmt->depth > best_depth) 1882 + best_depth = fmt->depth; 1883 + } 1884 + } 1885 + if (sizes.surface_depth != best_depth) { 1886 + DRM_INFO("requested bpp %d, scaled depth down to %d", 1887 + sizes.surface_bpp, best_depth); 1888 + sizes.surface_depth = best_depth; 1844 1889 } 1845 1890 1846 1891 crtc_count = 0; ··· 2919 2866 return 0; 2920 2867 2921 2868 err_drm_fb_helper_fini: 2922 - drm_fb_helper_fini(fb_helper); 2869 + drm_fb_helper_fbdev_teardown(dev); 2923 2870 2924 2871 return ret; 2925 2872 } ··· 3014 2961 return 0; 3015 2962 } 3016 2963 3017 - /* 3018 - * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of 3019 - * unregister_framebuffer() or fb_release(). 
3020 - */ 3021 - static void drm_fbdev_fb_destroy(struct fb_info *info) 2964 + static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) 3022 2965 { 3023 - struct drm_fb_helper *fb_helper = info->par; 3024 2966 struct fb_info *fbi = fb_helper->fbdev; 3025 2967 struct fb_ops *fbops = NULL; 3026 2968 void *shadow = NULL; 3027 2969 3028 - if (fbi->fbdefio) { 2970 + if (!fb_helper->dev) 2971 + return; 2972 + 2973 + if (fbi && fbi->fbdefio) { 3029 2974 fb_deferred_io_cleanup(fbi); 3030 2975 shadow = fbi->screen_buffer; 3031 2976 fbops = fbi->fbops; ··· 3037 2986 } 3038 2987 3039 2988 drm_client_framebuffer_delete(fb_helper->buffer); 2989 + } 2990 + 2991 + static void drm_fbdev_release(struct drm_fb_helper *fb_helper) 2992 + { 2993 + drm_fbdev_cleanup(fb_helper); 2994 + 3040 2995 /* 3041 2996 * FIXME: 3042 2997 * Remove conditional when all CMA drivers have been moved over to using ··· 3052 2995 drm_client_release(&fb_helper->client); 3053 2996 kfree(fb_helper); 3054 2997 } 2998 + } 2999 + 3000 + /* 3001 + * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of 3002 + * unregister_framebuffer() or fb_release(). 
3003 + */ 3004 + static void drm_fbdev_fb_destroy(struct fb_info *info) 3005 + { 3006 + drm_fbdev_release(info->par); 3055 3007 } 3056 3008 3057 3009 static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) ··· 3113 3047 struct drm_framebuffer *fb; 3114 3048 struct fb_info *fbi; 3115 3049 u32 format; 3116 - int ret; 3117 3050 3118 3051 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", 3119 3052 sizes->surface_width, sizes->surface_height, ··· 3129 3064 fb = buffer->fb; 3130 3065 3131 3066 fbi = drm_fb_helper_alloc_fbi(fb_helper); 3132 - if (IS_ERR(fbi)) { 3133 - ret = PTR_ERR(fbi); 3134 - goto err_free_buffer; 3135 - } 3067 + if (IS_ERR(fbi)) 3068 + return PTR_ERR(fbi); 3136 3069 3137 3070 fbi->par = fb_helper; 3138 3071 fbi->fbops = &drm_fbdev_fb_ops; ··· 3161 3098 if (!fbops || !shadow) { 3162 3099 kfree(fbops); 3163 3100 vfree(shadow); 3164 - ret = -ENOMEM; 3165 - goto err_fb_info_destroy; 3101 + return -ENOMEM; 3166 3102 } 3167 3103 3168 3104 *fbops = *fbi->fbops; ··· 3173 3111 } 3174 3112 3175 3113 return 0; 3176 - 3177 - err_fb_info_destroy: 3178 - drm_fb_helper_fini(fb_helper); 3179 - err_free_buffer: 3180 - drm_client_framebuffer_delete(buffer); 3181 - 3182 - return ret; 3183 3114 } 3184 3115 EXPORT_SYMBOL(drm_fb_helper_generic_probe); 3185 3116 ··· 3184 3129 { 3185 3130 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 3186 3131 3187 - if (fb_helper->fbdev) { 3188 - drm_fb_helper_unregister_fbi(fb_helper); 3132 + if (fb_helper->fbdev) 3189 3133 /* drm_fbdev_fb_destroy() takes care of cleanup */ 3190 - return; 3191 - } 3192 - 3193 - /* Did drm_fb_helper_fbdev_setup() run? 
*/ 3194 - if (fb_helper->dev) 3195 - drm_fb_helper_fini(fb_helper); 3196 - 3197 - drm_client_release(client); 3198 - kfree(fb_helper); 3134 + drm_fb_helper_unregister_fbi(fb_helper); 3135 + else 3136 + drm_fbdev_release(fb_helper); 3199 3137 } 3200 3138 3201 3139 static int drm_fbdev_client_restore(struct drm_client_dev *client) ··· 3206 3158 struct drm_device *dev = client->dev; 3207 3159 int ret; 3208 3160 3209 - /* If drm_fb_helper_fbdev_setup() failed, we only try once */ 3161 + /* Setup is not retried if it has failed */ 3210 3162 if (!fb_helper->dev && fb_helper->funcs) 3211 3163 return 0; 3212 3164 ··· 3218 3170 return 0; 3219 3171 } 3220 3172 3221 - ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs, 3222 - fb_helper->preferred_bpp, 0); 3223 - if (ret) { 3224 - fb_helper->dev = NULL; 3225 - fb_helper->fbdev = NULL; 3226 - return ret; 3227 - } 3173 + drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs); 3174 + 3175 + ret = drm_fb_helper_init(dev, fb_helper, dev->mode_config.num_connector); 3176 + if (ret) 3177 + goto err; 3178 + 3179 + ret = drm_fb_helper_single_add_all_connectors(fb_helper); 3180 + if (ret) 3181 + goto err_cleanup; 3182 + 3183 + if (!drm_drv_uses_atomic_modeset(dev)) 3184 + drm_helper_disable_unused_functions(dev); 3185 + 3186 + ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp); 3187 + if (ret) 3188 + goto err_cleanup; 3228 3189 3229 3190 return 0; 3191 + 3192 + err_cleanup: 3193 + drm_fbdev_cleanup(fb_helper); 3194 + err: 3195 + fb_helper->dev = NULL; 3196 + fb_helper->fbdev = NULL; 3197 + 3198 + DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret); 3199 + 3200 + return ret; 3230 3201 } 3231 3202 3232 3203 static const struct drm_client_funcs drm_fbdev_client_funcs = { ··· 3304 3237 3305 3238 drm_client_add(&fb_helper->client); 3306 3239 3240 + if (!preferred_bpp) 3241 + preferred_bpp = dev->mode_config.preferred_depth; 3242 + if (!preferred_bpp) 
3243 + preferred_bpp = 32; 3307 3244 fb_helper->preferred_bpp = preferred_bpp; 3308 3245 3309 3246 ret = drm_fbdev_client_hotplug(&fb_helper->client);
+1
drivers/gpu/drm/drm_flip_work.c
··· 22 22 */ 23 23 24 24 #include <drm/drmP.h> 25 + #include <drm/drm_util.h> 25 26 #include <drm/drm_flip_work.h> 26 27 27 28 /**
+1
drivers/gpu/drm/drm_framebuffer.c
··· 27 27 #include <drm/drm_atomic.h> 28 28 #include <drm/drm_atomic_uapi.h> 29 29 #include <drm/drm_print.h> 30 + #include <drm/drm_util.h> 30 31 31 32 #include "drm_internal.h" 32 33 #include "drm_crtc_internal.h"
+33 -3
drivers/gpu/drm/drm_gem.c
··· 37 37 #include <linux/shmem_fs.h> 38 38 #include <linux/dma-buf.h> 39 39 #include <linux/mem_encrypt.h> 40 + #include <linux/pagevec.h> 40 41 #include <drm/drmP.h> 41 42 #include <drm/drm_vma_manager.h> 42 43 #include <drm/drm_gem.h> ··· 527 526 } 528 527 EXPORT_SYMBOL(drm_gem_create_mmap_offset); 529 528 529 + /* 530 + * Move pages to appropriate lru and release the pagevec, decrementing the 531 + * ref count of those pages. 532 + */ 533 + static void drm_gem_check_release_pagevec(struct pagevec *pvec) 534 + { 535 + check_move_unevictable_pages(pvec); 536 + __pagevec_release(pvec); 537 + cond_resched(); 538 + } 539 + 530 540 /** 531 541 * drm_gem_get_pages - helper to allocate backing pages for a GEM object 532 542 * from shmem ··· 563 551 { 564 552 struct address_space *mapping; 565 553 struct page *p, **pages; 554 + struct pagevec pvec; 566 555 int i, npages; 567 556 568 557 /* This is the shared memory object that backs the GEM resource */ ··· 580 567 pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 581 568 if (pages == NULL) 582 569 return ERR_PTR(-ENOMEM); 570 + 571 + mapping_set_unevictable(mapping); 583 572 584 573 for (i = 0; i < npages; i++) { 585 574 p = shmem_read_mapping_page(mapping, i); ··· 601 586 return pages; 602 587 603 588 fail: 604 - while (i--) 605 - put_page(pages[i]); 589 + mapping_clear_unevictable(mapping); 590 + pagevec_init(&pvec); 591 + while (i--) { 592 + if (!pagevec_add(&pvec, pages[i])) 593 + drm_gem_check_release_pagevec(&pvec); 594 + } 595 + if (pagevec_count(&pvec)) 596 + drm_gem_check_release_pagevec(&pvec); 606 597 607 598 kvfree(pages); 608 599 return ERR_CAST(p); ··· 626 605 bool dirty, bool accessed) 627 606 { 628 607 int i, npages; 608 + struct address_space *mapping; 609 + struct pagevec pvec; 610 + 611 + mapping = file_inode(obj->filp)->i_mapping; 612 + mapping_clear_unevictable(mapping); 629 613 630 614 /* We already BUG_ON() for non-page-aligned sizes in 631 615 * drm_gem_object_init(), so we 
should never hit this unless ··· 640 614 641 615 npages = obj->size >> PAGE_SHIFT; 642 616 617 + pagevec_init(&pvec); 643 618 for (i = 0; i < npages; i++) { 644 619 if (dirty) 645 620 set_page_dirty(pages[i]); ··· 649 622 mark_page_accessed(pages[i]); 650 623 651 624 /* Undo the reference we took when populating the table */ 652 - put_page(pages[i]); 625 + if (!pagevec_add(&pvec, pages[i])) 626 + drm_gem_check_release_pagevec(&pvec); 653 627 } 628 + if (pagevec_count(&pvec)) 629 + drm_gem_check_release_pagevec(&pvec); 654 630 655 631 kvfree(pages); 656 632 }
+2
drivers/gpu/drm/drm_internal.h
··· 26 26 #define DRM_IF_MAJOR 1 27 27 #define DRM_IF_MINOR 4 28 28 29 + #define DRM_IF_VERSION(maj, min) (maj << 16 | min) 30 + 29 31 struct drm_prime_file_private; 30 32 struct dma_buf; 31 33
+3 -3
drivers/gpu/drm/drm_lease.c
··· 218 218 219 219 idr_for_each_entry(leases, entry, object) { 220 220 error = 0; 221 - if (!idr_find(&dev->mode_config.crtc_idr, object)) 221 + if (!idr_find(&dev->mode_config.object_idr, object)) 222 222 error = -ENOENT; 223 223 else if (!_drm_lease_held_master(lessor, object)) 224 224 error = -EACCES; ··· 439 439 /* 440 440 * We're using an IDR to hold the set of leased 441 441 * objects, but we don't need to point at the object's 442 - * data structure from the lease as the main crtc_idr 442 + * data structure from the lease as the main object_idr 443 443 * will be used to actually find that. Instead, all we 444 444 * really want is a 'leased/not-leased' result, for 445 445 * which any non-NULL pointer will work fine. ··· 687 687 688 688 if (lessee->lessor == NULL) 689 689 /* owner can use all objects */ 690 - object_idr = &lessee->dev->mode_config.crtc_idr; 690 + object_idr = &lessee->dev->mode_config.object_idr; 691 691 else 692 692 /* lessee can only use allowed object */ 693 693 object_idr = &lessee->leases;
+3 -2
drivers/gpu/drm/drm_mode_config.c
··· 393 393 INIT_LIST_HEAD(&dev->mode_config.property_list); 394 394 INIT_LIST_HEAD(&dev->mode_config.property_blob_list); 395 395 INIT_LIST_HEAD(&dev->mode_config.plane_list); 396 - idr_init(&dev->mode_config.crtc_idr); 396 + INIT_LIST_HEAD(&dev->mode_config.privobj_list); 397 + idr_init(&dev->mode_config.object_idr); 397 398 idr_init(&dev->mode_config.tile_idr); 398 399 ida_init(&dev->mode_config.connector_ida); 399 400 spin_lock_init(&dev->mode_config.connector_list_lock); ··· 497 496 498 497 ida_destroy(&dev->mode_config.connector_ida); 499 498 idr_destroy(&dev->mode_config.tile_idr); 500 - idr_destroy(&dev->mode_config.crtc_idr); 499 + idr_destroy(&dev->mode_config.object_idr); 501 500 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 502 501 } 503 502 EXPORT_SYMBOL(drm_mode_config_cleanup);
+7 -6
drivers/gpu/drm/drm_mode_object.c
··· 38 38 int ret; 39 39 40 40 mutex_lock(&dev->mode_config.idr_mutex); 41 - ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 41 + ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL, 42 42 1, 0, GFP_KERNEL); 43 43 if (ret >= 0) { 44 44 /* ··· 79 79 struct drm_mode_object *obj) 80 80 { 81 81 mutex_lock(&dev->mode_config.idr_mutex); 82 - idr_replace(&dev->mode_config.crtc_idr, obj, obj->id); 82 + idr_replace(&dev->mode_config.object_idr, obj, obj->id); 83 83 mutex_unlock(&dev->mode_config.idr_mutex); 84 84 } 85 85 ··· 99 99 { 100 100 mutex_lock(&dev->mode_config.idr_mutex); 101 101 if (object->id) { 102 - idr_remove(&dev->mode_config.crtc_idr, object->id); 102 + idr_remove(&dev->mode_config.object_idr, object->id); 103 103 object->id = 0; 104 104 } 105 105 mutex_unlock(&dev->mode_config.idr_mutex); ··· 131 131 struct drm_mode_object *obj = NULL; 132 132 133 133 mutex_lock(&dev->mode_config.idr_mutex); 134 - obj = idr_find(&dev->mode_config.crtc_idr, id); 134 + obj = idr_find(&dev->mode_config.object_idr, id); 135 135 if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type) 136 136 obj = NULL; 137 137 if (obj && obj->id != id) ··· 459 459 struct drm_modeset_acquire_ctx ctx; 460 460 int ret; 461 461 462 - drm_modeset_acquire_init(&ctx, 0); 463 - 464 462 state = drm_atomic_state_alloc(dev); 465 463 if (!state) 466 464 return -ENOMEM; 465 + 466 + drm_modeset_acquire_init(&ctx, 0); 467 467 state->acquire_ctx = &ctx; 468 + 468 469 retry: 469 470 if (prop == state->dev->mode_config.dpms_property) { 470 471 if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {
-9
drivers/gpu/drm/drm_modes.c
··· 71 71 if (!nmode) 72 72 return NULL; 73 73 74 - if (drm_mode_object_add(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) { 75 - kfree(nmode); 76 - return NULL; 77 - } 78 - 79 74 return nmode; 80 75 } 81 76 EXPORT_SYMBOL(drm_mode_create); ··· 86 91 { 87 92 if (!mode) 88 93 return; 89 - 90 - drm_mode_object_unregister(dev, &mode->base); 91 94 92 95 kfree(mode); 93 96 } ··· 904 911 */ 905 912 void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src) 906 913 { 907 - int id = dst->base.id; 908 914 struct list_head head = dst->head; 909 915 910 916 *dst = *src; 911 - dst->base.id = id; 912 917 dst->head = head; 913 918 } 914 919 EXPORT_SYMBOL(drm_mode_copy);
+8
drivers/gpu/drm/drm_modeset_lock.c
··· 22 22 */ 23 23 24 24 #include <drm/drmP.h> 25 + #include <drm/drm_atomic.h> 25 26 #include <drm/drm_crtc.h> 26 27 #include <drm/drm_modeset_lock.h> 27 28 ··· 395 394 int drm_modeset_lock_all_ctx(struct drm_device *dev, 396 395 struct drm_modeset_acquire_ctx *ctx) 397 396 { 397 + struct drm_private_obj *privobj; 398 398 struct drm_crtc *crtc; 399 399 struct drm_plane *plane; 400 400 int ret; ··· 412 410 413 411 drm_for_each_plane(plane, dev) { 414 412 ret = drm_modeset_lock(&plane->mutex, ctx); 413 + if (ret) 414 + return ret; 415 + } 416 + 417 + drm_for_each_privobj(privobj, dev) { 418 + ret = drm_modeset_lock(&privobj->lock, ctx); 415 419 if (ret) 416 420 return ret; 417 421 }
+3 -1
drivers/gpu/drm/drm_of.c
··· 217 217 } 218 218 EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint); 219 219 220 - /* 220 + /** 221 221 * drm_of_find_panel_or_bridge - return connected panel or bridge device 222 222 * @np: device tree node containing encoder output ports 223 + * @port: port in the device tree node 224 + * @endpoint: endpoint in the device tree node 223 225 * @panel: pointer to hold returned drm_panel 224 226 * @bridge: pointer to hold returned drm_bridge 225 227 *
+3
drivers/gpu/drm/drm_panel.c
··· 36 36 * The DRM panel helpers allow drivers to register panel objects with a 37 37 * central registry and provide functions to retrieve those panels in display 38 38 * drivers. 39 + * 40 + * For easy integration into drivers using the &drm_bridge infrastructure please 41 + * take look at drm_panel_bridge_add() and devm_drm_panel_bridge_add(). 39 42 */ 40 43 41 44 /**
+3
drivers/gpu/drm/drm_plane.c
··· 220 220 format_modifier_count++; 221 221 } 222 222 223 + if (format_modifier_count) 224 + config->allow_fb_modifiers = true; 225 + 223 226 plane->modifier_count = format_modifier_count; 224 227 plane->modifiers = kmalloc_array(format_modifier_count, 225 228 sizeof(format_modifiers[0]),
+31 -62
drivers/gpu/drm/drm_syncobj.c
··· 56 56 #include "drm_internal.h" 57 57 #include <drm/drm_syncobj.h> 58 58 59 + struct syncobj_wait_entry { 60 + struct list_head node; 61 + struct task_struct *task; 62 + struct dma_fence *fence; 63 + struct dma_fence_cb fence_cb; 64 + }; 65 + 66 + static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, 67 + struct syncobj_wait_entry *wait); 68 + 59 69 /** 60 70 * drm_syncobj_find - lookup and reference a sync object. 61 71 * @file_private: drm file private pointer ··· 92 82 } 93 83 EXPORT_SYMBOL(drm_syncobj_find); 94 84 95 - static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj, 96 - struct drm_syncobj_cb *cb, 97 - drm_syncobj_func_t func) 85 + static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj, 86 + struct syncobj_wait_entry *wait) 98 87 { 99 - cb->func = func; 100 - list_add_tail(&cb->node, &syncobj->cb_list); 101 - } 102 - 103 - static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, 104 - struct dma_fence **fence, 105 - struct drm_syncobj_cb *cb, 106 - drm_syncobj_func_t func) 107 - { 108 - int ret; 109 - 110 - *fence = drm_syncobj_fence_get(syncobj); 111 - if (*fence) 112 - return 1; 88 + if (wait->fence) 89 + return; 113 90 114 91 spin_lock(&syncobj->lock); 115 92 /* We've already tried once to get a fence and failed. Now that we 116 93 * have the lock, try one more time just to be sure we don't add a 117 94 * callback when a fence has already been set. 
118 95 */ 119 - if (syncobj->fence) { 120 - *fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 121 - lockdep_is_held(&syncobj->lock))); 122 - ret = 1; 123 - } else { 124 - *fence = NULL; 125 - drm_syncobj_add_callback_locked(syncobj, cb, func); 126 - ret = 0; 127 - } 128 - spin_unlock(&syncobj->lock); 129 - 130 - return ret; 131 - } 132 - 133 - void drm_syncobj_add_callback(struct drm_syncobj *syncobj, 134 - struct drm_syncobj_cb *cb, 135 - drm_syncobj_func_t func) 136 - { 137 - spin_lock(&syncobj->lock); 138 - drm_syncobj_add_callback_locked(syncobj, cb, func); 96 + if (syncobj->fence) 97 + wait->fence = dma_fence_get( 98 + rcu_dereference_protected(syncobj->fence, 1)); 99 + else 100 + list_add_tail(&wait->node, &syncobj->cb_list); 139 101 spin_unlock(&syncobj->lock); 140 102 } 141 103 142 - void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, 143 - struct drm_syncobj_cb *cb) 104 + static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj, 105 + struct syncobj_wait_entry *wait) 144 106 { 107 + if (!wait->node.next) 108 + return; 109 + 145 110 spin_lock(&syncobj->lock); 146 - list_del_init(&cb->node); 111 + list_del_init(&wait->node); 147 112 spin_unlock(&syncobj->lock); 148 113 } 149 114 ··· 133 148 struct dma_fence *fence) 134 149 { 135 150 struct dma_fence *old_fence; 136 - struct drm_syncobj_cb *cur, *tmp; 151 + struct syncobj_wait_entry *cur, *tmp; 137 152 138 153 if (fence) 139 154 dma_fence_get(fence); ··· 147 162 if (fence != old_fence) { 148 163 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { 149 164 list_del_init(&cur->node); 150 - cur->func(syncobj, cur); 165 + syncobj_wait_syncobj_func(syncobj, cur); 151 166 } 152 167 } 153 168 ··· 593 608 &args->handle); 594 609 } 595 610 596 - struct syncobj_wait_entry { 597 - struct task_struct *task; 598 - struct dma_fence *fence; 599 - struct dma_fence_cb fence_cb; 600 - struct drm_syncobj_cb syncobj_cb; 601 - }; 602 - 603 611 static void syncobj_wait_fence_func(struct 
dma_fence *fence, 604 612 struct dma_fence_cb *cb) 605 613 { ··· 603 625 } 604 626 605 627 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, 606 - struct drm_syncobj_cb *cb) 628 + struct syncobj_wait_entry *wait) 607 629 { 608 - struct syncobj_wait_entry *wait = 609 - container_of(cb, struct syncobj_wait_entry, syncobj_cb); 610 - 611 630 /* This happens inside the syncobj lock */ 612 631 wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 613 632 lockdep_is_held(&syncobj->lock))); ··· 663 688 */ 664 689 665 690 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 666 - for (i = 0; i < count; ++i) { 667 - drm_syncobj_fence_get_or_add_callback(syncobjs[i], 668 - &entries[i].fence, 669 - &entries[i].syncobj_cb, 670 - syncobj_wait_syncobj_func); 671 - } 691 + for (i = 0; i < count; ++i) 692 + drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); 672 693 } 673 694 674 695 do { ··· 713 742 714 743 cleanup_entries: 715 744 for (i = 0; i < count; ++i) { 716 - if (entries[i].syncobj_cb.func) 717 - drm_syncobj_remove_callback(syncobjs[i], 718 - &entries[i].syncobj_cb); 745 + drm_syncobj_remove_wait(syncobjs[i], &entries[i]); 719 746 if (entries[i].fence_cb.func) 720 747 dma_fence_remove_callback(entries[i].fence, 721 748 &entries[i].fence_cb);
+42 -3
drivers/gpu/drm/drm_vblank.c
··· 105 105 write_sequnlock(&vblank->seqlock); 106 106 } 107 107 108 + static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe) 109 + { 110 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 111 + 112 + return vblank->max_vblank_count ?: dev->max_vblank_count; 113 + } 114 + 108 115 /* 109 116 * "No hw counter" fallback implementation of .get_vblank_counter() hook, 110 117 * if there is no useable hardware frame counter available. 111 118 */ 112 119 static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe) 113 120 { 114 - WARN_ON_ONCE(dev->max_vblank_count != 0); 121 + WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0); 115 122 return 0; 116 123 } 117 124 ··· 205 198 ktime_t t_vblank; 206 199 int count = DRM_TIMESTAMP_MAXRETRIES; 207 200 int framedur_ns = vblank->framedur_ns; 201 + u32 max_vblank_count = drm_max_vblank_count(dev, pipe); 208 202 209 203 /* 210 204 * Interrupts were disabled prior to this call, so deal with counter ··· 224 216 rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq); 225 217 } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0); 226 218 227 - if (dev->max_vblank_count != 0) { 219 + if (max_vblank_count) { 228 220 /* trust the hw counter when it's around */ 229 - diff = (cur_vblank - vblank->last) & dev->max_vblank_count; 221 + diff = (cur_vblank - vblank->last) & max_vblank_count; 230 222 } else if (rc && framedur_ns) { 231 223 u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time)); 232 224 ··· 1211 1203 WARN_ON(!list_empty(&dev->vblank_event_list)); 1212 1204 } 1213 1205 EXPORT_SYMBOL(drm_crtc_vblank_reset); 1206 + 1207 + /** 1208 + * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value 1209 + * @crtc: CRTC in question 1210 + * @max_vblank_count: max hardware vblank counter value 1211 + * 1212 + * Update the maximum hardware vblank counter value for @crtc 1213 + * at runtime. 
Useful for hardware where the operation of the 1214 + * hardware vblank counter depends on the currently active 1215 + * display configuration. 1216 + * 1217 + * For example, if the hardware vblank counter does not work 1218 + * when a specific connector is active the maximum can be set 1219 + * to zero. And when that specific connector isn't active the 1220 + * maximum can again be set to the appropriate non-zero value. 1221 + * 1222 + * If used, must be called before drm_vblank_on(). 1223 + */ 1224 + void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, 1225 + u32 max_vblank_count) 1226 + { 1227 + struct drm_device *dev = crtc->dev; 1228 + unsigned int pipe = drm_crtc_index(crtc); 1229 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1230 + 1231 + WARN_ON(dev->max_vblank_count); 1232 + WARN_ON(!READ_ONCE(vblank->inmodeset)); 1233 + 1234 + vblank->max_vblank_count = max_vblank_count; 1235 + } 1236 + EXPORT_SYMBOL(drm_crtc_set_max_vblank_count); 1214 1237 1215 1238 /** 1216 1239 * drm_crtc_vblank_on - enable vblank events on a CRTC
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 449 449 const char *type, struct seq_file *m) 450 450 { 451 451 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 452 - seq_printf(m, "\t%9s: %s %s seq %u\n", 452 + seq_printf(m, "\t%9s: %s %s seq %llu\n", 453 453 type, 454 454 fence->ops->get_driver_name(fence), 455 455 fence->ops->get_timeline_name(fence),
+2 -2
drivers/gpu/drm/exynos/exynos_drm_mic.c
··· 246 246 } 247 247 248 248 static void mic_mode_set(struct drm_bridge *bridge, 249 - struct drm_display_mode *mode, 250 - struct drm_display_mode *adjusted_mode) 249 + const struct drm_display_mode *mode, 250 + const struct drm_display_mode *adjusted_mode) 251 251 { 252 252 struct exynos_mic *mic = bridge->driver_private; 253 253
+2 -1
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 819 819 return; 820 820 } 821 821 822 - ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, m, false); 822 + ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, 823 + &hdata->connector, m); 823 824 if (!ret) 824 825 ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf)); 825 826 if (ret > 0) {
+3 -35
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
··· 22 22 #include <drm/drmP.h> 23 23 #include <drm/drm_gem_cma_helper.h> 24 24 #include <drm/drm_fb_cma_helper.h> 25 + #include <drm/drm_fb_helper.h> 25 26 #include <drm/drm_gem_framebuffer_helper.h> 26 27 #include <drm/drm_atomic_helper.h> 27 28 #include <drm/drm_crtc_helper.h> ··· 34 33 35 34 static int kirin_drm_kms_cleanup(struct drm_device *dev) 36 35 { 37 - struct kirin_drm_private *priv = dev->dev_private; 38 - 39 - if (priv->fbdev) { 40 - drm_fbdev_cma_fini(priv->fbdev); 41 - priv->fbdev = NULL; 42 - } 43 - 44 36 drm_kms_helper_poll_fini(dev); 45 37 dc_ops->cleanup(to_platform_device(dev->dev)); 46 38 drm_mode_config_cleanup(dev); 47 - devm_kfree(dev->dev, priv); 48 - dev->dev_private = NULL; 49 39 50 40 return 0; 51 41 } 52 42 53 - static void kirin_fbdev_output_poll_changed(struct drm_device *dev) 54 - { 55 - struct kirin_drm_private *priv = dev->dev_private; 56 - 57 - drm_fbdev_cma_hotplug_event(priv->fbdev); 58 - } 59 - 60 43 static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = { 61 44 .fb_create = drm_gem_fb_create, 62 - .output_poll_changed = kirin_fbdev_output_poll_changed, 63 45 .atomic_check = drm_atomic_helper_check, 64 46 .atomic_commit = drm_atomic_helper_commit, 65 47 }; ··· 60 76 61 77 static int kirin_drm_kms_init(struct drm_device *dev) 62 78 { 63 - struct kirin_drm_private *priv; 64 79 int ret; 65 80 66 - priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL); 67 - if (!priv) 68 - return -ENOMEM; 69 - 70 - dev->dev_private = priv; 71 81 dev_set_drvdata(dev->dev, dev); 72 82 73 83 /* dev->mode_config initialization */ ··· 95 117 /* init kms poll for handling hpd */ 96 118 drm_kms_helper_poll_init(dev); 97 119 98 - priv->fbdev = drm_fbdev_cma_init(dev, 32, 99 - dev->mode_config.num_connector); 100 - 101 - if (IS_ERR(priv->fbdev)) { 102 - DRM_ERROR("failed to initialize fbdev.\n"); 103 - ret = PTR_ERR(priv->fbdev); 104 - goto err_cleanup_poll; 105 - } 106 120 return 0; 107 121 108 - err_cleanup_poll: 109 - 
drm_kms_helper_poll_fini(dev); 110 122 err_unbind_all: 111 123 component_unbind_all(dev->dev, dev); 112 124 err_dc_cleanup: 113 125 dc_ops->cleanup(to_platform_device(dev->dev)); 114 126 err_mode_config_cleanup: 115 127 drm_mode_config_cleanup(dev); 116 - devm_kfree(dev->dev, priv); 117 - dev->dev_private = NULL; 118 128 119 129 return ret; 120 130 } ··· 164 198 ret = drm_dev_register(drm_dev, 0); 165 199 if (ret) 166 200 goto err_kms_cleanup; 201 + 202 + drm_fbdev_generic_setup(drm_dev, 32); 167 203 168 204 return 0; 169 205
-4
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
··· 19 19 void (*cleanup)(struct platform_device *pdev); 20 20 }; 21 21 22 - struct kirin_drm_private { 23 - struct drm_fbdev_cma *fbdev; 24 - }; 25 - 26 22 extern const struct kirin_dc_ops ade_dc_ops; 27 23 28 24 #endif /* __KIRIN_DRM_DRV_H__ */
+3 -3
drivers/gpu/drm/i2c/ch7006_drv.c
··· 359 359 if (modes_changed) { 360 360 drm_helper_probe_single_connector_modes(connector, 0, 0); 361 361 362 - /* Disable the crtc to ensure a full modeset is 363 - * performed whenever it's turned on again. */ 364 362 if (crtc) 365 - drm_crtc_force_disable(crtc); 363 + drm_crtc_helper_set_mode(crtc, &crtc->mode, 364 + crtc->x, crtc->y, 365 + crtc->primary->fb); 366 366 } 367 367 368 368 return 0;
+5 -5
drivers/gpu/drm/i2c/tda998x_drv.c
··· 845 845 } 846 846 847 847 static void 848 - tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode) 848 + tda998x_write_avi(struct tda998x_priv *priv, const struct drm_display_mode *mode) 849 849 { 850 850 union hdmi_infoframe frame; 851 851 852 - drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); 852 + drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 853 + &priv->connector, mode); 853 854 frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL; 854 855 855 856 tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame); ··· 1123 1122 } 1124 1123 1125 1124 static const struct drm_connector_funcs tda998x_connector_funcs = { 1126 - .dpms = drm_helper_connector_dpms, 1127 1125 .reset = drm_atomic_helper_connector_reset, 1128 1126 .fill_modes = drm_helper_probe_single_connector_modes, 1129 1127 .detect = tda998x_connector_detect, ··· 1339 1339 } 1340 1340 1341 1341 static void tda998x_bridge_mode_set(struct drm_bridge *bridge, 1342 - struct drm_display_mode *mode, 1343 - struct drm_display_mode *adjusted_mode) 1342 + const struct drm_display_mode *mode, 1343 + const struct drm_display_mode *adjusted_mode) 1344 1344 { 1345 1345 struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge); 1346 1346 unsigned long tmds_clock;
+1 -8
drivers/gpu/drm/i915/i915_debugfs.c
··· 2797 2797 for (i = 0; i < tabs; i++) 2798 2798 seq_putc(m, '\t'); 2799 2799 2800 - seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2801 - mode->base.id, mode->name, 2802 - mode->vrefresh, mode->clock, 2803 - mode->hdisplay, mode->hsync_start, 2804 - mode->hsync_end, mode->htotal, 2805 - mode->vdisplay, mode->vsync_start, 2806 - mode->vsync_end, mode->vtotal, 2807 - mode->type, mode->flags); 2800 + seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 2808 2801 } 2809 2802 2810 2803 static void intel_encoder_info(struct seq_file *m,
+4 -4
drivers/gpu/drm/i915/i915_gem_context.c
··· 645 645 rq = i915_gem_active_raw(&timeline->last_request, 646 646 &engine->i915->drm.struct_mutex); 647 647 if (rq && rq->engine == engine) { 648 - GEM_TRACE("last request for %s on engine %s: %llx:%d\n", 648 + GEM_TRACE("last request for %s on engine %s: %llx:%llu\n", 649 649 timeline->name, engine->name, 650 650 rq->fence.context, rq->fence.seqno); 651 651 GEM_BUG_ON(rq->timeline != timeline); ··· 682 682 * switch-to-kernel-context? 683 683 */ 684 684 if (!i915_timeline_sync_is_later(barrier, &rq->fence)) { 685 - GEM_TRACE("%s needs barrier for %llx:%d\n", 685 + GEM_TRACE("%s needs barrier for %llx:%lld\n", 686 686 ring->timeline->name, 687 687 rq->fence.context, 688 688 rq->fence.seqno); 689 689 return false; 690 690 } 691 691 692 - GEM_TRACE("%s has barrier after %llx:%d\n", 692 + GEM_TRACE("%s has barrier after %llx:%lld\n", 693 693 ring->timeline->name, 694 694 rq->fence.context, 695 695 rq->fence.seqno); ··· 745 745 if (prev->gem_context == i915->kernel_context) 746 746 continue; 747 747 748 - GEM_TRACE("add barrier on %s for %llx:%d\n", 748 + GEM_TRACE("add barrier on %s for %llx:%lld\n", 749 749 engine->name, 750 750 prev->fence.context, 751 751 prev->fence.seqno);
+6 -6
drivers/gpu/drm/i915/i915_request.c
··· 182 182 static void __retire_engine_request(struct intel_engine_cs *engine, 183 183 struct i915_request *rq) 184 184 { 185 - GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n", 185 + GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d\n", 186 186 __func__, engine->name, 187 187 rq->fence.context, rq->fence.seqno, 188 188 rq->global_seqno, ··· 244 244 { 245 245 struct i915_gem_active *active, *next; 246 246 247 - GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n", 247 + GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n", 248 248 request->engine->name, 249 249 request->fence.context, request->fence.seqno, 250 250 request->global_seqno, ··· 307 307 struct intel_ring *ring = rq->ring; 308 308 struct i915_request *tmp; 309 309 310 - GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n", 310 + GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n", 311 311 rq->engine->name, 312 312 rq->fence.context, rq->fence.seqno, 313 313 rq->global_seqno, ··· 355 355 struct intel_engine_cs *engine = request->engine; 356 356 u32 seqno; 357 357 358 - GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n", 358 + GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d\n", 359 359 engine->name, 360 360 request->fence.context, request->fence.seqno, 361 361 engine->timeline.seqno + 1, ··· 405 405 { 406 406 struct intel_engine_cs *engine = request->engine; 407 407 408 - GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n", 408 + GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d\n", 409 409 engine->name, 410 410 request->fence.context, request->fence.seqno, 411 411 request->global_seqno, ··· 874 874 struct i915_request *prev; 875 875 u32 *cs; 876 876 877 - GEM_TRACE("%s fence %llx:%d\n", 877 + GEM_TRACE("%s fence %llx:%lld\n", 878 878 engine->name, request->fence.context, request->fence.seqno); 879 879 880 880 lockdep_assert_held(&request->i915->drm.struct_mutex);
+1 -1
drivers/gpu/drm/i915/i915_sw_fence.c
··· 390 390 if (!fence) 391 391 return; 392 392 393 - pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n", 393 + pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n", 394 394 cb->dma->ops->get_driver_name(cb->dma), 395 395 cb->dma->ops->get_timeline_name(cb->dma), 396 396 cb->dma->seqno,
+4
drivers/gpu/drm/i915/intel_connector.c
··· 94 94 intel_panel_fini(&intel_connector->panel); 95 95 96 96 drm_connector_cleanup(connector); 97 + 98 + if (intel_connector->port) 99 + drm_dp_mst_put_port_malloc(intel_connector->port); 100 + 97 101 kfree(connector); 98 102 } 99 103
+4
drivers/gpu/drm/i915/intel_display.c
··· 12739 12739 "[modeset]" : "[fastset]"); 12740 12740 } 12741 12741 12742 + ret = drm_dp_mst_atomic_check(state); 12743 + if (ret) 12744 + return ret; 12745 + 12742 12746 if (any_ms) { 12743 12747 ret = intel_modeset_checks(state); 12744 12748
+32 -33
drivers/gpu/drm/i915/intel_dp_mst.c
··· 40 40 struct drm_connector *connector = conn_state->connector; 41 41 void *port = to_intel_connector(connector)->port; 42 42 struct drm_atomic_state *state = pipe_config->base.state; 43 + struct drm_crtc *crtc = pipe_config->base.crtc; 44 + struct drm_crtc_state *old_crtc_state = 45 + drm_atomic_get_old_crtc_state(state, crtc); 43 46 int bpp; 44 - int lane_count, slots = 0; 47 + int lane_count, slots = 48 + to_intel_crtc_state(old_crtc_state)->dp_m_n.tu; 45 49 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 46 50 int mst_pbn; 47 51 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, ··· 110 106 return 0; 111 107 } 112 108 113 - static int intel_dp_mst_atomic_check(struct drm_connector *connector, 114 - struct drm_connector_state *new_conn_state) 109 + static int 110 + intel_dp_mst_atomic_check(struct drm_connector *connector, 111 + struct drm_connector_state *new_conn_state) 115 112 { 116 113 struct drm_atomic_state *state = new_conn_state->state; 117 - struct drm_connector_state *old_conn_state; 118 - struct drm_crtc *old_crtc; 114 + struct drm_connector_state *old_conn_state = 115 + drm_atomic_get_old_connector_state(state, connector); 116 + struct intel_connector *intel_connector = 117 + to_intel_connector(connector); 118 + struct drm_crtc *new_crtc = new_conn_state->crtc; 119 119 struct drm_crtc_state *crtc_state; 120 - int slots, ret = 0; 120 + struct drm_dp_mst_topology_mgr *mgr; 121 + int ret = 0; 121 122 122 - old_conn_state = drm_atomic_get_old_connector_state(state, connector); 123 - old_crtc = old_conn_state->crtc; 124 - if (!old_crtc) 125 - return ret; 123 + if (!old_conn_state->crtc) 124 + return 0; 126 125 127 - crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc); 128 - slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu; 129 - if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) { 130 - struct drm_dp_mst_topology_mgr *mgr; 131 - struct drm_encoder *old_encoder; 126 + /* We only want to free VCPI if 
this state disables the CRTC on this 127 + * connector 128 + */ 129 + if (new_crtc) { 130 + crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); 132 131 133 - old_encoder = old_conn_state->best_encoder; 134 - mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr; 135 - 136 - ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots); 137 - if (ret) 138 - DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret); 139 - else 140 - to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0; 132 + if (!crtc_state || 133 + !drm_atomic_crtc_needs_modeset(crtc_state) || 134 + crtc_state->enable) 135 + return 0; 141 136 } 137 + 138 + mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr; 139 + ret = drm_dp_atomic_release_vcpi_slots(state, mgr, 140 + intel_connector->port); 141 + 142 142 return ret; 143 143 } 144 144 ··· 464 456 intel_connector->get_hw_state = intel_dp_mst_get_hw_state; 465 457 intel_connector->mst_port = intel_dp; 466 458 intel_connector->port = port; 459 + drm_dp_mst_get_port_malloc(port); 467 460 468 461 connector = &intel_connector->base; 469 462 ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, ··· 525 516 drm_connector_put(connector); 526 517 } 527 518 528 - static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) 529 - { 530 - struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); 531 - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 532 - struct drm_device *dev = intel_dig_port->base.base.dev; 533 - 534 - drm_kms_helper_hotplug_event(dev); 535 - } 536 - 537 519 static const struct drm_dp_mst_topology_cbs mst_cbs = { 538 520 .add_connector = intel_dp_add_mst_connector, 539 521 .register_connector = intel_dp_register_mst_connector, 540 522 .destroy_connector = intel_dp_destroy_mst_connector, 541 - .hotplug = intel_dp_mst_hotplug, 542 523 }; 543 524 544 525 static struct intel_dp_mst_encoder *
-1
drivers/gpu/drm/i915/intel_drv.h
··· 1079 1079 } dp_dual_mode; 1080 1080 bool has_hdmi_sink; 1081 1081 bool has_audio; 1082 - bool rgb_quant_range_selectable; 1083 1082 struct intel_connector *attached_connector; 1084 1083 struct cec_notifier *cec_notifier; 1085 1084 };
+1 -1
drivers/gpu/drm/i915/intel_engine_cs.c
··· 1226 1226 1227 1227 x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf)); 1228 1228 1229 - drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n", 1229 + drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n", 1230 1230 prefix, 1231 1231 rq->global_seqno, 1232 1232 i915_request_completed(rq) ? "!" : "",
+6 -14
drivers/gpu/drm/i915/intel_hdmi.c
··· 478 478 const struct intel_crtc_state *crtc_state, 479 479 const struct drm_connector_state *conn_state) 480 480 { 481 - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 482 481 const struct drm_display_mode *adjusted_mode = 483 482 &crtc_state->base.adjusted_mode; 484 - struct drm_connector *connector = &intel_hdmi->attached_connector->base; 485 - bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported || 486 - connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420; 487 483 union hdmi_infoframe frame; 488 484 int ret; 489 485 490 486 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 491 - adjusted_mode, 492 - is_hdmi2_sink); 487 + conn_state->connector, 488 + adjusted_mode); 493 489 if (ret < 0) { 494 490 DRM_ERROR("couldn't fill AVI infoframe\n"); 495 491 return; ··· 498 502 else 499 503 frame.avi.colorspace = HDMI_COLORSPACE_RGB; 500 504 501 - drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode, 505 + drm_hdmi_avi_infoframe_quant_range(&frame.avi, 506 + conn_state->connector, 507 + adjusted_mode, 502 508 crtc_state->limited_color_range ? 
503 509 HDMI_QUANTIZATION_RANGE_LIMITED : 504 - HDMI_QUANTIZATION_RANGE_FULL, 505 - intel_hdmi->rgb_quant_range_selectable, 506 - is_hdmi2_sink); 510 + HDMI_QUANTIZATION_RANGE_FULL); 507 511 508 512 drm_hdmi_avi_infoframe_content_type(&frame.avi, 509 513 conn_state); ··· 1832 1836 1833 1837 intel_hdmi->has_hdmi_sink = false; 1834 1838 intel_hdmi->has_audio = false; 1835 - intel_hdmi->rgb_quant_range_selectable = false; 1836 1839 1837 1840 intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE; 1838 1841 intel_hdmi->dp_dual_mode.max_tmds_clock = 0; ··· 1916 1921 1917 1922 to_intel_connector(connector)->detect_edid = edid; 1918 1923 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 1919 - intel_hdmi->rgb_quant_range_selectable = 1920 - drm_rgb_quant_range_selectable(edid); 1921 - 1922 1924 intel_hdmi->has_audio = drm_detect_monitor_audio(edid); 1923 1925 intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 1924 1926
+3 -3
drivers/gpu/drm/i915/intel_lrc.c
··· 435 435 desc = execlists_update_context(rq); 436 436 GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc)); 437 437 438 - GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", 438 + GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n", 439 439 engine->name, n, 440 440 port[n].context_id, count, 441 441 rq->global_seqno, ··· 728 728 while (num_ports-- && port_isset(port)) { 729 729 struct i915_request *rq = port_request(port); 730 730 731 - GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n", 731 + GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d)\n", 732 732 rq->engine->name, 733 733 (unsigned int)(port - execlists->port), 734 734 rq->global_seqno, ··· 956 956 EXECLISTS_ACTIVE_USER)); 957 957 958 958 rq = port_unpack(port, &count); 959 - GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", 959 + GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n", 960 960 engine->name, 961 961 port->context_id, count, 962 962 rq ? rq->global_seqno : 0,
+8 -8
drivers/gpu/drm/i915/intel_lspcon.c
··· 462 462 u8 buf[VIDEO_DIP_DATA_SIZE]; 463 463 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); 464 464 struct intel_lspcon *lspcon = &dig_port->lspcon; 465 - struct intel_dp *intel_dp = &dig_port->dp; 466 - struct drm_connector *connector = &intel_dp->attached_connector->base; 467 - const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode; 468 - bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported; 465 + const struct drm_display_mode *adjusted_mode = 466 + &crtc_state->base.adjusted_mode; 469 467 470 468 if (!lspcon->active) { 471 469 DRM_ERROR("Writing infoframes while LSPCON disabled ?\n"); ··· 471 473 } 472 474 473 475 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 474 - mode, is_hdmi2_sink); 476 + conn_state->connector, 477 + adjusted_mode); 475 478 if (ret < 0) { 476 479 DRM_ERROR("couldn't fill AVI infoframe\n"); 477 480 return; ··· 487 488 frame.avi.colorspace = HDMI_COLORSPACE_RGB; 488 489 } 489 490 490 - drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode, 491 + drm_hdmi_avi_infoframe_quant_range(&frame.avi, 492 + conn_state->connector, 493 + adjusted_mode, 491 494 crtc_state->limited_color_range ? 492 495 HDMI_QUANTIZATION_RANGE_LIMITED : 493 - HDMI_QUANTIZATION_RANGE_FULL, 494 - false, is_hdmi2_sink); 496 + HDMI_QUANTIZATION_RANGE_FULL); 495 497 496 498 ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); 497 499 if (ret < 0) {
+14 -16
drivers/gpu/drm/i915/intel_sdvo.c
··· 102 102 103 103 bool has_hdmi_monitor; 104 104 bool has_hdmi_audio; 105 - bool rgb_quant_range_selectable; 106 105 107 106 /* DDC bus used by this SDVO encoder */ 108 107 uint8_t ddc_bus; ··· 979 980 } 980 981 981 982 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, 982 - const struct intel_crtc_state *pipe_config) 983 + const struct intel_crtc_state *pipe_config, 984 + const struct drm_connector_state *conn_state) 983 985 { 986 + const struct drm_display_mode *adjusted_mode = 987 + &pipe_config->base.adjusted_mode; 984 988 uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; 985 989 union hdmi_infoframe frame; 986 990 int ret; 987 991 ssize_t len; 988 992 989 993 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 990 - &pipe_config->base.adjusted_mode, 991 - false); 994 + conn_state->connector, 995 + adjusted_mode); 992 996 if (ret < 0) { 993 997 DRM_ERROR("couldn't fill AVI infoframe\n"); 994 998 return false; 995 999 } 996 1000 997 - if (intel_sdvo->rgb_quant_range_selectable) { 998 - if (pipe_config->limited_color_range) 999 - frame.avi.quantization_range = 1000 - HDMI_QUANTIZATION_RANGE_LIMITED; 1001 - else 1002 - frame.avi.quantization_range = 1003 - HDMI_QUANTIZATION_RANGE_FULL; 1004 - } 1001 + drm_hdmi_avi_infoframe_quant_range(&frame.avi, 1002 + conn_state->connector, 1003 + adjusted_mode, 1004 + pipe_config->limited_color_range ? 
1005 + HDMI_QUANTIZATION_RANGE_LIMITED : 1006 + HDMI_QUANTIZATION_RANGE_FULL); 1005 1007 1006 1008 len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data)); 1007 1009 if (len < 0) ··· 1315 1315 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); 1316 1316 intel_sdvo_set_colorimetry(intel_sdvo, 1317 1317 SDVO_COLORIMETRY_RGB256); 1318 - intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state); 1318 + intel_sdvo_set_avi_infoframe(intel_sdvo, 1319 + crtc_state, conn_state); 1319 1320 } else 1320 1321 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); 1321 1322 ··· 1802 1801 if (intel_sdvo_connector->is_hdmi) { 1803 1802 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); 1804 1803 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); 1805 - intel_sdvo->rgb_quant_range_selectable = 1806 - drm_rgb_quant_range_selectable(edid); 1807 1804 } 1808 1805 } else 1809 1806 status = connector_status_disconnected; ··· 1850 1851 1851 1852 intel_sdvo->has_hdmi_monitor = false; 1852 1853 intel_sdvo->has_hdmi_audio = false; 1853 - intel_sdvo->rgb_quant_range_selectable = false; 1854 1854 1855 1855 if ((intel_sdvo_connector->output_flag & response) == 0) 1856 1856 ret = connector_status_disconnected;
+7 -7
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
··· 475 475 if (!wait_until_running(&h, rq)) { 476 476 struct drm_printer p = drm_info_printer(i915->drm.dev); 477 477 478 - pr_err("%s: Failed to start request %x, at %x\n", 478 + pr_err("%s: Failed to start request %llx, at %x\n", 479 479 __func__, rq->fence.seqno, hws_seqno(&h, rq)); 480 480 intel_engine_dump(engine, &p, 481 481 "%s\n", engine->name); ··· 576 576 return 0; 577 577 578 578 if (i915_request_wait(rq, 0, 5 * HZ) < 0) { 579 - GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n", 579 + GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n", 580 580 rq->engine->name, 581 581 rq->fence.context, 582 582 rq->fence.seqno, ··· 753 753 if (!wait_until_running(&h, rq)) { 754 754 struct drm_printer p = drm_info_printer(i915->drm.dev); 755 755 756 - pr_err("%s: Failed to start request %x, at %x\n", 756 + pr_err("%s: Failed to start request %llx, at %x\n", 757 757 __func__, rq->fence.seqno, hws_seqno(&h, rq)); 758 758 intel_engine_dump(engine, &p, 759 759 "%s\n", engine->name); ··· 952 952 if (!wait_until_running(&h, rq)) { 953 953 struct drm_printer p = drm_info_printer(i915->drm.dev); 954 954 955 - pr_err("%s: Failed to start request %x, at %x\n", 955 + pr_err("%s: Failed to start request %llx, at %x\n", 956 956 __func__, rq->fence.seqno, hws_seqno(&h, rq)); 957 957 intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); 958 958 ··· 1131 1131 if (!wait_until_running(&h, rq)) { 1132 1132 struct drm_printer p = drm_info_printer(i915->drm.dev); 1133 1133 1134 - pr_err("%s: Failed to start request %x, at %x\n", 1134 + pr_err("%s: Failed to start request %llx, at %x\n", 1135 1135 __func__, rq->fence.seqno, hws_seqno(&h, rq)); 1136 1136 intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); 1137 1137 ··· 1326 1326 if (!wait_until_running(&h, prev)) { 1327 1327 struct drm_printer p = drm_info_printer(i915->drm.dev); 1328 1328 1329 - pr_err("%s(%s): Failed to start request %x, at %x\n", 1329 + 
pr_err("%s(%s): Failed to start request %llx, at %x\n", 1330 1330 __func__, engine->name, 1331 1331 prev->fence.seqno, hws_seqno(&h, prev)); 1332 1332 intel_engine_dump(engine, &p, ··· 1437 1437 if (!wait_until_running(&h, rq)) { 1438 1438 struct drm_printer p = drm_info_printer(i915->drm.dev); 1439 1439 1440 - pr_err("%s: Failed to start request %x, at %x\n", 1440 + pr_err("%s: Failed to start request %llx, at %x\n", 1441 1441 __func__, rq->fence.seqno, hws_seqno(&h, rq)); 1442 1442 intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); 1443 1443
+4 -3
drivers/gpu/drm/mediatek/mtk_hdmi.c
··· 981 981 u8 buffer[17]; 982 982 ssize_t err; 983 983 984 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 984 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, 985 + &hdmi->conn, mode); 985 986 if (err < 0) { 986 987 dev_err(hdmi->dev, 987 988 "Failed to get AVI infoframe from mode: %zd\n", err); ··· 1371 1370 } 1372 1371 1373 1372 static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge, 1374 - struct drm_display_mode *mode, 1375 - struct drm_display_mode *adjusted_mode) 1373 + const struct drm_display_mode *mode, 1374 + const struct drm_display_mode *adjusted_mode) 1376 1375 { 1377 1376 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); 1378 1377
+20
drivers/gpu/drm/meson/meson_drv.c
··· 152 152 writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1)); 153 153 } 154 154 155 + static void meson_remove_framebuffers(void) 156 + { 157 + struct apertures_struct *ap; 158 + 159 + ap = alloc_apertures(1); 160 + if (!ap) 161 + return; 162 + 163 + /* The framebuffer can be located anywhere in RAM */ 164 + ap->ranges[0].base = 0; 165 + ap->ranges[0].size = ~0; 166 + 167 + drm_fb_helper_remove_conflicting_framebuffers(ap, "meson-drm-fb", 168 + false); 169 + kfree(ap); 170 + } 171 + 155 172 static int meson_drv_bind_master(struct device *dev, bool has_components) 156 173 { 157 174 struct platform_device *pdev = to_platform_device(dev); ··· 278 261 ret = drm_vblank_init(drm, 1); 279 262 if (ret) 280 263 goto free_drm; 264 + 265 + /* Remove early framebuffers (ie. simplefb) */ 266 + meson_remove_framebuffers(); 281 267 282 268 drm_mode_config_init(drm); 283 269 drm->mode_config.max_width = 3840;
+3 -9
drivers/gpu/drm/meson/meson_dw_hdmi.c
··· 365 365 unsigned int wr_clk = 366 366 readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING)); 367 367 368 - DRM_DEBUG_DRIVER("%d:\"%s\"\n", mode->base.id, mode->name); 368 + DRM_DEBUG_DRIVER("\"%s\"\n", mode->name); 369 369 370 370 /* Enable clocks */ 371 371 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); ··· 555 555 int vic = drm_match_cea_mode(mode); 556 556 enum drm_mode_status status; 557 557 558 - DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", 559 - mode->base.id, mode->name, mode->vrefresh, mode->clock, 560 - mode->hdisplay, mode->hsync_start, 561 - mode->hsync_end, mode->htotal, 562 - mode->vdisplay, mode->vsync_start, 563 - mode->vsync_end, mode->vtotal, mode->type, mode->flags); 558 + DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 564 559 565 560 /* Check against non-VIC supported modes */ 566 561 if (!vic) { ··· 645 650 struct meson_drm *priv = dw_hdmi->priv; 646 651 int vic = drm_match_cea_mode(mode); 647 652 648 - DRM_DEBUG_DRIVER("%d:\"%s\" vic %d\n", 649 - mode->base.id, mode->name, vic); 653 + DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic); 650 654 651 655 /* VENC + VENC-DVI Mode setup */ 652 656 meson_venc_hdmi_mode_set(priv, vic, mode);
+1
drivers/gpu/drm/mgag200/mgag200_fb.c
··· 12 12 */ 13 13 #include <linux/module.h> 14 14 #include <drm/drmP.h> 15 + #include <drm/drm_util.h> 15 16 #include <drm/drm_fb_helper.h> 16 17 #include <drm/drm_crtc_helper.h> 17 18
+2 -8
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
··· 244 244 245 245 mode = &crtc->state->adjusted_mode; 246 246 247 - DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 248 - mdp4_crtc->name, mode->base.id, mode->name, 249 - mode->vrefresh, mode->clock, 250 - mode->hdisplay, mode->hsync_start, 251 - mode->hsync_end, mode->htotal, 252 - mode->vdisplay, mode->vsync_start, 253 - mode->vsync_end, mode->vtotal, 254 - mode->type, mode->flags); 247 + DBG("%s: set mode: " DRM_MODE_FMT, 248 + mdp4_crtc->name, DRM_MODE_ARG(mode)); 255 249 256 250 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), 257 251 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
+1 -8
drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
··· 58 58 59 59 mode = adjusted_mode; 60 60 61 - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 62 - mode->base.id, mode->name, 63 - mode->vrefresh, mode->clock, 64 - mode->hdisplay, mode->hsync_start, 65 - mode->hsync_end, mode->htotal, 66 - mode->vdisplay, mode->vsync_start, 67 - mode->vsync_end, mode->vtotal, 68 - mode->type, mode->flags); 61 + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); 69 62 70 63 ctrl_pol = 0; 71 64 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+1 -8
drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
··· 104 104 105 105 mode = adjusted_mode; 106 106 107 - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 108 - mode->base.id, mode->name, 109 - mode->vrefresh, mode->clock, 110 - mode->hdisplay, mode->hsync_start, 111 - mode->hsync_end, mode->htotal, 112 - mode->vdisplay, mode->vsync_start, 113 - mode->vsync_end, mode->vtotal, 114 - mode->type, mode->flags); 107 + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); 115 108 116 109 mdp4_dtv_encoder->pixclock = mode->clock * 1000; 117 110
+1 -8
drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
··· 273 273 274 274 mode = adjusted_mode; 275 275 276 - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 277 - mode->base.id, mode->name, 278 - mode->vrefresh, mode->clock, 279 - mode->hdisplay, mode->hsync_start, 280 - mode->hsync_end, mode->htotal, 281 - mode->vdisplay, mode->vsync_start, 282 - mode->vsync_end, mode->vtotal, 283 - mode->type, mode->flags); 276 + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); 284 277 285 278 mdp4_lcdc_encoder->pixclock = mode->clock * 1000; 286 279
+1 -8
drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
··· 134 134 { 135 135 mode = adjusted_mode; 136 136 137 - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 138 - mode->base.id, mode->name, 139 - mode->vrefresh, mode->clock, 140 - mode->hdisplay, mode->hsync_start, 141 - mode->hsync_end, mode->htotal, 142 - mode->vdisplay, mode->vsync_start, 143 - mode->vsync_end, mode->vtotal, 144 - mode->type, mode->flags); 137 + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); 145 138 pingpong_tearcheck_setup(encoder, mode); 146 139 mdp5_crtc_set_pipeline(encoder->crtc); 147 140 }
+1 -8
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
··· 384 384 385 385 mode = &crtc->state->adjusted_mode; 386 386 387 - DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 388 - crtc->name, mode->base.id, mode->name, 389 - mode->vrefresh, mode->clock, 390 - mode->hdisplay, mode->hsync_start, 391 - mode->hsync_end, mode->htotal, 392 - mode->vdisplay, mode->vsync_start, 393 - mode->vsync_end, mode->vtotal, 394 - mode->type, mode->flags); 387 + DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode)); 395 388 396 389 mixer_width = mode->hdisplay; 397 390 if (r_mixer)
+1 -8
drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
··· 118 118 119 119 mode = adjusted_mode; 120 120 121 - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 122 - mode->base.id, mode->name, 123 - mode->vrefresh, mode->clock, 124 - mode->hdisplay, mode->hsync_start, 125 - mode->hsync_end, mode->htotal, 126 - mode->vdisplay, mode->vsync_start, 127 - mode->vsync_end, mode->vtotal, 128 - mode->type, mode->flags); 121 + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); 129 122 130 123 ctrl_pol = 0; 131 124
+1 -1
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 144 144 145 145 state->mdp5_kms = mdp5_kms; 146 146 147 - drm_atomic_private_obj_init(&mdp5_kms->glob_state, 147 + drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state, 148 148 &state->base, 149 149 &mdp5_global_state_funcs); 150 150 return 0;
+1
drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
··· 16 16 * this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 18 19 + #include <drm/drm_util.h> 19 20 20 21 #include "mdp5_kms.h" 21 22 #include "mdp5_smp.h"
+1 -1
drivers/gpu/drm/msm/dsi/dsi.h
··· 168 168 bool is_dual_dsi); 169 169 int msm_dsi_host_power_off(struct mipi_dsi_host *host); 170 170 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 171 - struct drm_display_mode *mode); 171 + const struct drm_display_mode *mode); 172 172 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, 173 173 unsigned long *panel_flags); 174 174 struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
+1 -1
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 2424 2424 } 2425 2425 2426 2426 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 2427 - struct drm_display_mode *mode) 2427 + const struct drm_display_mode *mode) 2428 2428 { 2429 2429 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2430 2430
+3 -10
drivers/gpu/drm/msm/dsi/dsi_manager.c
··· 527 527 } 528 528 529 529 static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, 530 - struct drm_display_mode *mode, 531 - struct drm_display_mode *adjusted_mode) 530 + const struct drm_display_mode *mode, 531 + const struct drm_display_mode *adjusted_mode) 532 532 { 533 533 int id = dsi_mgr_bridge_get_id(bridge); 534 534 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); ··· 536 536 struct mipi_dsi_host *host = msm_dsi->host; 537 537 bool is_dual_dsi = IS_DUAL_DSI(); 538 538 539 - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 540 - mode->base.id, mode->name, 541 - mode->vrefresh, mode->clock, 542 - mode->hdisplay, mode->hsync_start, 543 - mode->hsync_end, mode->htotal, 544 - mode->vdisplay, mode->vsync_start, 545 - mode->vsync_end, mode->vtotal, 546 - mode->type, mode->flags); 539 + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); 547 540 548 541 if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) 549 542 return;
+3 -10
drivers/gpu/drm/msm/edp/edp_bridge.c
··· 52 52 } 53 53 54 54 static void edp_bridge_mode_set(struct drm_bridge *bridge, 55 - struct drm_display_mode *mode, 56 - struct drm_display_mode *adjusted_mode) 55 + const struct drm_display_mode *mode, 56 + const struct drm_display_mode *adjusted_mode) 57 57 { 58 58 struct drm_device *dev = bridge->dev; 59 59 struct drm_connector *connector; 60 60 struct edp_bridge *edp_bridge = to_edp_bridge(bridge); 61 61 struct msm_edp *edp = edp_bridge->edp; 62 62 63 - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 64 - mode->base.id, mode->name, 65 - mode->vrefresh, mode->clock, 66 - mode->hdisplay, mode->hsync_start, 67 - mode->hsync_end, mode->htotal, 68 - mode->vdisplay, mode->vsync_start, 69 - mode->vsync_end, mode->vtotal, 70 - mode->type, mode->flags); 63 + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); 71 64 72 65 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 73 66 if ((connector->encoder != NULL) &&
+4 -3
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
··· 101 101 u32 val; 102 102 int len; 103 103 104 - drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); 104 + drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 105 + hdmi->connector, mode); 105 106 106 107 len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer)); 107 108 if (len < 0) { ··· 208 207 } 209 208 210 209 static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge, 211 - struct drm_display_mode *mode, 212 - struct drm_display_mode *adjusted_mode) 210 + const struct drm_display_mode *mode, 211 + const struct drm_display_mode *adjusted_mode) 213 212 { 214 213 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); 215 214 struct hdmi *hdmi = hdmi_bridge->hdmi;
+1 -1
drivers/gpu/drm/msm/msm_gem.c
··· 758 758 struct seq_file *m) 759 759 { 760 760 if (!dma_fence_is_signaled(fence)) 761 - seq_printf(m, "\t%9s: %s %s seq %u\n", type, 761 + seq_printf(m, "\t%9s: %s %s seq %llu\n", type, 762 762 fence->ops->get_driver_name(fence), 763 763 fence->ops->get_timeline_name(fence), 764 764 fence->seqno);
+2 -24
drivers/gpu/drm/mxsfb/mxsfb_drv.c
··· 263 263 264 264 drm_kms_helper_poll_init(drm); 265 265 266 - mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, 267 - drm->mode_config.num_connector); 268 - if (IS_ERR(mxsfb->fbdev)) { 269 - ret = PTR_ERR(mxsfb->fbdev); 270 - mxsfb->fbdev = NULL; 271 - dev_err(drm->dev, "Failed to init FB CMA area\n"); 272 - goto err_cma; 273 - } 274 - 275 266 platform_set_drvdata(pdev, drm); 276 267 277 268 drm_helper_hpd_irq_event(drm); 278 269 279 270 return 0; 280 271 281 - err_cma: 282 - drm_irq_uninstall(drm); 283 272 err_irq: 284 273 drm_panel_detach(mxsfb->panel); 285 274 err_vblank: ··· 279 290 280 291 static void mxsfb_unload(struct drm_device *drm) 281 292 { 282 - struct mxsfb_drm_private *mxsfb = drm->dev_private; 283 - 284 - if (mxsfb->fbdev) 285 - drm_fbdev_cma_fini(mxsfb->fbdev); 286 - 287 293 drm_kms_helper_poll_fini(drm); 288 294 drm_mode_config_cleanup(drm); 289 295 ··· 289 305 drm->dev_private = NULL; 290 306 291 307 pm_runtime_disable(drm->dev); 292 - } 293 - 294 - static void mxsfb_lastclose(struct drm_device *drm) 295 - { 296 - struct mxsfb_drm_private *mxsfb = drm->dev_private; 297 - 298 - drm_fbdev_cma_restore_mode(mxsfb->fbdev); 299 308 } 300 309 301 310 static void mxsfb_irq_preinstall(struct drm_device *drm) ··· 324 347 .driver_features = DRIVER_GEM | DRIVER_MODESET | 325 348 DRIVER_PRIME | DRIVER_ATOMIC | 326 349 DRIVER_HAVE_IRQ, 327 - .lastclose = mxsfb_lastclose, 328 350 .irq_handler = mxsfb_irq_handler, 329 351 .irq_preinstall = mxsfb_irq_preinstall, 330 352 .irq_uninstall = mxsfb_irq_preinstall, ··· 387 411 ret = drm_dev_register(drm, 0); 388 412 if (ret) 389 413 goto err_unload; 414 + 415 + drm_fbdev_generic_setup(drm, 32); 390 416 391 417 return 0; 392 418
-1
drivers/gpu/drm/mxsfb/mxsfb_drv.h
··· 37 37 struct drm_simple_display_pipe pipe; 38 38 struct drm_connector connector; 39 39 struct drm_panel *panel; 40 - struct drm_fbdev_cma *fbdev; 41 40 }; 42 41 43 42 int mxsfb_setup_crtc(struct drm_device *dev);
+3 -1
drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
··· 750 750 /* Disable the crtc to ensure a full modeset is 751 751 * performed whenever it's turned on again. */ 752 752 if (crtc) 753 - drm_crtc_force_disable(crtc); 753 + drm_crtc_helper_set_mode(crtc, &crtc->mode, 754 + crtc->x, crtc->y, 755 + crtc->primary->fb); 754 756 } 755 757 756 758 return 0;
+74 -37
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 561 561 u32 max_ac_packet; 562 562 union hdmi_infoframe avi_frame; 563 563 union hdmi_infoframe vendor_frame; 564 - bool scdc_supported, high_tmds_clock_ratio = false, scrambling = false; 564 + bool high_tmds_clock_ratio = false, scrambling = false; 565 565 u8 config; 566 566 int ret; 567 567 int size; ··· 571 571 return; 572 572 573 573 hdmi = &nv_connector->base.display_info.hdmi; 574 - scdc_supported = hdmi->scdc.supported; 575 574 576 - ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode, 577 - scdc_supported); 575 + ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, 576 + &nv_connector->base, mode); 578 577 if (!ret) { 579 578 /* We have an AVI InfoFrame, populate it to the display */ 580 579 args.pwr.avi_infoframe_length ··· 679 680 struct nv50_mstm *mstm = mstc->mstm; 680 681 int vcpi = mstc->port->vcpi.vcpi, i; 681 682 683 + WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock)); 684 + 682 685 NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi); 683 686 for (i = 0; i < mstm->mgr.max_payloads; i++) { 684 687 struct drm_dp_payload *payload = &mstm->mgr.payloads[i]; ··· 705 704 struct nv50_mstc *mstc = msto->mstc; 706 705 struct nv50_mstm *mstm = mstc->mstm; 707 706 707 + if (!msto->disabled) 708 + return; 709 + 708 710 NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name); 709 - if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto)) 710 - drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); 711 - if (msto->disabled) { 712 - msto->mstc = NULL; 713 - msto->head = NULL; 714 - msto->disabled = false; 715 - } 711 + 712 + drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); 713 + 714 + msto->mstc = NULL; 715 + msto->head = NULL; 716 + msto->disabled = false; 716 717 } 717 718 718 719 static void ··· 734 731 (0x0100 << msto->head->base.index), 735 732 }; 736 733 734 + mutex_lock(&mstm->mgr.payload_lock); 735 + 737 736 NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name); 738 - if (mstc->port && 
mstc->port->vcpi.vcpi > 0) { 737 + if (mstc->port->vcpi.vcpi > 0) { 739 738 struct drm_dp_payload *payload = nv50_msto_payload(msto); 740 739 if (payload) { 741 740 args.vcpi.start_slot = payload->start_slot; ··· 751 746 msto->encoder.name, msto->head->base.base.name, 752 747 args.vcpi.start_slot, args.vcpi.num_slots, 753 748 args.vcpi.pbn, args.vcpi.aligned_pbn); 749 + 754 750 nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args)); 751 + mutex_unlock(&mstm->mgr.payload_lock); 755 752 } 756 753 757 754 static int ··· 761 754 struct drm_crtc_state *crtc_state, 762 755 struct drm_connector_state *conn_state) 763 756 { 764 - struct nv50_mstc *mstc = nv50_mstc(conn_state->connector); 757 + struct drm_atomic_state *state = crtc_state->state; 758 + struct drm_connector *connector = conn_state->connector; 759 + struct nv50_mstc *mstc = nv50_mstc(connector); 765 760 struct nv50_mstm *mstm = mstc->mstm; 766 - int bpp = conn_state->connector->display_info.bpc * 3; 761 + int bpp = connector->display_info.bpc * 3; 767 762 int slots; 768 763 769 - mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp); 764 + mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 765 + bpp); 770 766 771 - slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn); 772 - if (slots < 0) 773 - return slots; 767 + if (drm_atomic_crtc_needs_modeset(crtc_state) && 768 + !drm_connector_is_unregistered(connector)) { 769 + slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, 770 + mstc->port, mstc->pbn); 771 + if (slots < 0) 772 + return slots; 773 + } 774 774 775 775 return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, 776 776 mstc->native); ··· 843 829 struct nv50_mstc *mstc = msto->mstc; 844 830 struct nv50_mstm *mstm = mstc->mstm; 845 831 846 - if (mstc->port) 847 - drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port); 832 + drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port); 848 833 849 834 mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 
0, 0); 850 835 mstm->modified = true; ··· 940 927 return ret; 941 928 } 942 929 930 + static int 931 + nv50_mstc_atomic_check(struct drm_connector *connector, 932 + struct drm_connector_state *new_conn_state) 933 + { 934 + struct drm_atomic_state *state = new_conn_state->state; 935 + struct nv50_mstc *mstc = nv50_mstc(connector); 936 + struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr; 937 + struct drm_connector_state *old_conn_state = 938 + drm_atomic_get_old_connector_state(state, connector); 939 + struct drm_crtc_state *crtc_state; 940 + struct drm_crtc *new_crtc = new_conn_state->crtc; 941 + 942 + if (!old_conn_state->crtc) 943 + return 0; 944 + 945 + /* We only want to free VCPI if this state disables the CRTC on this 946 + * connector 947 + */ 948 + if (new_crtc) { 949 + crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); 950 + 951 + if (!crtc_state || 952 + !drm_atomic_crtc_needs_modeset(crtc_state) || 953 + crtc_state->enable) 954 + return 0; 955 + } 956 + 957 + return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port); 958 + } 959 + 943 960 static const struct drm_connector_helper_funcs 944 961 nv50_mstc_help = { 945 962 .get_modes = nv50_mstc_get_modes, 946 963 .mode_valid = nv50_mstc_mode_valid, 947 964 .best_encoder = nv50_mstc_best_encoder, 948 965 .atomic_best_encoder = nv50_mstc_atomic_best_encoder, 966 + .atomic_check = nv50_mstc_atomic_check, 949 967 }; 950 968 951 969 static enum drm_connector_status ··· 986 942 enum drm_connector_status conn_status; 987 943 int ret; 988 944 989 - if (!mstc->port) 945 + if (drm_connector_is_unregistered(connector)) 990 946 return connector_status_disconnected; 991 947 992 948 ret = pm_runtime_get_sync(connector->dev->dev); ··· 1005 961 nv50_mstc_destroy(struct drm_connector *connector) 1006 962 { 1007 963 struct nv50_mstc *mstc = nv50_mstc(connector); 964 + 1008 965 drm_connector_cleanup(&mstc->connector); 966 + drm_dp_mst_put_port_malloc(mstc->port); 967 + 1009 968 kfree(mstc); 1010 969 } 1011 
970 ··· 1056 1009 drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0); 1057 1010 drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0); 1058 1011 drm_connector_set_path_property(&mstc->connector, path); 1012 + drm_dp_mst_get_port_malloc(port); 1059 1013 return 0; 1060 1014 } 1061 1015 ··· 1111 1063 } 1112 1064 1113 1065 static void 1114 - nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr) 1115 - { 1116 - struct nv50_mstm *mstm = nv50_mstm(mgr); 1117 - drm_kms_helper_hotplug_event(mstm->outp->base.base.dev); 1118 - } 1119 - 1120 - static void 1121 1066 nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr, 1122 1067 struct drm_connector *connector) 1123 1068 { ··· 1120 1079 drm_connector_unregister(&mstc->connector); 1121 1080 1122 1081 drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector); 1123 - 1124 - drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL); 1125 - mstc->port = NULL; 1126 - drm_modeset_unlock(&drm->dev->mode_config.connection_mutex); 1127 1082 1128 1083 drm_connector_put(&mstc->connector); 1129 1084 } ··· 1143 1106 int ret; 1144 1107 1145 1108 ret = nv50_mstc_new(mstm, port, path, &mstc); 1146 - if (ret) { 1147 - if (mstc) 1148 - mstc->connector.funcs->destroy(&mstc->connector); 1109 + if (ret) 1149 1110 return NULL; 1150 - } 1151 1111 1152 1112 return &mstc->connector; 1153 1113 } ··· 1154 1120 .add_connector = nv50_mstm_add_connector, 1155 1121 .register_connector = nv50_mstm_register_connector, 1156 1122 .destroy_connector = nv50_mstm_destroy_connector, 1157 - .hotplug = nv50_mstm_hotplug, 1158 1123 }; 1159 1124 1160 1125 void ··· 2157 2124 if (ret) 2158 2125 return ret; 2159 2126 } 2127 + 2128 + ret = drm_dp_mst_atomic_check(state); 2129 + if (ret) 2130 + return ret; 2160 2131 2161 2132 return 0; 2162 2133 }
+1 -1
drivers/gpu/drm/nouveau/nouveau_display.c
··· 453 453 if (drm_drv_uses_atomic_modeset(dev)) 454 454 drm_atomic_helper_shutdown(dev); 455 455 else 456 - drm_crtc_force_disable_all(dev); 456 + drm_helper_force_disable_all(dev); 457 457 } 458 458 459 459 /* disable flip completion events */
+2 -7
drivers/gpu/drm/omapdrm/omap_connector.c
··· 305 305 drm_mode_destroy(dev, new_mode); 306 306 307 307 done: 308 - DBG("connector: mode %s: " 309 - "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 308 + DBG("connector: mode %s: " DRM_MODE_FMT, 310 309 (ret == MODE_OK) ? "valid" : "invalid", 311 - mode->base.id, mode->name, mode->vrefresh, mode->clock, 312 - mode->hdisplay, mode->hsync_start, 313 - mode->hsync_end, mode->htotal, 314 - mode->vdisplay, mode->vsync_start, 315 - mode->vsync_end, mode->vtotal, mode->type, mode->flags); 310 + DRM_MODE_ARG(mode)); 316 311 317 312 return ret; 318 313 }
+2 -6
drivers/gpu/drm/omapdrm/omap_crtc.c
··· 427 427 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 428 428 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 429 429 430 - DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 431 - omap_crtc->name, mode->base.id, mode->name, 432 - mode->vrefresh, mode->clock, 433 - mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, 434 - mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, 435 - mode->type, mode->flags); 430 + DBG("%s: set mode: " DRM_MODE_FMT, 431 + omap_crtc->name, DRM_MODE_ARG(mode)); 436 432 437 433 drm_display_mode_to_videomode(mode, &omap_crtc->vm); 438 434 }
+2 -2
drivers/gpu/drm/omapdrm/omap_encoder.c
··· 76 76 struct hdmi_avi_infoframe avi; 77 77 int r; 78 78 79 - r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode, 80 - false); 79 + r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector, 80 + adjusted_mode); 81 81 if (r == 0) 82 82 dssdev->ops->hdmi.set_infoframe(dssdev, &avi); 83 83 }
+1
drivers/gpu/drm/omapdrm/omap_fbdev.c
··· 16 16 */ 17 17 18 18 #include <drm/drm_crtc.h> 19 + #include <drm/drm_util.h> 19 20 #include <drm/drm_fb_helper.h> 20 21 21 22 #include "omap_drv.h"
+9
drivers/gpu/drm/panel/Kconfig
··· 204 204 Say Y here if you want to enable support for the Sitronix 205 205 ST7789V controller for 240x320 LCD panels 206 206 207 + config DRM_PANEL_TPO_TPG110 208 + tristate "TPO TPG 800x400 panel" 209 + depends on OF && SPI && GPIOLIB 210 + depends on BACKLIGHT_CLASS_DEVICE 211 + help 212 + Say Y here if you want to enable support for TPO TPG110 213 + 400CH LTPS TFT LCD Single Chip Digital Driver for up to 214 + 800x400 LCD panels. 215 + 207 216 config DRM_PANEL_TRULY_NT35597_WQXGA 208 217 tristate "Truly WQXGA" 209 218 depends on OF
+1
drivers/gpu/drm/panel/Makefile
··· 21 21 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o 22 22 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o 23 23 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o 24 + obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o 24 25 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+496
drivers/gpu/drm/panel/panel-tpo-tpg110.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Panel driver for the TPO TPG110 400CH LTPS TFT LCD Single Chip 4 + * Digital Driver. 5 + * 6 + * This chip drives a TFT LCD, so it does not know what kind of 7 + * display is actually connected to it, so the width and height of that 8 + * display needs to be supplied from the machine configuration. 9 + * 10 + * Author: 11 + * Linus Walleij <linus.walleij@linaro.org> 12 + */ 13 + #include <drm/drm_modes.h> 14 + #include <drm/drm_panel.h> 15 + #include <drm/drm_print.h> 16 + 17 + #include <linux/backlight.h> 18 + #include <linux/bitops.h> 19 + #include <linux/delay.h> 20 + #include <linux/gpio/consumer.h> 21 + #include <linux/init.h> 22 + #include <linux/kernel.h> 23 + #include <linux/module.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/spi/spi.h> 26 + 27 + #define TPG110_TEST 0x00 28 + #define TPG110_CHIPID 0x01 29 + #define TPG110_CTRL1 0x02 30 + #define TPG110_RES_MASK GENMASK(2, 0) 31 + #define TPG110_RES_800X480 0x07 32 + #define TPG110_RES_640X480 0x06 33 + #define TPG110_RES_480X272 0x05 34 + #define TPG110_RES_480X640 0x04 35 + #define TPG110_RES_480X272_D 0x01 /* Dual scan: outputs 800x480 */ 36 + #define TPG110_RES_400X240_D 0x00 /* Dual scan: outputs 800x480 */ 37 + #define TPG110_CTRL2 0x03 38 + #define TPG110_CTRL2_PM BIT(0) 39 + #define TPG110_CTRL2_RES_PM_CTRL BIT(7) 40 + 41 + /** 42 + * struct tpg110_panel_mode - lookup struct for the supported modes 43 + */ 44 + struct tpg110_panel_mode { 45 + /** 46 + * @name: the name of this panel 47 + */ 48 + const char *name; 49 + /** 50 + * @magic: the magic value from the detection register 51 + */ 52 + u32 magic; 53 + /** 54 + * @mode: the DRM display mode for this panel 55 + */ 56 + struct drm_display_mode mode; 57 + /** 58 + * @bus_flags: the DRM bus flags for this panel e.g. 
inverted clock 59 + */ 60 + u32 bus_flags; 61 + }; 62 + 63 + /** 64 + * struct tpg110 - state container for the TPG110 panel 65 + */ 66 + struct tpg110 { 67 + /** 68 + * @dev: the container device 69 + */ 70 + struct device *dev; 71 + /** 72 + * @spi: the corresponding SPI device 73 + */ 74 + struct spi_device *spi; 75 + /** 76 + * @panel: the DRM panel instance for this device 77 + */ 78 + struct drm_panel panel; 79 + /** 80 + * @backlight: backlight for this panel 81 + */ 82 + struct backlight_device *backlight; 83 + /** 84 + * @panel_type: the panel mode as detected 85 + */ 86 + const struct tpg110_panel_mode *panel_mode; 87 + /** 88 + * @width: the width of this panel in mm 89 + */ 90 + u32 width; 91 + /** 92 + * @height: the height of this panel in mm 93 + */ 94 + u32 height; 95 + /** 96 + * @grestb: reset GPIO line 97 + */ 98 + struct gpio_desc *grestb; 99 + }; 100 + 101 + /* 102 + * TPG110 modes, these are the simple modes, the dualscan modes that 103 + * take 400x240 or 480x272 in and display as 800x480 are not listed. 
104 + */ 105 + static const struct tpg110_panel_mode tpg110_modes[] = { 106 + { 107 + .name = "800x480 RGB", 108 + .magic = TPG110_RES_800X480, 109 + .mode = { 110 + .clock = 33200, 111 + .hdisplay = 800, 112 + .hsync_start = 800 + 40, 113 + .hsync_end = 800 + 40 + 1, 114 + .htotal = 800 + 40 + 1 + 216, 115 + .vdisplay = 480, 116 + .vsync_start = 480 + 10, 117 + .vsync_end = 480 + 10 + 1, 118 + .vtotal = 480 + 10 + 1 + 35, 119 + .vrefresh = 60, 120 + }, 121 + .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, 122 + }, 123 + { 124 + .name = "640x480 RGB", 125 + .magic = TPG110_RES_640X480, 126 + .mode = { 127 + .clock = 25200, 128 + .hdisplay = 640, 129 + .hsync_start = 640 + 24, 130 + .hsync_end = 640 + 24 + 1, 131 + .htotal = 640 + 24 + 1 + 136, 132 + .vdisplay = 480, 133 + .vsync_start = 480 + 18, 134 + .vsync_end = 480 + 18 + 1, 135 + .vtotal = 480 + 18 + 1 + 27, 136 + .vrefresh = 60, 137 + }, 138 + .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, 139 + }, 140 + { 141 + .name = "480x272 RGB", 142 + .magic = TPG110_RES_480X272, 143 + .mode = { 144 + .clock = 9000, 145 + .hdisplay = 480, 146 + .hsync_start = 480 + 2, 147 + .hsync_end = 480 + 2 + 1, 148 + .htotal = 480 + 2 + 1 + 43, 149 + .vdisplay = 272, 150 + .vsync_start = 272 + 2, 151 + .vsync_end = 272 + 2 + 1, 152 + .vtotal = 272 + 2 + 1 + 12, 153 + .vrefresh = 60, 154 + }, 155 + .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, 156 + }, 157 + { 158 + .name = "480x640 RGB", 159 + .magic = TPG110_RES_480X640, 160 + .mode = { 161 + .clock = 20500, 162 + .hdisplay = 480, 163 + .hsync_start = 480 + 2, 164 + .hsync_end = 480 + 2 + 1, 165 + .htotal = 480 + 2 + 1 + 43, 166 + .vdisplay = 640, 167 + .vsync_start = 640 + 4, 168 + .vsync_end = 640 + 4 + 1, 169 + .vtotal = 640 + 4 + 1 + 8, 170 + .vrefresh = 60, 171 + }, 172 + .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, 173 + }, 174 + { 175 + .name = "400x240 RGB", 176 + .magic = TPG110_RES_400X240_D, 177 + .mode = { 178 + .clock = 8300, 179 + .hdisplay = 400, 180 + .hsync_start = 400 + 20, 
181 + .hsync_end = 400 + 20 + 1, 182 + .htotal = 400 + 20 + 1 + 108, 183 + .vdisplay = 240, 184 + .vsync_start = 240 + 2, 185 + .vsync_end = 240 + 2 + 1, 186 + .vtotal = 240 + 2 + 1 + 20, 187 + .vrefresh = 60, 188 + }, 189 + .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, 190 + }, 191 + }; 192 + 193 + static inline struct tpg110 * 194 + to_tpg110(struct drm_panel *panel) 195 + { 196 + return container_of(panel, struct tpg110, panel); 197 + } 198 + 199 + static u8 tpg110_readwrite_reg(struct tpg110 *tpg, bool write, 200 + u8 address, u8 outval) 201 + { 202 + struct spi_message m; 203 + struct spi_transfer t[2]; 204 + u8 buf[2]; 205 + int ret; 206 + 207 + spi_message_init(&m); 208 + memset(t, 0, sizeof(t)); 209 + 210 + if (write) { 211 + /* 212 + * Clear address bit 0, 1 when writing, just to be sure 213 + * The actual bit indicating a write here is bit 1, bit 214 + * 0 is just surplus to pad it up to 8 bits. 215 + */ 216 + buf[0] = address << 2; 217 + buf[0] &= ~0x03; 218 + buf[1] = outval; 219 + 220 + t[0].bits_per_word = 8; 221 + t[0].tx_buf = &buf[0]; 222 + t[0].len = 1; 223 + 224 + t[1].tx_buf = &buf[1]; 225 + t[1].len = 1; 226 + t[1].bits_per_word = 8; 227 + } else { 228 + /* Set address bit 0 to 1 to read */ 229 + buf[0] = address << 1; 230 + buf[0] |= 0x01; 231 + 232 + /* 233 + * The last bit/clock is Hi-Z turnaround cycle, so we need 234 + * to send only 7 bits here. The 8th bit is the high impedance 235 + * turn-around cycle. 
236 + */ 237 + t[0].bits_per_word = 7; 238 + t[0].tx_buf = &buf[0]; 239 + t[0].len = 1; 240 + 241 + t[1].rx_buf = &buf[1]; 242 + t[1].len = 1; 243 + t[1].bits_per_word = 8; 244 + } 245 + 246 + spi_message_add_tail(&t[0], &m); 247 + spi_message_add_tail(&t[1], &m); 248 + ret = spi_sync(tpg->spi, &m); 249 + if (ret) { 250 + DRM_DEV_ERROR(tpg->dev, "SPI message error %d\n", ret); 251 + return ret; 252 + } 253 + if (write) 254 + return 0; 255 + /* Read */ 256 + return buf[1]; 257 + } 258 + 259 + static u8 tpg110_read_reg(struct tpg110 *tpg, u8 address) 260 + { 261 + return tpg110_readwrite_reg(tpg, false, address, 0); 262 + } 263 + 264 + static void tpg110_write_reg(struct tpg110 *tpg, u8 address, u8 outval) 265 + { 266 + tpg110_readwrite_reg(tpg, true, address, outval); 267 + } 268 + 269 + static int tpg110_startup(struct tpg110 *tpg) 270 + { 271 + u8 val; 272 + int i; 273 + 274 + /* De-assert the reset signal */ 275 + gpiod_set_value_cansleep(tpg->grestb, 0); 276 + usleep_range(1000, 2000); 277 + DRM_DEV_DEBUG(tpg->dev, "de-asserted GRESTB\n"); 278 + 279 + /* Test display communication */ 280 + tpg110_write_reg(tpg, TPG110_TEST, 0x55); 281 + val = tpg110_read_reg(tpg, TPG110_TEST); 282 + if (val != 0x55) { 283 + DRM_DEV_ERROR(tpg->dev, "failed communication test\n"); 284 + return -ENODEV; 285 + } 286 + 287 + val = tpg110_read_reg(tpg, TPG110_CHIPID); 288 + DRM_DEV_INFO(tpg->dev, "TPG110 chip ID: %d version: %d\n", 289 + val >> 4, val & 0x0f); 290 + 291 + /* Show display resolution */ 292 + val = tpg110_read_reg(tpg, TPG110_CTRL1); 293 + val &= TPG110_RES_MASK; 294 + switch (val) { 295 + case TPG110_RES_400X240_D: 296 + DRM_DEV_INFO(tpg->dev, 297 + "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)\n"); 298 + break; 299 + case TPG110_RES_480X272_D: 300 + DRM_DEV_INFO(tpg->dev, 301 + "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)\n"); 302 + break; 303 + case TPG110_RES_480X640: 304 + DRM_DEV_INFO(tpg->dev, "480x640 RGB\n"); 305 + break; 306 + case TPG110_RES_480X272: 307 + 
DRM_DEV_INFO(tpg->dev, "480x272 RGB\n"); 308 + break; 309 + case TPG110_RES_640X480: 310 + DRM_DEV_INFO(tpg->dev, "640x480 RGB\n"); 311 + break; 312 + case TPG110_RES_800X480: 313 + DRM_DEV_INFO(tpg->dev, "800x480 RGB\n"); 314 + break; 315 + default: 316 + DRM_DEV_ERROR(tpg->dev, "ILLEGAL RESOLUTION 0x%02x\n", val); 317 + break; 318 + } 319 + 320 + /* From the producer side, this is the same resolution */ 321 + if (val == TPG110_RES_480X272_D) 322 + val = TPG110_RES_480X272; 323 + 324 + for (i = 0; i < ARRAY_SIZE(tpg110_modes); i++) { 325 + const struct tpg110_panel_mode *pm; 326 + 327 + pm = &tpg110_modes[i]; 328 + if (pm->magic == val) { 329 + tpg->panel_mode = pm; 330 + break; 331 + } 332 + } 333 + if (i == ARRAY_SIZE(tpg110_modes)) { 334 + DRM_DEV_ERROR(tpg->dev, "unsupported mode (%02x) detected\n", 335 + val); 336 + return -ENODEV; 337 + } 338 + 339 + val = tpg110_read_reg(tpg, TPG110_CTRL2); 340 + DRM_DEV_INFO(tpg->dev, "resolution and standby is controlled by %s\n", 341 + (val & TPG110_CTRL2_RES_PM_CTRL) ? 
"software" : "hardware"); 342 + /* Take control over resolution and standby */ 343 + val |= TPG110_CTRL2_RES_PM_CTRL; 344 + tpg110_write_reg(tpg, TPG110_CTRL2, val); 345 + 346 + return 0; 347 + } 348 + 349 + static int tpg110_disable(struct drm_panel *panel) 350 + { 351 + struct tpg110 *tpg = to_tpg110(panel); 352 + u8 val; 353 + 354 + /* Put chip into standby */ 355 + val = tpg110_read_reg(tpg, TPG110_CTRL2_PM); 356 + val &= ~TPG110_CTRL2_PM; 357 + tpg110_write_reg(tpg, TPG110_CTRL2_PM, val); 358 + 359 + backlight_disable(tpg->backlight); 360 + 361 + return 0; 362 + } 363 + 364 + static int tpg110_enable(struct drm_panel *panel) 365 + { 366 + struct tpg110 *tpg = to_tpg110(panel); 367 + u8 val; 368 + 369 + backlight_enable(tpg->backlight); 370 + 371 + /* Take chip out of standby */ 372 + val = tpg110_read_reg(tpg, TPG110_CTRL2_PM); 373 + val |= TPG110_CTRL2_PM; 374 + tpg110_write_reg(tpg, TPG110_CTRL2_PM, val); 375 + 376 + return 0; 377 + } 378 + 379 + /** 380 + * tpg110_get_modes() - return the appropriate mode 381 + * @panel: the panel to get the mode for 382 + * 383 + * This currently does not present a forest of modes, instead it 384 + * presents the mode that is configured for the system under use, 385 + * and which is detected by reading the registers of the display. 
386 + */ 387 + static int tpg110_get_modes(struct drm_panel *panel) 388 + { 389 + struct drm_connector *connector = panel->connector; 390 + struct tpg110 *tpg = to_tpg110(panel); 391 + struct drm_display_mode *mode; 392 + 393 + strncpy(connector->display_info.name, tpg->panel_mode->name, 394 + DRM_DISPLAY_INFO_LEN); 395 + connector->display_info.width_mm = tpg->width; 396 + connector->display_info.height_mm = tpg->height; 397 + connector->display_info.bus_flags = tpg->panel_mode->bus_flags; 398 + 399 + mode = drm_mode_duplicate(panel->drm, &tpg->panel_mode->mode); 400 + drm_mode_set_name(mode); 401 + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 402 + 403 + mode->width_mm = tpg->width; 404 + mode->height_mm = tpg->height; 405 + 406 + drm_mode_probed_add(connector, mode); 407 + 408 + return 1; 409 + } 410 + 411 + static const struct drm_panel_funcs tpg110_drm_funcs = { 412 + .disable = tpg110_disable, 413 + .enable = tpg110_enable, 414 + .get_modes = tpg110_get_modes, 415 + }; 416 + 417 + static int tpg110_probe(struct spi_device *spi) 418 + { 419 + struct device *dev = &spi->dev; 420 + struct device_node *np = dev->of_node; 421 + struct tpg110 *tpg; 422 + int ret; 423 + 424 + tpg = devm_kzalloc(dev, sizeof(*tpg), GFP_KERNEL); 425 + if (!tpg) 426 + return -ENOMEM; 427 + tpg->dev = dev; 428 + 429 + /* We get the physical display dimensions from the DT */ 430 + ret = of_property_read_u32(np, "width-mm", &tpg->width); 431 + if (ret) 432 + DRM_DEV_ERROR(dev, "no panel width specified\n"); 433 + ret = of_property_read_u32(np, "height-mm", &tpg->height); 434 + if (ret) 435 + DRM_DEV_ERROR(dev, "no panel height specified\n"); 436 + 437 + /* Look for some optional backlight */ 438 + tpg->backlight = devm_of_find_backlight(dev); 439 + if (IS_ERR(tpg->backlight)) 440 + return PTR_ERR(tpg->backlight); 441 + 442 + /* This asserts the GRESTB signal, putting the display into reset */ 443 + tpg->grestb = devm_gpiod_get(dev, "grestb", GPIOD_OUT_HIGH); 444 + if 
(IS_ERR(tpg->grestb)) { 445 + DRM_DEV_ERROR(dev, "no GRESTB GPIO\n"); 446 + return -ENODEV; 447 + } 448 + 449 + spi->bits_per_word = 8; 450 + spi->mode |= SPI_3WIRE_HIZ; 451 + ret = spi_setup(spi); 452 + if (ret < 0) { 453 + DRM_DEV_ERROR(dev, "spi setup failed.\n"); 454 + return ret; 455 + } 456 + tpg->spi = spi; 457 + 458 + ret = tpg110_startup(tpg); 459 + if (ret) 460 + return ret; 461 + 462 + drm_panel_init(&tpg->panel); 463 + tpg->panel.dev = dev; 464 + tpg->panel.funcs = &tpg110_drm_funcs; 465 + spi_set_drvdata(spi, tpg); 466 + 467 + return drm_panel_add(&tpg->panel); 468 + } 469 + 470 + static int tpg110_remove(struct spi_device *spi) 471 + { 472 + struct tpg110 *tpg = spi_get_drvdata(spi); 473 + 474 + drm_panel_remove(&tpg->panel); 475 + return 0; 476 + } 477 + 478 + static const struct of_device_id tpg110_match[] = { 479 + { .compatible = "tpo,tpg110", }, 480 + {}, 481 + }; 482 + MODULE_DEVICE_TABLE(of, tpg110_match); 483 + 484 + static struct spi_driver tpg110_driver = { 485 + .probe = tpg110_probe, 486 + .remove = tpg110_remove, 487 + .driver = { 488 + .name = "tpo-tpg110-panel", 489 + .of_match_table = tpg110_match, 490 + }, 491 + }; 492 + module_spi_driver(tpg110_driver); 493 + 494 + MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); 495 + MODULE_DESCRIPTION("TPO TPG110 panel driver"); 496 + MODULE_LICENSE("GPL v2");
+2
drivers/gpu/drm/qxl/qxl_cmd.c
··· 25 25 26 26 /* QXL cmd/ring handling */ 27 27 28 + #include <drm/drm_util.h> 29 + 28 30 #include "qxl_drv.h" 29 31 #include "qxl_object.h" 30 32
+2 -3
drivers/gpu/drm/qxl/qxl_display.c
··· 48 48 } 49 49 if (!qdev->client_monitors_config) { 50 50 qdev->client_monitors_config = kzalloc( 51 - sizeof(struct qxl_monitors_config) + 52 - sizeof(struct qxl_head) * count, GFP_KERNEL); 51 + struct_size(qdev->client_monitors_config, 52 + heads, count), GFP_KERNEL); 53 53 if (!qdev->client_monitors_config) 54 54 return -ENOMEM; 55 55 } ··· 1010 1010 } 1011 1011 1012 1012 static const struct drm_connector_funcs qxl_connector_funcs = { 1013 - .dpms = drm_helper_connector_dpms, 1014 1013 .detect = qxl_conn_detect, 1015 1014 .fill_modes = drm_helper_probe_single_connector_modes, 1016 1015 .destroy = qxl_conn_destroy,
+3 -2
drivers/gpu/drm/r128/r128_cce.c
··· 560 560 dev_priv->gart_info.addr = NULL; 561 561 dev_priv->gart_info.bus_addr = 0; 562 562 dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; 563 - if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 563 + rc = drm_ati_pcigart_init(dev, &dev_priv->gart_info); 564 + if (rc) { 564 565 DRM_ERROR("failed to init PCI GART!\n"); 565 566 dev->dev_private = (void *)dev_priv; 566 567 r128_do_cleanup_cce(dev); 567 - return -ENOMEM; 568 + return rc; 568 569 } 569 570 R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); 570 571 #if IS_ENABLED(CONFIG_AGP)
+2
drivers/gpu/drm/radeon/atom.c
··· 27 27 #include <linux/slab.h> 28 28 #include <asm/unaligned.h> 29 29 30 + #include <drm/drm_util.h> 31 + 30 32 #define ATOM_DEBUG 31 33 32 34 #include "atom.h"
+5 -9
drivers/gpu/drm/radeon/radeon_audio.c
··· 516 516 if (!connector) 517 517 return -EINVAL; 518 518 519 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 519 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); 520 520 if (err < 0) { 521 521 DRM_ERROR("failed to setup AVI infoframe: %d\n", err); 522 522 return err; 523 523 } 524 524 525 525 if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) { 526 - if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) { 527 - if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB) 528 - frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED; 529 - else 530 - frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL; 531 - } else { 532 - frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; 533 - } 526 + drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode, 527 + radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB ? 528 + HDMI_QUANTIZATION_RANGE_LIMITED : 529 + HDMI_QUANTIZATION_RANGE_FULL); 534 530 } 535 531 536 532 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+1 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 1646 1646 if (rdev->mode_info.mode_config_initialized) { 1647 1647 drm_kms_helper_poll_fini(rdev->ddev); 1648 1648 radeon_hpd_fini(rdev); 1649 - drm_crtc_force_disable_all(rdev->ddev); 1649 + drm_helper_force_disable_all(rdev->ddev); 1650 1650 radeon_fbdev_fini(rdev); 1651 1651 radeon_afmt_fini(rdev); 1652 1652 drm_mode_config_cleanup(rdev->ddev);
-9
drivers/gpu/drm/radeon/radeon_dp_mst.c
··· 320 320 DRM_DEBUG_KMS("\n"); 321 321 } 322 322 323 - static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) 324 - { 325 - struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); 326 - struct drm_device *dev = master->base.dev; 327 - 328 - drm_kms_helper_hotplug_event(dev); 329 - } 330 - 331 323 static const struct drm_dp_mst_topology_cbs mst_cbs = { 332 324 .add_connector = radeon_dp_add_mst_connector, 333 325 .register_connector = radeon_dp_register_mst_connector, 334 326 .destroy_connector = radeon_dp_destroy_mst_connector, 335 - .hotplug = radeon_dp_mst_hotplug, 336 327 }; 337 328 338 329 static struct
+1
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
··· 24 24 * Alex Deucher 25 25 */ 26 26 #include <drm/drmP.h> 27 + #include <drm/drm_util.h> 27 28 #include <drm/drm_crtc_helper.h> 28 29 #include <drm/radeon_drm.h> 29 30 #include "radeon.h"
+20 -23
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
··· 11 11 #include <linux/mutex.h> 12 12 #include <linux/sys_soc.h> 13 13 14 - #include <drm/drmP.h> 15 14 #include <drm/drm_atomic.h> 16 15 #include <drm/drm_atomic_helper.h> 17 16 #include <drm/drm_crtc.h> ··· 21 22 22 23 #include "rcar_du_crtc.h" 23 24 #include "rcar_du_drv.h" 25 + #include "rcar_du_encoder.h" 24 26 #include "rcar_du_kms.h" 25 27 #include "rcar_du_plane.h" 26 28 #include "rcar_du_regs.h" ··· 314 314 315 315 rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1); 316 316 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); 317 - } 318 - 319 - void rcar_du_crtc_route_output(struct drm_crtc *crtc, 320 - enum rcar_du_output output) 321 - { 322 - struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 323 - struct rcar_du_device *rcdu = rcrtc->group->dev; 324 - 325 - /* 326 - * Store the route from the CRTC output to the DU output. The DU will be 327 - * configured when starting the CRTC. 328 - */ 329 - rcrtc->outputs |= BIT(output); 330 - 331 - /* 332 - * Store RGB routing to DPAD0, the hardware will be configured when 333 - * starting the CRTC. 334 - */ 335 - if (output == RCAR_DU_OUTPUT_DPAD0) 336 - rcdu->dpad0_source = rcrtc->index; 337 317 } 338 318 339 319 static unsigned int plane_zpos(struct rcar_du_plane *plane) ··· 635 655 * CRTC Functions 636 656 */ 637 657 658 + static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc, 659 + struct drm_crtc_state *state) 660 + { 661 + struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state); 662 + struct drm_encoder *encoder; 663 + 664 + /* Store the routes from the CRTC output to the DU outputs. 
*/ 665 + rstate->outputs = 0; 666 + 667 + drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) { 668 + struct rcar_du_encoder *renc = to_rcar_encoder(encoder); 669 + 670 + rstate->outputs |= BIT(renc->output); 671 + } 672 + 673 + return 0; 674 + } 675 + 638 676 static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, 639 677 struct drm_crtc_state *old_state) 640 678 { ··· 676 678 crtc->state->event = NULL; 677 679 } 678 680 spin_unlock_irq(&crtc->dev->event_lock); 679 - 680 - rcrtc->outputs = 0; 681 681 } 682 682 683 683 static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, ··· 751 755 } 752 756 753 757 static const struct drm_crtc_helper_funcs crtc_helper_funcs = { 758 + .atomic_check = rcar_du_crtc_atomic_check, 754 759 .atomic_begin = rcar_du_crtc_atomic_begin, 755 760 .atomic_flush = rcar_du_crtc_atomic_flush, 756 761 .atomic_enable = rcar_du_crtc_atomic_enable,
+2 -6
drivers/gpu/drm/rcar-du/rcar_du_crtc.h
··· 14 14 #include <linux/spinlock.h> 15 15 #include <linux/wait.h> 16 16 17 - #include <drm/drmP.h> 18 17 #include <drm/drm_crtc.h> 19 18 20 19 #include <media/vsp1.h> ··· 36 37 * @vblank_lock: protects vblank_wait and vblank_count 37 38 * @vblank_wait: wait queue used to signal vertical blanking 38 39 * @vblank_count: number of vertical blanking interrupts to wait for 39 - * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC 40 40 * @group: CRTC group this CRTC belongs to 41 41 * @vsp: VSP feeding video to this CRTC 42 42 * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC ··· 59 61 wait_queue_head_t vblank_wait; 60 62 unsigned int vblank_count; 61 63 62 - unsigned int outputs; 63 - 64 64 struct rcar_du_group *group; 65 65 struct rcar_du_vsp *vsp; 66 66 unsigned int vsp_pipe; ··· 73 77 * struct rcar_du_crtc_state - Driver-specific CRTC state 74 78 * @state: base DRM CRTC state 75 79 * @crc: CRC computation configuration 80 + * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC 76 81 */ 77 82 struct rcar_du_crtc_state { 78 83 struct drm_crtc_state state; 79 84 80 85 struct vsp1_du_crc_config crc; 86 + unsigned int outputs; 81 87 }; 82 88 83 89 #define to_rcar_crtc_state(s) container_of(s, struct rcar_du_crtc_state, state) ··· 100 102 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); 101 103 void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); 102 104 103 - void rcar_du_crtc_route_output(struct drm_crtc *crtc, 104 - enum rcar_du_output output); 105 105 void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc); 106 106 107 107 void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set);
+28 -14
drivers/gpu/drm/rcar-du/rcar_du_drv.c
··· 17 17 #include <linux/slab.h> 18 18 #include <linux/wait.h> 19 19 20 - #include <drm/drmP.h> 21 20 #include <drm/drm_atomic_helper.h> 22 21 #include <drm/drm_crtc_helper.h> 23 22 #include <drm/drm_fb_cma_helper.h> ··· 35 36 static const struct rcar_du_device_info rzg1_du_r8a7743_info = { 36 37 .gen = 2, 37 38 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 38 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 39 39 | RCAR_DU_FEATURE_INTERLACED 40 40 | RCAR_DU_FEATURE_TVM_SYNC, 41 41 .channels_mask = BIT(1) | BIT(0), ··· 57 59 static const struct rcar_du_device_info rzg1_du_r8a7745_info = { 58 60 .gen = 2, 59 61 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 60 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 61 62 | RCAR_DU_FEATURE_INTERLACED 62 63 | RCAR_DU_FEATURE_TVM_SYNC, 63 64 .channels_mask = BIT(1) | BIT(0), ··· 78 81 static const struct rcar_du_device_info rzg1_du_r8a77470_info = { 79 82 .gen = 2, 80 83 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 81 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 82 84 | RCAR_DU_FEATURE_INTERLACED 83 85 | RCAR_DU_FEATURE_TVM_SYNC, 84 86 .channels_mask = BIT(1) | BIT(0), ··· 101 105 }, 102 106 }; 103 107 108 + static const struct rcar_du_device_info rcar_du_r8a774c0_info = { 109 + .gen = 3, 110 + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 111 + | RCAR_DU_FEATURE_VSP1_SOURCE, 112 + .channels_mask = BIT(1) | BIT(0), 113 + .routes = { 114 + /* 115 + * R8A774C0 has one RGB output and two LVDS outputs 116 + */ 117 + [RCAR_DU_OUTPUT_DPAD0] = { 118 + .possible_crtcs = BIT(0) | BIT(1), 119 + .port = 0, 120 + }, 121 + [RCAR_DU_OUTPUT_LVDS0] = { 122 + .possible_crtcs = BIT(0), 123 + .port = 1, 124 + }, 125 + [RCAR_DU_OUTPUT_LVDS1] = { 126 + .possible_crtcs = BIT(1), 127 + .port = 2, 128 + }, 129 + }, 130 + .num_lvds = 2, 131 + .lvds_clk_mask = BIT(1) | BIT(0), 132 + }; 133 + 104 134 static const struct rcar_du_device_info rcar_du_r8a7779_info = { 105 - .gen = 2, 135 + .gen = 1, 106 136 .features = RCAR_DU_FEATURE_INTERLACED 107 137 | RCAR_DU_FEATURE_TVM_SYNC, 108 138 .channels_mask = 
BIT(1) | BIT(0), ··· 151 129 static const struct rcar_du_device_info rcar_du_r8a7790_info = { 152 130 .gen = 2, 153 131 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 154 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 155 132 | RCAR_DU_FEATURE_INTERLACED 156 133 | RCAR_DU_FEATURE_TVM_SYNC, 157 134 .quirks = RCAR_DU_QUIRK_ALIGN_128B, ··· 180 159 static const struct rcar_du_device_info rcar_du_r8a7791_info = { 181 160 .gen = 2, 182 161 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 183 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 184 162 | RCAR_DU_FEATURE_INTERLACED 185 163 | RCAR_DU_FEATURE_TVM_SYNC, 186 164 .channels_mask = BIT(1) | BIT(0), ··· 203 183 static const struct rcar_du_device_info rcar_du_r8a7792_info = { 204 184 .gen = 2, 205 185 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 206 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 207 186 | RCAR_DU_FEATURE_INTERLACED 208 187 | RCAR_DU_FEATURE_TVM_SYNC, 209 188 .channels_mask = BIT(1) | BIT(0), ··· 222 203 static const struct rcar_du_device_info rcar_du_r8a7794_info = { 223 204 .gen = 2, 224 205 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 225 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 226 206 | RCAR_DU_FEATURE_INTERLACED 227 207 | RCAR_DU_FEATURE_TVM_SYNC, 228 208 .channels_mask = BIT(1) | BIT(0), ··· 244 226 static const struct rcar_du_device_info rcar_du_r8a7795_info = { 245 227 .gen = 3, 246 228 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 247 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 248 229 | RCAR_DU_FEATURE_VSP1_SOURCE 249 230 | RCAR_DU_FEATURE_INTERLACED 250 231 | RCAR_DU_FEATURE_TVM_SYNC, ··· 277 260 static const struct rcar_du_device_info rcar_du_r8a7796_info = { 278 261 .gen = 3, 279 262 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 280 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 281 263 | RCAR_DU_FEATURE_VSP1_SOURCE 282 264 | RCAR_DU_FEATURE_INTERLACED 283 265 | RCAR_DU_FEATURE_TVM_SYNC, ··· 306 290 static const struct rcar_du_device_info rcar_du_r8a77965_info = { 307 291 .gen = 3, 308 292 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 309 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 310 293 | 
RCAR_DU_FEATURE_VSP1_SOURCE 311 294 | RCAR_DU_FEATURE_INTERLACED 312 295 | RCAR_DU_FEATURE_TVM_SYNC, ··· 335 320 static const struct rcar_du_device_info rcar_du_r8a77970_info = { 336 321 .gen = 3, 337 322 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 338 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 339 323 | RCAR_DU_FEATURE_VSP1_SOURCE 340 324 | RCAR_DU_FEATURE_INTERLACED 341 325 | RCAR_DU_FEATURE_TVM_SYNC, ··· 356 342 static const struct rcar_du_device_info rcar_du_r8a7799x_info = { 357 343 .gen = 3, 358 344 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 359 - | RCAR_DU_FEATURE_EXT_CTRL_REGS 360 345 | RCAR_DU_FEATURE_VSP1_SOURCE, 361 346 .channels_mask = BIT(1) | BIT(0), 362 347 .routes = { ··· 385 372 { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info }, 386 373 { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, 387 374 { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info }, 375 + { .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info }, 388 376 { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, 389 377 { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, 390 378 { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
+5 -4
drivers/gpu/drm/rcar-du/rcar_du_drv.h
··· 20 20 struct clk; 21 21 struct device; 22 22 struct drm_device; 23 + struct drm_property; 23 24 struct rcar_du_device; 24 25 25 26 #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */ 26 - #define RCAR_DU_FEATURE_EXT_CTRL_REGS BIT(1) /* Has extended control registers */ 27 - #define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */ 28 - #define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */ 29 - #define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */ 27 + #define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */ 28 + #define RCAR_DU_FEATURE_INTERLACED BIT(2) /* HW supports interlaced */ 29 + #define RCAR_DU_FEATURE_TVM_SYNC BIT(3) /* Has TV switch/sync modes */ 30 30 31 31 #define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ 32 32 ··· 89 89 } props; 90 90 91 91 unsigned int dpad0_source; 92 + unsigned int dpad1_source; 92 93 unsigned int vspd1_sink; 93 94 }; 94 95
-11
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
··· 9 9 10 10 #include <linux/export.h> 11 11 12 - #include <drm/drmP.h> 13 12 #include <drm/drm_crtc.h> 14 13 #include <drm/drm_crtc_helper.h> 15 14 #include <drm/drm_panel.h> ··· 21 22 * Encoder 22 23 */ 23 24 24 - static void rcar_du_encoder_mode_set(struct drm_encoder *encoder, 25 - struct drm_crtc_state *crtc_state, 26 - struct drm_connector_state *conn_state) 27 - { 28 - struct rcar_du_encoder *renc = to_rcar_encoder(encoder); 29 - 30 - rcar_du_crtc_route_output(crtc_state->crtc, renc->output); 31 - } 32 - 33 25 static const struct drm_encoder_helper_funcs encoder_helper_funcs = { 34 - .atomic_mode_set = rcar_du_encoder_mode_set, 35 26 }; 36 27 37 28 static const struct drm_encoder_funcs encoder_funcs = {
-2
drivers/gpu/drm/rcar-du/rcar_du_encoder.h
··· 10 10 #ifndef __RCAR_DU_ENCODER_H__ 11 11 #define __RCAR_DU_ENCODER_H__ 12 12 13 - #include <drm/drm_crtc.h> 14 13 #include <drm/drm_encoder.h> 15 14 16 - struct drm_panel; 17 15 struct rcar_du_device; 18 16 19 17 struct rcar_du_encoder {
+47 -4
drivers/gpu/drm/rcar-du/rcar_du_group.c
··· 147 147 148 148 rcar_du_group_setup_pins(rgrp); 149 149 150 - if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) { 150 + if (rcdu->info->gen >= 2) { 151 151 rcar_du_group_setup_defr8(rgrp); 152 152 rcar_du_group_setup_didsr(rgrp); 153 153 } ··· 262 262 unsigned int index; 263 263 int ret; 264 264 265 - if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS)) 265 + if (rcdu->info->gen < 2) 266 266 return 0; 267 267 268 268 /* ··· 287 287 return 0; 288 288 } 289 289 290 + static void rcar_du_group_set_dpad_levels(struct rcar_du_group *rgrp) 291 + { 292 + static const u32 doflr_values[2] = { 293 + DOFLR_HSYCFL0 | DOFLR_VSYCFL0 | DOFLR_ODDFL0 | 294 + DOFLR_DISPFL0 | DOFLR_CDEFL0 | DOFLR_RGBFL0, 295 + DOFLR_HSYCFL1 | DOFLR_VSYCFL1 | DOFLR_ODDFL1 | 296 + DOFLR_DISPFL1 | DOFLR_CDEFL1 | DOFLR_RGBFL1, 297 + }; 298 + static const u32 dpad_mask = BIT(RCAR_DU_OUTPUT_DPAD1) 299 + | BIT(RCAR_DU_OUTPUT_DPAD0); 300 + struct rcar_du_device *rcdu = rgrp->dev; 301 + u32 doflr = DOFLR_CODE; 302 + unsigned int i; 303 + 304 + if (rcdu->info->gen < 2) 305 + return; 306 + 307 + /* 308 + * The DPAD outputs can't be controlled directly. However, the parallel 309 + * output of the DU channels routed to DPAD can be set to fixed levels 310 + * through the DOFLR group register. Use this to turn the DPAD on or off 311 + * by driving fixed low-level signals at the output of any DU channel 312 + * not routed to a DPAD output. This doesn't affect the DU output 313 + * signals going to other outputs, such as the internal LVDS and HDMI 314 + * encoders. 
315 + */ 316 + 317 + for (i = 0; i < rgrp->num_crtcs; ++i) { 318 + struct rcar_du_crtc_state *rstate; 319 + struct rcar_du_crtc *rcrtc; 320 + 321 + rcrtc = &rcdu->crtcs[rgrp->index * 2 + i]; 322 + rstate = to_rcar_crtc_state(rcrtc->crtc.state); 323 + 324 + if (!(rstate->outputs & dpad_mask)) 325 + doflr |= doflr_values[i]; 326 + } 327 + 328 + rcar_du_group_write(rgrp, DOFLR, doflr); 329 + } 330 + 290 331 int rcar_du_group_set_routing(struct rcar_du_group *rgrp) 291 332 { 292 - struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2]; 333 + struct rcar_du_device *rcdu = rgrp->dev; 293 334 u32 dorcr = rcar_du_group_read(rgrp, DORCR); 294 335 295 336 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); ··· 340 299 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1 341 300 * by default. 342 301 */ 343 - if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1)) 302 + if (rcdu->dpad1_source == rgrp->index * 2) 344 303 dorcr |= DORCR_PG2D_DS1; 345 304 else 346 305 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2; 347 306 348 307 rcar_du_group_write(rgrp, DORCR, dorcr); 308 + 309 + rcar_du_group_set_dpad_levels(rgrp); 349 310 350 311 return rcar_du_set_dpad0_vsp1_routing(rgrp->dev); 351 312 }
+22 -1
drivers/gpu/drm/rcar-du/rcar_du_kms.c
··· 7 7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 8 */ 9 9 10 - #include <drm/drmP.h> 11 10 #include <drm/drm_atomic.h> 12 11 #include <drm/drm_atomic_helper.h> 13 12 #include <drm/drm_crtc.h> ··· 277 278 static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state) 278 279 { 279 280 struct drm_device *dev = old_state->dev; 281 + struct rcar_du_device *rcdu = dev->dev_private; 282 + struct drm_crtc_state *crtc_state; 283 + struct drm_crtc *crtc; 284 + unsigned int i; 285 + 286 + /* 287 + * Store RGB routing to DPAD0 and DPAD1, the hardware will be configured 288 + * when starting the CRTCs. 289 + */ 290 + rcdu->dpad1_source = -1; 291 + 292 + for_each_new_crtc_in_state(old_state, crtc, crtc_state, i) { 293 + struct rcar_du_crtc_state *rcrtc_state = 294 + to_rcar_crtc_state(crtc_state); 295 + struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 296 + 297 + if (rcrtc_state->outputs & BIT(RCAR_DU_OUTPUT_DPAD0)) 298 + rcdu->dpad0_source = rcrtc->index; 299 + 300 + if (rcrtc_state->outputs & BIT(RCAR_DU_OUTPUT_DPAD1)) 301 + rcdu->dpad1_source = rcrtc->index; 302 + } 280 303 281 304 /* Apply the atomic update. */ 282 305 drm_atomic_helper_commit_modeset_disables(dev, old_state);
+44 -51
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts
··· 7 7 8 8 /dts-v1/; 9 9 /plugin/; 10 - / { 11 - fragment@0 { 12 - target-path = "/"; 13 - __overlay__ { 14 - #address-cells = <2>; 15 - #size-cells = <2>; 16 10 17 - lvds@feb90000 { 18 - compatible = "renesas,r8a7790-lvds"; 19 - reg = <0 0xfeb90000 0 0x1c>; 11 + &{/} { 12 + #address-cells = <2>; 13 + #size-cells = <2>; 20 14 21 - ports { 22 - #address-cells = <1>; 23 - #size-cells = <0>; 15 + lvds@feb90000 { 16 + compatible = "renesas,r8a7790-lvds"; 17 + reg = <0 0xfeb90000 0 0x1c>; 24 18 25 - port@0 { 26 - reg = <0>; 27 - lvds0_input: endpoint { 28 - }; 29 - }; 30 - port@1 { 31 - reg = <1>; 32 - lvds0_out: endpoint { 33 - }; 34 - }; 19 + ports { 20 + #address-cells = <1>; 21 + #size-cells = <0>; 22 + 23 + port@0 { 24 + reg = <0>; 25 + lvds0_input: endpoint { 35 26 }; 36 27 }; 37 - 38 - lvds@feb94000 { 39 - compatible = "renesas,r8a7790-lvds"; 40 - reg = <0 0xfeb94000 0 0x1c>; 41 - 42 - ports { 43 - #address-cells = <1>; 44 - #size-cells = <0>; 45 - 46 - port@0 { 47 - reg = <0>; 48 - lvds1_input: endpoint { 49 - }; 50 - }; 51 - port@1 { 52 - reg = <1>; 53 - lvds1_out: endpoint { 54 - }; 55 - }; 28 + port@1 { 29 + reg = <1>; 30 + lvds0_out: endpoint { 56 31 }; 57 32 }; 58 33 }; 59 34 }; 60 35 61 - fragment@1 { 62 - target-path = "/display@feb00000/ports"; 63 - __overlay__ { 36 + lvds@feb94000 { 37 + compatible = "renesas,r8a7790-lvds"; 38 + reg = <0 0xfeb94000 0 0x1c>; 39 + 40 + ports { 41 + #address-cells = <1>; 42 + #size-cells = <0>; 43 + 44 + port@0 { 45 + reg = <0>; 46 + lvds1_input: endpoint { 47 + }; 48 + }; 64 49 port@1 { 65 - endpoint { 66 - remote-endpoint = <&lvds0_input>; 50 + reg = <1>; 51 + lvds1_out: endpoint { 67 52 }; 68 53 }; 69 - port@2 { 70 - endpoint { 71 - remote-endpoint = <&lvds1_input>; 72 - }; 73 - }; 54 + }; 55 + }; 56 + }; 57 + 58 + &{/display@feb00000/ports} { 59 + port@1 { 60 + endpoint { 61 + remote-endpoint = <&lvds0_input>; 62 + }; 63 + }; 64 + port@2 { 65 + endpoint { 66 + remote-endpoint = <&lvds1_input>; 74 67 }; 75 68 }; 76 69 
};
+23 -30
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts
··· 7 7 8 8 /dts-v1/; 9 9 /plugin/; 10 - / { 11 - fragment@0 { 12 - target-path = "/"; 13 - __overlay__ { 14 - #address-cells = <2>; 15 - #size-cells = <2>; 16 10 17 - lvds@feb90000 { 18 - compatible = "renesas,r8a7791-lvds"; 19 - reg = <0 0xfeb90000 0 0x1c>; 11 + &{/} { 12 + #address-cells = <2>; 13 + #size-cells = <2>; 20 14 21 - ports { 22 - #address-cells = <1>; 23 - #size-cells = <0>; 15 + lvds@feb90000 { 16 + compatible = "renesas,r8a7791-lvds"; 17 + reg = <0 0xfeb90000 0 0x1c>; 24 18 25 - port@0 { 26 - reg = <0>; 27 - lvds0_input: endpoint { 28 - }; 29 - }; 30 - port@1 { 31 - reg = <1>; 32 - lvds0_out: endpoint { 33 - }; 34 - }; 19 + ports { 20 + #address-cells = <1>; 21 + #size-cells = <0>; 22 + 23 + port@0 { 24 + reg = <0>; 25 + lvds0_input: endpoint { 26 + }; 27 + }; 28 + port@1 { 29 + reg = <1>; 30 + lvds0_out: endpoint { 35 31 }; 36 32 }; 37 33 }; 38 34 }; 35 + }; 39 36 40 - fragment@1 { 41 - target-path = "/display@feb00000/ports"; 42 - __overlay__ { 43 - port@1 { 44 - endpoint { 45 - remote-endpoint = <&lvds0_input>; 46 - }; 47 - }; 37 + &{/display@feb00000/ports} { 38 + port@1 { 39 + endpoint { 40 + remote-endpoint = <&lvds0_input>; 48 41 }; 49 42 }; 50 43 };
+23 -30
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts
··· 7 7 8 8 /dts-v1/; 9 9 /plugin/; 10 - / { 11 - fragment@0 { 12 - target-path = "/"; 13 - __overlay__ { 14 - #address-cells = <2>; 15 - #size-cells = <2>; 16 10 17 - lvds@feb90000 { 18 - compatible = "renesas,r8a7793-lvds"; 19 - reg = <0 0xfeb90000 0 0x1c>; 11 + &{/} { 12 + #address-cells = <2>; 13 + #size-cells = <2>; 20 14 21 - ports { 22 - #address-cells = <1>; 23 - #size-cells = <0>; 15 + lvds@feb90000 { 16 + compatible = "renesas,r8a7793-lvds"; 17 + reg = <0 0xfeb90000 0 0x1c>; 24 18 25 - port@0 { 26 - reg = <0>; 27 - lvds0_input: endpoint { 28 - }; 29 - }; 30 - port@1 { 31 - reg = <1>; 32 - lvds0_out: endpoint { 33 - }; 34 - }; 19 + ports { 20 + #address-cells = <1>; 21 + #size-cells = <0>; 22 + 23 + port@0 { 24 + reg = <0>; 25 + lvds0_input: endpoint { 26 + }; 27 + }; 28 + port@1 { 29 + reg = <1>; 30 + lvds0_out: endpoint { 35 31 }; 36 32 }; 37 33 }; 38 34 }; 35 + }; 39 36 40 - fragment@1 { 41 - target-path = "/display@feb00000/ports"; 42 - __overlay__ { 43 - port@1 { 44 - endpoint { 45 - remote-endpoint = <&lvds0_input>; 46 - }; 47 - }; 37 + &{/display@feb00000/ports} { 38 + port@1 { 39 + endpoint { 40 + remote-endpoint = <&lvds0_input>; 48 41 }; 49 42 }; 50 43 };
+23 -30
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts
··· 7 7 8 8 /dts-v1/; 9 9 /plugin/; 10 - / { 11 - fragment@0 { 12 - target-path = "/soc"; 13 - __overlay__ { 14 - #address-cells = <2>; 15 - #size-cells = <2>; 16 10 17 - lvds@feb90000 { 18 - compatible = "renesas,r8a7795-lvds"; 19 - reg = <0 0xfeb90000 0 0x14>; 11 + &{/soc} { 12 + #address-cells = <2>; 13 + #size-cells = <2>; 20 14 21 - ports { 22 - #address-cells = <1>; 23 - #size-cells = <0>; 15 + lvds@feb90000 { 16 + compatible = "renesas,r8a7795-lvds"; 17 + reg = <0 0xfeb90000 0 0x14>; 24 18 25 - port@0 { 26 - reg = <0>; 27 - lvds0_input: endpoint { 28 - }; 29 - }; 30 - port@1 { 31 - reg = <1>; 32 - lvds0_out: endpoint { 33 - }; 34 - }; 19 + ports { 20 + #address-cells = <1>; 21 + #size-cells = <0>; 22 + 23 + port@0 { 24 + reg = <0>; 25 + lvds0_input: endpoint { 26 + }; 27 + }; 28 + port@1 { 29 + reg = <1>; 30 + lvds0_out: endpoint { 35 31 }; 36 32 }; 37 33 }; 38 34 }; 35 + }; 39 36 40 - fragment@1 { 41 - target-path = "/soc/display@feb00000/ports"; 42 - __overlay__ { 43 - port@3 { 44 - endpoint { 45 - remote-endpoint = <&lvds0_input>; 46 - }; 47 - }; 37 + &{/soc/display@feb00000/ports} { 38 + port@3 { 39 + endpoint { 40 + remote-endpoint = <&lvds0_input>; 48 41 }; 49 42 }; 50 43 };
+23 -30
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts
··· 7 7 8 8 /dts-v1/; 9 9 /plugin/; 10 - / { 11 - fragment@0 { 12 - target-path = "/soc"; 13 - __overlay__ { 14 - #address-cells = <2>; 15 - #size-cells = <2>; 16 10 17 - lvds@feb90000 { 18 - compatible = "renesas,r8a7796-lvds"; 19 - reg = <0 0xfeb90000 0 0x14>; 11 + &{/soc} { 12 + #address-cells = <2>; 13 + #size-cells = <2>; 20 14 21 - ports { 22 - #address-cells = <1>; 23 - #size-cells = <0>; 15 + lvds@feb90000 { 16 + compatible = "renesas,r8a7796-lvds"; 17 + reg = <0 0xfeb90000 0 0x14>; 24 18 25 - port@0 { 26 - reg = <0>; 27 - lvds0_input: endpoint { 28 - }; 29 - }; 30 - port@1 { 31 - reg = <1>; 32 - lvds0_out: endpoint { 33 - }; 34 - }; 19 + ports { 20 + #address-cells = <1>; 21 + #size-cells = <0>; 22 + 23 + port@0 { 24 + reg = <0>; 25 + lvds0_input: endpoint { 26 + }; 27 + }; 28 + port@1 { 29 + reg = <1>; 30 + lvds0_out: endpoint { 35 31 }; 36 32 }; 37 33 }; 38 34 }; 35 + }; 39 36 40 - fragment@1 { 41 - target-path = "/soc/display@feb00000/ports"; 42 - __overlay__ { 43 - port@3 { 44 - endpoint { 45 - remote-endpoint = <&lvds0_input>; 46 - }; 47 - }; 37 + &{/soc/display@feb00000/ports} { 38 + port@3 { 39 + endpoint { 40 + remote-endpoint = <&lvds0_input>; 48 41 }; 49 42 }; 50 43 };
-1
drivers/gpu/drm/rcar-du/rcar_du_plane.c
··· 7 7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 8 */ 9 9 10 - #include <drm/drmP.h> 11 10 #include <drm/drm_atomic.h> 12 11 #include <drm/drm_atomic_helper.h> 13 12 #include <drm/drm_crtc.h>
+1 -2
drivers/gpu/drm/rcar-du/rcar_du_plane.h
··· 10 10 #ifndef __RCAR_DU_PLANE_H__ 11 11 #define __RCAR_DU_PLANE_H__ 12 12 13 - #include <drm/drmP.h> 14 - #include <drm/drm_crtc.h> 13 + #include <drm/drm_plane.h> 15 14 16 15 struct rcar_du_format_info; 17 16 struct rcar_du_group;
-1
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
··· 7 7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 8 */ 9 9 10 - #include <drm/drmP.h> 11 10 #include <drm/drm_atomic_helper.h> 12 11 #include <drm/drm_crtc.h> 13 12 #include <drm/drm_crtc_helper.h>
+1 -2
drivers/gpu/drm/rcar-du/rcar_du_vsp.h
··· 10 10 #ifndef __RCAR_DU_VSP_H__ 11 11 #define __RCAR_DU_VSP_H__ 12 12 13 - #include <drm/drmP.h> 14 - #include <drm/drm_crtc.h> 13 + #include <drm/drm_plane.h> 15 14 16 15 struct rcar_du_format_info; 17 16 struct rcar_du_vsp;
+17
drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
··· 7 7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 8 */ 9 9 10 + #include <linux/mod_devicetable.h> 10 11 #include <linux/module.h> 11 12 #include <linux/platform_device.h> 12 13 13 14 #include <drm/bridge/dw_hdmi.h> 15 + #include <drm/drm_modes.h> 14 16 15 17 #define RCAR_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */ 16 18 #define RCAR_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */ ··· 36 34 { 297000000, 0x0000, 0x0084, 0x0105 }, 37 35 { ~0UL, 0x0000, 0x0000, 0x0000 }, 38 36 }; 37 + 38 + static enum drm_mode_status 39 + rcar_hdmi_mode_valid(struct drm_connector *connector, 40 + const struct drm_display_mode *mode) 41 + { 42 + /* 43 + * The maximum supported clock frequency is 297 MHz, as shown in the PHY 44 + * parameters table. 45 + */ 46 + if (mode->clock > 297000) 47 + return MODE_CLOCK_HIGH; 48 + 49 + return MODE_OK; 50 + } 39 51 40 52 static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi, 41 53 const struct dw_hdmi_plat_data *pdata, ··· 75 59 } 76 60 77 61 static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = { 62 + .mode_valid = rcar_hdmi_mode_valid, 78 63 .configure_phy = rcar_hdmi_phy_configure, 79 64 }; 80 65
+3 -2
drivers/gpu/drm/rcar-du/rcar_lvds.c
··· 520 520 } 521 521 522 522 static void rcar_lvds_mode_set(struct drm_bridge *bridge, 523 - struct drm_display_mode *mode, 524 - struct drm_display_mode *adjusted_mode) 523 + const struct drm_display_mode *mode, 524 + const struct drm_display_mode *adjusted_mode) 525 525 { 526 526 struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); 527 527 ··· 785 785 786 786 static const struct of_device_id rcar_lvds_of_table[] = { 787 787 { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info }, 788 + { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info }, 788 789 { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info }, 789 790 { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info }, 790 791 { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
+1 -1
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
··· 467 467 } 468 468 469 469 static int 470 - dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode, 470 + dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode, 471 471 unsigned long mode_flags, u32 lanes, u32 format, 472 472 unsigned int *lane_mbps) 473 473 {
+3 -1
drivers/gpu/drm/rockchip/inno_hdmi.c
··· 295 295 union hdmi_infoframe frame; 296 296 int rc; 297 297 298 - rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); 298 + rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 299 + &hdmi->connector, 300 + mode); 299 301 300 302 if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444) 301 303 frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
-36
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
··· 128 128 } 129 129 130 130 static void 131 - rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state) 132 - { 133 - struct drm_crtc *crtc; 134 - struct drm_crtc_state *crtc_state; 135 - struct drm_encoder *encoder; 136 - u32 encoder_mask = 0; 137 - int i; 138 - 139 - for_each_old_crtc_in_state(state, crtc, crtc_state, i) { 140 - encoder_mask |= crtc_state->encoder_mask; 141 - encoder_mask |= crtc->state->encoder_mask; 142 - } 143 - 144 - drm_for_each_encoder_mask(encoder, state->dev, encoder_mask) 145 - rockchip_drm_psr_inhibit_get(encoder); 146 - } 147 - 148 - static void 149 - rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state) 150 - { 151 - struct drm_crtc *crtc; 152 - struct drm_crtc_state *crtc_state; 153 - struct drm_encoder *encoder; 154 - u32 encoder_mask = 0; 155 - int i; 156 - 157 - for_each_old_crtc_in_state(state, crtc, crtc_state, i) { 158 - encoder_mask |= crtc_state->encoder_mask; 159 - encoder_mask |= crtc->state->encoder_mask; 160 - } 161 - 162 - drm_for_each_encoder_mask(encoder, state->dev, encoder_mask) 163 - rockchip_drm_psr_inhibit_put(encoder); 164 - } 165 - 166 - static void 167 131 rockchip_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state) 168 132 { 169 133 struct drm_device *dev = old_state->dev;
+37
drivers/gpu/drm/rockchip/rockchip_drm_psr.c
··· 13 13 */ 14 14 15 15 #include <drm/drmP.h> 16 + #include <drm/drm_atomic.h> 16 17 #include <drm/drm_crtc_helper.h> 17 18 18 19 #include "rockchip_drm_drv.h" ··· 109 108 return 0; 110 109 } 111 110 EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put); 111 + 112 + void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state) 113 + { 114 + struct drm_crtc *crtc; 115 + struct drm_crtc_state *crtc_state; 116 + struct drm_encoder *encoder; 117 + u32 encoder_mask = 0; 118 + int i; 119 + 120 + for_each_old_crtc_in_state(state, crtc, crtc_state, i) { 121 + encoder_mask |= crtc_state->encoder_mask; 122 + encoder_mask |= crtc->state->encoder_mask; 123 + } 124 + 125 + drm_for_each_encoder_mask(encoder, state->dev, encoder_mask) 126 + rockchip_drm_psr_inhibit_get(encoder); 127 + } 128 + EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get_state); 129 + 130 + void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state) 131 + { 132 + struct drm_crtc *crtc; 133 + struct drm_crtc_state *crtc_state; 134 + struct drm_encoder *encoder; 135 + u32 encoder_mask = 0; 136 + int i; 137 + 138 + for_each_old_crtc_in_state(state, crtc, crtc_state, i) { 139 + encoder_mask |= crtc_state->encoder_mask; 140 + encoder_mask |= crtc->state->encoder_mask; 141 + } 142 + 143 + drm_for_each_encoder_mask(encoder, state->dev, encoder_mask) 144 + rockchip_drm_psr_inhibit_put(encoder); 145 + } 146 + EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put_state); 112 147 113 148 /** 114 149 * rockchip_drm_psr_inhibit_get - acquire PSR inhibit on given encoder
+3
drivers/gpu/drm/rockchip/rockchip_drm_psr.h
··· 20 20 int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder); 21 21 int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder); 22 22 23 + void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state); 24 + void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state); 25 + 23 26 int rockchip_drm_psr_register(struct drm_encoder *encoder, 24 27 int (*psr_set)(struct drm_encoder *, bool enable)); 25 28 void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
+153 -6
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 15 15 #include <drm/drm.h> 16 16 #include <drm/drmP.h> 17 17 #include <drm/drm_atomic.h> 18 + #include <drm/drm_atomic_uapi.h> 18 19 #include <drm/drm_crtc.h> 19 20 #include <drm/drm_crtc_helper.h> 20 21 #include <drm/drm_flip_work.h> 22 + #include <drm/drm_gem_framebuffer_helper.h> 21 23 #include <drm/drm_plane_helper.h> 22 24 #ifdef CONFIG_DRM_ANALOGIX_DP 23 25 #include <drm/bridge/analogix_dp.h> ··· 46 44 #include "rockchip_drm_vop.h" 47 45 #include "rockchip_rgb.h" 48 46 49 - #define VOP_WIN_SET(x, win, name, v) \ 47 + #define VOP_WIN_SET(vop, win, name, v) \ 50 48 vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name) 51 - #define VOP_SCL_SET(x, win, name, v) \ 49 + #define VOP_SCL_SET(vop, win, name, v) \ 52 50 vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name) 53 - #define VOP_SCL_SET_EXT(x, win, name, v) \ 51 + #define VOP_SCL_SET_EXT(vop, win, name, v) \ 54 52 vop_reg_set(vop, &win->phy->scl->ext->name, \ 55 53 win->base, ~0, v, #name) 54 + 55 + #define VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, name, v) \ 56 + do { \ 57 + if (win_yuv2yuv && win_yuv2yuv->name.mask) \ 58 + vop_reg_set(vop, &win_yuv2yuv->name, 0, ~0, v, #name); \ 59 + } while (0) 60 + 61 + #define VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, win_yuv2yuv, name, v) \ 62 + do { \ 63 + if (win_yuv2yuv && win_yuv2yuv->phy->name.mask) \ 64 + vop_reg_set(vop, &win_yuv2yuv->phy->name, win_yuv2yuv->base, ~0, v, #name); \ 65 + } while (0) 56 66 57 67 #define VOP_INTR_SET_MASK(vop, name, mask, v) \ 58 68 vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name) ··· 86 72 #define VOP_INTR_GET_TYPE(vop, name, type) \ 87 73 vop_get_intr_type(vop, &vop->data->intr->name, type) 88 74 89 - #define VOP_WIN_GET(x, win, name) \ 90 - vop_read_reg(x, win->offset, win->phy->name) 75 + #define VOP_WIN_GET(vop, win, name) \ 76 + vop_read_reg(vop, win->offset, win->phy->name) 77 + 78 + #define VOP_WIN_HAS_REG(win, name) \ 79 + (!!(win->phy->name.mask)) 91 80 92 81 #define VOP_WIN_GET_YRGBADDR(vop, win) \ 93 
82 vop_readl(vop, win->base + win->phy->yrgb_mst.offset) ··· 101 84 #define to_vop(x) container_of(x, struct vop, crtc) 102 85 #define to_vop_win(x) container_of(x, struct vop_win, base) 103 86 87 + /* 88 + * The coefficients of the following matrix are all fixed points. 89 + * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets. 90 + * They are all represented in two's complement. 91 + */ 92 + static const uint32_t bt601_yuv2rgb[] = { 93 + 0x4A8, 0x0, 0x662, 94 + 0x4A8, 0x1E6F, 0x1CBF, 95 + 0x4A8, 0x812, 0x0, 96 + 0x321168, 0x0877CF, 0x2EB127 97 + }; 98 + 104 99 enum vop_pending { 105 100 VOP_PENDING_FB_UNREF, 106 101 }; ··· 120 91 struct vop_win { 121 92 struct drm_plane base; 122 93 const struct vop_win_data *data; 94 + const struct vop_win_yuv2yuv_data *yuv2yuv_data; 123 95 struct vop *vop; 124 96 }; 125 97 ··· 715 685 return -EINVAL; 716 686 } 717 687 688 + if (fb->format->is_yuv && state->rotation & DRM_MODE_REFLECT_Y) { 689 + DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n"); 690 + return -EINVAL; 691 + } 692 + 718 693 return 0; 719 694 } 720 695 ··· 747 712 struct drm_crtc *crtc = state->crtc; 748 713 struct vop_win *vop_win = to_vop_win(plane); 749 714 const struct vop_win_data *win = vop_win->data; 715 + const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data; 750 716 struct vop *vop = to_vop(state->crtc); 751 717 struct drm_framebuffer *fb = state->fb; 752 718 unsigned int actual_w, actual_h; ··· 763 727 bool rb_swap; 764 728 int win_index = VOP_WIN_TO_INDEX(vop_win); 765 729 int format; 730 + int is_yuv = fb->format->is_yuv; 731 + int i; 766 732 767 733 /* 768 734 * can't update plane when vop is disabled. ··· 798 760 offset += (src->y1 >> 16) * fb->pitches[0]; 799 761 dma_addr = rk_obj->dma_addr + offset + fb->offsets[0]; 800 762 763 + /* 764 + * For y-mirroring we need to move address 765 + * to the beginning of the last line. 
766 + */ 767 + if (state->rotation & DRM_MODE_REFLECT_Y) 768 + dma_addr += (actual_h - 1) * fb->pitches[0]; 769 + 801 770 format = vop_convert_format(fb->format->format); 802 771 803 772 spin_lock(&vop->reg_lock); ··· 812 767 VOP_WIN_SET(vop, win, format, format); 813 768 VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); 814 769 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); 815 - if (fb->format->is_yuv) { 770 + VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv); 771 + VOP_WIN_SET(vop, win, y_mir_en, 772 + (state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0); 773 + VOP_WIN_SET(vop, win, x_mir_en, 774 + (state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0); 775 + 776 + if (is_yuv) { 816 777 int hsub = drm_format_horz_chroma_subsampling(fb->format->format); 817 778 int vsub = drm_format_vert_chroma_subsampling(fb->format->format); 818 779 int bpp = fb->format->cpp[1]; ··· 832 781 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; 833 782 VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4)); 834 783 VOP_WIN_SET(vop, win, uv_mst, dma_addr); 784 + 785 + for (i = 0; i < NUM_YUV2YUV_COEFFICIENTS; i++) { 786 + VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, 787 + win_yuv2yuv, 788 + y2r_coefficients[i], 789 + bt601_yuv2rgb[i]); 790 + } 835 791 } 836 792 837 793 if (win->phy->scl) ··· 877 819 spin_unlock(&vop->reg_lock); 878 820 } 879 821 822 + static int vop_plane_atomic_async_check(struct drm_plane *plane, 823 + struct drm_plane_state *state) 824 + { 825 + struct vop_win *vop_win = to_vop_win(plane); 826 + const struct vop_win_data *win = vop_win->data; 827 + int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : 828 + DRM_PLANE_HELPER_NO_SCALING; 829 + int max_scale = win->phy->scl ? 
FRAC_16_16(8, 1) : 830 + DRM_PLANE_HELPER_NO_SCALING; 831 + struct drm_crtc_state *crtc_state; 832 + 833 + if (plane != state->crtc->cursor) 834 + return -EINVAL; 835 + 836 + if (!plane->state) 837 + return -EINVAL; 838 + 839 + if (!plane->state->fb) 840 + return -EINVAL; 841 + 842 + if (state->state) 843 + crtc_state = drm_atomic_get_existing_crtc_state(state->state, 844 + state->crtc); 845 + else /* Special case for asynchronous cursor updates. */ 846 + crtc_state = plane->crtc->state; 847 + 848 + return drm_atomic_helper_check_plane_state(plane->state, crtc_state, 849 + min_scale, max_scale, 850 + true, true); 851 + } 852 + 853 + static void vop_plane_atomic_async_update(struct drm_plane *plane, 854 + struct drm_plane_state *new_state) 855 + { 856 + struct vop *vop = to_vop(plane->state->crtc); 857 + struct drm_plane_state *plane_state; 858 + 859 + plane_state = plane->funcs->atomic_duplicate_state(plane); 860 + plane_state->crtc_x = new_state->crtc_x; 861 + plane_state->crtc_y = new_state->crtc_y; 862 + plane_state->crtc_h = new_state->crtc_h; 863 + plane_state->crtc_w = new_state->crtc_w; 864 + plane_state->src_x = new_state->src_x; 865 + plane_state->src_y = new_state->src_y; 866 + plane_state->src_h = new_state->src_h; 867 + plane_state->src_w = new_state->src_w; 868 + 869 + if (plane_state->fb != new_state->fb) 870 + drm_atomic_set_fb_for_plane(plane_state, new_state->fb); 871 + 872 + swap(plane_state, plane->state); 873 + 874 + if (plane->state->fb && plane->state->fb != new_state->fb) { 875 + drm_framebuffer_get(plane->state->fb); 876 + WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0); 877 + drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb); 878 + set_bit(VOP_PENDING_FB_UNREF, &vop->pending); 879 + } 880 + 881 + if (vop->is_enabled) { 882 + rockchip_drm_psr_inhibit_get_state(new_state->state); 883 + vop_plane_atomic_update(plane, plane->state); 884 + spin_lock(&vop->reg_lock); 885 + vop_cfg_done(vop); 886 + spin_unlock(&vop->reg_lock); 887 + 
rockchip_drm_psr_inhibit_put_state(new_state->state); 888 + } 889 + 890 + plane->funcs->atomic_destroy_state(plane, plane_state); 891 + } 892 + 880 893 static const struct drm_plane_helper_funcs plane_helper_funcs = { 881 894 .atomic_check = vop_plane_atomic_check, 882 895 .atomic_update = vop_plane_atomic_update, 883 896 .atomic_disable = vop_plane_atomic_disable, 897 + .atomic_async_check = vop_plane_atomic_async_check, 898 + .atomic_async_update = vop_plane_atomic_async_update, 899 + .prepare_fb = drm_gem_fb_prepare_fb, 884 900 }; 885 901 886 902 static const struct drm_plane_funcs vop_plane_funcs = { ··· 1404 1272 return ret; 1405 1273 } 1406 1274 1275 + static void vop_plane_add_properties(struct drm_plane *plane, 1276 + const struct vop_win_data *win_data) 1277 + { 1278 + unsigned int flags = 0; 1279 + 1280 + flags |= VOP_WIN_HAS_REG(win_data, x_mir_en) ? DRM_MODE_REFLECT_X : 0; 1281 + flags |= VOP_WIN_HAS_REG(win_data, y_mir_en) ? DRM_MODE_REFLECT_Y : 0; 1282 + if (flags) 1283 + drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 1284 + DRM_MODE_ROTATE_0 | flags); 1285 + } 1286 + 1407 1287 static int vop_create_crtc(struct vop *vop) 1408 1288 { 1409 1289 const struct vop_data *vop_data = vop->data; ··· 1453 1309 1454 1310 plane = &vop_win->base; 1455 1311 drm_plane_helper_add(plane, &plane_helper_funcs); 1312 + vop_plane_add_properties(plane, win_data); 1456 1313 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 1457 1314 primary = plane; 1458 1315 else if (plane->type == DRM_PLANE_TYPE_CURSOR) ··· 1491 1346 goto err_cleanup_crtc; 1492 1347 } 1493 1348 drm_plane_helper_add(&vop_win->base, &plane_helper_funcs); 1349 + vop_plane_add_properties(&vop_win->base, win_data); 1494 1350 } 1495 1351 1496 1352 port = of_get_child_by_name(dev->of_node, "port"); ··· 1675 1529 1676 1530 vop_win->data = win_data; 1677 1531 vop_win->vop = vop; 1532 + vop_win->yuv2yuv_data = &vop_data->win_yuv2yuv[i]; 1678 1533 } 1679 1534 } 1680 1535
+15
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
··· 23 23 #define VOP_MAJOR(version) ((version) >> 8) 24 24 #define VOP_MINOR(version) ((version) & 0xff) 25 25 26 + #define NUM_YUV2YUV_COEFFICIENTS 12 27 + 26 28 enum vop_data_format { 27 29 VOP_FMT_ARGB8888 = 0, 28 30 VOP_FMT_RGB888, ··· 126 124 struct vop_reg scale_cbcr_y; 127 125 }; 128 126 127 + struct vop_yuv2yuv_phy { 128 + struct vop_reg y2r_coefficients[NUM_YUV2YUV_COEFFICIENTS]; 129 + }; 130 + 129 131 struct vop_win_phy { 130 132 const struct vop_scl_regs *scl; 131 133 const uint32_t *data_formats; ··· 146 140 struct vop_reg uv_mst; 147 141 struct vop_reg yrgb_vir; 148 142 struct vop_reg uv_vir; 143 + struct vop_reg y_mir_en; 144 + struct vop_reg x_mir_en; 149 145 150 146 struct vop_reg dst_alpha_ctl; 151 147 struct vop_reg src_alpha_ctl; 152 148 struct vop_reg channel; 149 + }; 150 + 151 + struct vop_win_yuv2yuv_data { 152 + uint32_t base; 153 + const struct vop_yuv2yuv_phy *phy; 154 + struct vop_reg y2r_en; 153 155 }; 154 156 155 157 struct vop_win_data { ··· 173 159 const struct vop_misc *misc; 174 160 const struct vop_modeset *modeset; 175 161 const struct vop_output *output; 162 + const struct vop_win_yuv2yuv_data *win_yuv2yuv; 176 163 const struct vop_win_data *win; 177 164 unsigned int win_size; 178 165
+174 -6
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
··· 299 299 .win_size = ARRAY_SIZE(px30_vop_lit_win_data), 300 300 }; 301 301 302 + static const struct vop_scl_regs rk3066_win_scl = { 303 + .scale_yrgb_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), 304 + .scale_yrgb_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), 305 + .scale_cbcr_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0), 306 + .scale_cbcr_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 16), 307 + }; 308 + 309 + static const struct vop_win_phy rk3066_win0_data = { 310 + .scl = &rk3066_win_scl, 311 + .data_formats = formats_win_full, 312 + .nformats = ARRAY_SIZE(formats_win_full), 313 + .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0), 314 + .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4), 315 + .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19), 316 + .act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0), 317 + .dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0), 318 + .dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0), 319 + .yrgb_mst = VOP_REG(RK3066_WIN0_YRGB_MST0, 0xffffffff, 0), 320 + .uv_mst = VOP_REG(RK3066_WIN0_CBR_MST0, 0xffffffff, 0), 321 + .yrgb_vir = VOP_REG(RK3066_WIN0_VIR, 0xffff, 0), 322 + .uv_vir = VOP_REG(RK3066_WIN0_VIR, 0x1fff, 16), 323 + }; 324 + 325 + static const struct vop_win_phy rk3066_win1_data = { 326 + .scl = &rk3066_win_scl, 327 + .data_formats = formats_win_full, 328 + .nformats = ARRAY_SIZE(formats_win_full), 329 + .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1), 330 + .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7), 331 + .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23), 332 + .act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0), 333 + .dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0), 334 + .dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0), 335 + .yrgb_mst = VOP_REG(RK3066_WIN1_YRGB_MST, 0xffffffff, 0), 336 + .uv_mst = VOP_REG(RK3066_WIN1_CBR_MST, 0xffffffff, 0), 337 + .yrgb_vir = VOP_REG(RK3066_WIN1_VIR, 0xffff, 0), 338 + .uv_vir = VOP_REG(RK3066_WIN1_VIR, 0x1fff, 16), 339 + }; 340 + 341 + static const 
struct vop_win_phy rk3066_win2_data = { 342 + .data_formats = formats_win_lite, 343 + .nformats = ARRAY_SIZE(formats_win_lite), 344 + .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2), 345 + .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10), 346 + .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27), 347 + .dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0), 348 + .dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0), 349 + .yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0), 350 + .yrgb_vir = VOP_REG(RK3066_WIN2_VIR, 0xffff, 0), 351 + }; 352 + 353 + static const struct vop_modeset rk3066_modeset = { 354 + .htotal_pw = VOP_REG(RK3066_DSP_HTOTAL_HS_END, 0x1fff1fff, 0), 355 + .hact_st_end = VOP_REG(RK3066_DSP_HACT_ST_END, 0x1fff1fff, 0), 356 + .vtotal_pw = VOP_REG(RK3066_DSP_VTOTAL_VS_END, 0x1fff1fff, 0), 357 + .vact_st_end = VOP_REG(RK3066_DSP_VACT_ST_END, 0x1fff1fff, 0), 358 + }; 359 + 360 + static const struct vop_output rk3066_output = { 361 + .pin_pol = VOP_REG(RK3066_DSP_CTRL0, 0x7, 4), 362 + }; 363 + 364 + static const struct vop_common rk3066_common = { 365 + .standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1), 366 + .out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0), 367 + .cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0), 368 + .dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24), 369 + }; 370 + 371 + static const struct vop_win_data rk3066_vop_win_data[] = { 372 + { .base = 0x00, .phy = &rk3066_win0_data, 373 + .type = DRM_PLANE_TYPE_PRIMARY }, 374 + { .base = 0x00, .phy = &rk3066_win1_data, 375 + .type = DRM_PLANE_TYPE_OVERLAY }, 376 + { .base = 0x00, .phy = &rk3066_win2_data, 377 + .type = DRM_PLANE_TYPE_CURSOR }, 378 + }; 379 + 380 + static const int rk3066_vop_intrs[] = { 381 + /* 382 + * hs_start interrupt fires at frame-start, so serves 383 + * the same purpose as dsp_hold in the driver. 
384 + */ 385 + DSP_HOLD_VALID_INTR, 386 + FS_INTR, 387 + LINE_FLAG_INTR, 388 + BUS_ERROR_INTR, 389 + }; 390 + 391 + static const struct vop_intr rk3066_intr = { 392 + .intrs = rk3066_vop_intrs, 393 + .nintrs = ARRAY_SIZE(rk3066_vop_intrs), 394 + .line_flag_num[0] = VOP_REG(RK3066_INT_STATUS, 0xfff, 12), 395 + .status = VOP_REG(RK3066_INT_STATUS, 0xf, 0), 396 + .enable = VOP_REG(RK3066_INT_STATUS, 0xf, 4), 397 + .clear = VOP_REG(RK3066_INT_STATUS, 0xf, 8), 398 + }; 399 + 400 + static const struct vop_data rk3066_vop = { 401 + .version = VOP_VERSION(2, 1), 402 + .intr = &rk3066_intr, 403 + .common = &rk3066_common, 404 + .modeset = &rk3066_modeset, 405 + .output = &rk3066_output, 406 + .win = rk3066_vop_win_data, 407 + .win_size = ARRAY_SIZE(rk3066_vop_win_data), 408 + }; 409 + 302 410 static const struct vop_scl_regs rk3188_win_scl = { 303 411 .scale_yrgb_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), 304 412 .scale_yrgb_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), ··· 658 550 .clear = VOP_REG_MASK_SYNC(RK3368_INTR_CLEAR, 0x3fff, 0), 659 551 }; 660 552 553 + static const struct vop_win_phy rk3368_win01_data = { 554 + .scl = &rk3288_win_full_scl, 555 + .data_formats = formats_win_full, 556 + .nformats = ARRAY_SIZE(formats_win_full), 557 + .enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0), 558 + .format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1), 559 + .rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12), 560 + .x_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 21), 561 + .y_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 22), 562 + .act_info = VOP_REG(RK3368_WIN0_ACT_INFO, 0x1fff1fff, 0), 563 + .dsp_info = VOP_REG(RK3368_WIN0_DSP_INFO, 0x0fff0fff, 0), 564 + .dsp_st = VOP_REG(RK3368_WIN0_DSP_ST, 0x1fff1fff, 0), 565 + .yrgb_mst = VOP_REG(RK3368_WIN0_YRGB_MST, 0xffffffff, 0), 566 + .uv_mst = VOP_REG(RK3368_WIN0_CBR_MST, 0xffffffff, 0), 567 + .yrgb_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 0), 568 + .uv_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 16), 569 + .src_alpha_ctl = 
VOP_REG(RK3368_WIN0_SRC_ALPHA_CTRL, 0xff, 0), 570 + .dst_alpha_ctl = VOP_REG(RK3368_WIN0_DST_ALPHA_CTRL, 0xff, 0), 571 + .channel = VOP_REG(RK3368_WIN0_CTRL2, 0xff, 0), 572 + }; 573 + 661 574 static const struct vop_win_phy rk3368_win23_data = { 662 575 .data_formats = formats_win_lite, 663 576 .nformats = ARRAY_SIZE(formats_win_lite), ··· 686 557 .enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4), 687 558 .format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5), 688 559 .rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 20), 560 + .y_mir_en = VOP_REG(RK3368_WIN2_CTRL1, 0x1, 15), 689 561 .dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO0, 0x0fff0fff, 0), 690 562 .dsp_st = VOP_REG(RK3368_WIN2_DSP_ST0, 0x1fff1fff, 0), 691 563 .yrgb_mst = VOP_REG(RK3368_WIN2_MST0, 0xffffffff, 0), ··· 696 566 }; 697 567 698 568 static const struct vop_win_data rk3368_vop_win_data[] = { 699 - { .base = 0x00, .phy = &rk3288_win01_data, 569 + { .base = 0x00, .phy = &rk3368_win01_data, 700 570 .type = DRM_PLANE_TYPE_PRIMARY }, 701 - { .base = 0x40, .phy = &rk3288_win01_data, 571 + { .base = 0x40, .phy = &rk3368_win01_data, 702 572 .type = DRM_PLANE_TYPE_OVERLAY }, 703 573 { .base = 0x00, .phy = &rk3368_win23_data, 704 574 .type = DRM_PLANE_TYPE_OVERLAY }, ··· 767 637 .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3), 768 638 }; 769 639 640 + static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win01_data = { 641 + .y2r_coefficients = { 642 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 0), 643 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 16), 644 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 0), 645 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 16), 646 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 0), 647 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 16), 648 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 0), 649 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 16), 650 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 16, 0xffff, 0), 651 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 20, 0xffffffff, 0), 652 + 
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 24, 0xffffffff, 0), 653 + VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 28, 0xffffffff, 0), 654 + }, 655 + }; 656 + 657 + static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win23_data = { }; 658 + 659 + static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = { 660 + { .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data, 661 + .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1) }, 662 + { .base = 0x60, .phy = &rk3399_yuv2yuv_win01_data, 663 + .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) }, 664 + { .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data }, 665 + { .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data }, 666 + }; 667 + 770 668 static const struct vop_data rk3399_vop_big = { 771 669 .version = VOP_VERSION(3, 5), 772 670 .feature = VOP_FEATURE_OUTPUT_RGB10, ··· 805 647 .misc = &rk3368_misc, 806 648 .win = rk3368_vop_win_data, 807 649 .win_size = ARRAY_SIZE(rk3368_vop_win_data), 650 + .win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data, 808 651 }; 809 652 810 653 static const struct vop_win_data rk3399_vop_lit_win_data[] = { 811 - { .base = 0x00, .phy = &rk3288_win01_data, 654 + { .base = 0x00, .phy = &rk3368_win01_data, 812 655 .type = DRM_PLANE_TYPE_PRIMARY }, 813 656 { .base = 0x00, .phy = &rk3368_win23_data, 814 657 .type = DRM_PLANE_TYPE_CURSOR}, 658 + }; 659 + 660 + static const struct vop_win_yuv2yuv_data rk3399_vop_lit_win_yuv2yuv_data[] = { 661 + { .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data, 662 + .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1)}, 663 + { .base = 0x60, .phy = &rk3399_yuv2yuv_win23_data }, 815 664 }; 816 665 817 666 static const struct vop_data rk3399_vop_lit = { ··· 830 665 .misc = &rk3368_misc, 831 666 .win = rk3399_vop_lit_win_data, 832 667 .win_size = ARRAY_SIZE(rk3399_vop_lit_win_data), 668 + .win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data, 833 669 }; 834 670 835 671 static const struct vop_win_data rk3228_vop_win_data[] = { ··· 896 730 }; 897 731 898 732 static const struct vop_win_data 
rk3328_vop_win_data[] = { 899 - { .base = 0xd0, .phy = &rk3288_win01_data, 733 + { .base = 0xd0, .phy = &rk3368_win01_data, 900 734 .type = DRM_PLANE_TYPE_PRIMARY }, 901 - { .base = 0x1d0, .phy = &rk3288_win01_data, 735 + { .base = 0x1d0, .phy = &rk3368_win01_data, 902 736 .type = DRM_PLANE_TYPE_OVERLAY }, 903 - { .base = 0x2d0, .phy = &rk3288_win01_data, 737 + { .base = 0x2d0, .phy = &rk3368_win01_data, 904 738 .type = DRM_PLANE_TYPE_CURSOR }, 905 739 }; 906 740 ··· 925 759 .data = &px30_vop_big }, 926 760 { .compatible = "rockchip,px30-vop-lit", 927 761 .data = &px30_vop_lit }, 762 + { .compatible = "rockchip,rk3066-vop", 763 + .data = &rk3066_vop }, 928 764 { .compatible = "rockchip,rk3188-vop", 929 765 .data = &rk3188_vop }, 930 766 { .compatible = "rockchip,rk3288-vop",
+53
drivers/gpu/drm/rockchip/rockchip_vop_reg.h
··· 983 983 #define RK3188_REG_CFG_DONE 0x90 984 984 /* rk3188 register definition end */ 985 985 986 + /* rk3066 register definition */ 987 + #define RK3066_SYS_CTRL0 0x00 988 + #define RK3066_SYS_CTRL1 0x04 989 + #define RK3066_DSP_CTRL0 0x08 990 + #define RK3066_DSP_CTRL1 0x0c 991 + #define RK3066_INT_STATUS 0x10 992 + #define RK3066_MCU_CTRL 0x14 993 + #define RK3066_BLEND_CTRL 0x18 994 + #define RK3066_WIN0_COLOR_KEY_CTRL 0x1c 995 + #define RK3066_WIN1_COLOR_KEY_CTRL 0x20 996 + #define RK3066_WIN2_COLOR_KEY_CTRL 0x24 997 + #define RK3066_WIN0_YRGB_MST0 0x28 998 + #define RK3066_WIN0_CBR_MST0 0x2c 999 + #define RK3066_WIN0_YRGB_MST1 0x30 1000 + #define RK3066_WIN0_CBR_MST1 0x34 1001 + #define RK3066_WIN0_VIR 0x38 1002 + #define RK3066_WIN0_ACT_INFO 0x3c 1003 + #define RK3066_WIN0_DSP_INFO 0x40 1004 + #define RK3066_WIN0_DSP_ST 0x44 1005 + #define RK3066_WIN0_SCL_FACTOR_YRGB 0x48 1006 + #define RK3066_WIN0_SCL_FACTOR_CBR 0x4c 1007 + #define RK3066_WIN0_SCL_OFFSET 0x50 1008 + #define RK3066_WIN1_YRGB_MST 0x54 1009 + #define RK3066_WIN1_CBR_MST 0x58 1010 + #define RK3066_WIN1_VIR 0x5c 1011 + #define RK3066_WIN1_ACT_INFO 0x60 1012 + #define RK3066_WIN1_DSP_INFO 0x64 1013 + #define RK3066_WIN1_DSP_ST 0x68 1014 + #define RK3066_WIN1_SCL_FACTOR_YRGB 0x6c 1015 + #define RK3066_WIN1_SCL_FACTOR_CBR 0x70 1016 + #define RK3066_WIN1_SCL_OFFSET 0x74 1017 + #define RK3066_WIN2_MST 0x78 1018 + #define RK3066_WIN2_VIR 0x7c 1019 + #define RK3066_WIN2_DSP_INFO 0x80 1020 + #define RK3066_WIN2_DSP_ST 0x84 1021 + #define RK3066_HWC_MST 0x88 1022 + #define RK3066_HWC_DSP_ST 0x8c 1023 + #define RK3066_HWC_COLOR_LUT0 0x90 1024 + #define RK3066_HWC_COLOR_LUT1 0x94 1025 + #define RK3066_HWC_COLOR_LUT2 0x98 1026 + #define RK3066_DSP_HTOTAL_HS_END 0x9c 1027 + #define RK3066_DSP_HACT_ST_END 0xa0 1028 + #define RK3066_DSP_VTOTAL_VS_END 0xa4 1029 + #define RK3066_DSP_VACT_ST_END 0xa8 1030 + #define RK3066_DSP_VS_ST_END_F1 0xac 1031 + #define RK3066_DSP_VACT_ST_END_F1 0xb0 1032 + #define 
RK3066_REG_CFG_DONE 0xc0 1033 + #define RK3066_MCU_BYPASS_WPORT 0x100 1034 + #define RK3066_MCU_BYPASS_RPORT 0x200 1035 + #define RK3066_WIN2_LUT_ADDR 0x400 1036 + #define RK3066_DSP_LUT_ADDR 0x800 1037 + /* rk3066 register definition end */ 1038 + 986 1039 #endif /* _ROCKCHIP_VOP_REG_H */
+2 -2
drivers/gpu/drm/shmobile/shmob_drm_drv.c
··· 229 229 230 230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 231 231 sdev->mmio = devm_ioremap_resource(&pdev->dev, res); 232 - if (sdev->mmio == NULL) 233 - return -ENOMEM; 232 + if (IS_ERR(sdev->mmio)) 233 + return PTR_ERR(sdev->mmio); 234 234 235 235 ret = shmob_drm_setup_clocks(sdev, pdata->clk_source); 236 236 if (ret < 0)
+3 -11
drivers/gpu/drm/sti/sti_crtc.c
··· 53 53 struct clk *compo_clk, *pix_clk; 54 54 int rate = mode->clock * 1000; 55 55 56 - DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n", 57 - crtc->base.id, sti_mixer_to_str(mixer), 58 - mode->base.id, mode->name); 56 + DRM_DEBUG_KMS("CRTC:%d (%s) mode: (%s)\n", 57 + crtc->base.id, sti_mixer_to_str(mixer), mode->name); 59 58 60 - DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", 61 - mode->vrefresh, mode->clock, 62 - mode->hdisplay, 63 - mode->hsync_start, mode->hsync_end, 64 - mode->htotal, 65 - mode->vdisplay, 66 - mode->vsync_start, mode->vsync_end, 67 - mode->vtotal, mode->type, mode->flags); 59 + DRM_DEBUG_KMS(DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 68 60 69 61 if (mixer->id == STI_MIXER_MAIN) { 70 62 compo_clk = compo->clk_compo_main;
+2 -2
drivers/gpu/drm/sti/sti_dvo.c
··· 277 277 } 278 278 279 279 static void sti_dvo_set_mode(struct drm_bridge *bridge, 280 - struct drm_display_mode *mode, 281 - struct drm_display_mode *adjusted_mode) 280 + const struct drm_display_mode *mode, 281 + const struct drm_display_mode *adjusted_mode) 282 282 { 283 283 struct sti_dvo *dvo = bridge->driver_private; 284 284 struct sti_mixer *mixer = to_sti_mixer(dvo->encoder->crtc);
+2 -2
drivers/gpu/drm/sti/sti_hda.c
··· 508 508 } 509 509 510 510 static void sti_hda_set_mode(struct drm_bridge *bridge, 511 - struct drm_display_mode *mode, 512 - struct drm_display_mode *adjusted_mode) 511 + const struct drm_display_mode *mode, 512 + const struct drm_display_mode *adjusted_mode) 513 513 { 514 514 struct sti_hda *hda = bridge->driver_private; 515 515 u32 mode_idx;
+4 -3
drivers/gpu/drm/sti/sti_hdmi.c
··· 434 434 435 435 DRM_DEBUG_DRIVER("\n"); 436 436 437 - ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode, false); 437 + ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, 438 + hdmi->drm_connector, mode); 438 439 if (ret < 0) { 439 440 DRM_ERROR("failed to setup AVI infoframe: %d\n", ret); 440 441 return ret; ··· 918 917 } 919 918 920 919 static void sti_hdmi_set_mode(struct drm_bridge *bridge, 921 - struct drm_display_mode *mode, 922 - struct drm_display_mode *adjusted_mode) 920 + const struct drm_display_mode *mode, 921 + const struct drm_display_mode *adjusted_mode) 923 922 { 924 923 struct sti_hdmi *hdmi = bridge->driver_private; 925 924 int ret;
+1 -1
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
··· 215 215 } 216 216 217 217 static int 218 - dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode, 218 + dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode, 219 219 unsigned long mode_flags, u32 lanes, u32 format, 220 220 unsigned int *lane_mbps) 221 221 {
+2 -1
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
··· 52 52 u8 buffer[17]; 53 53 int i, ret; 54 54 55 - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 55 + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, 56 + &hdmi->connector, mode); 56 57 if (ret < 0) { 57 58 DRM_ERROR("Failed to get infoframes from mode\n"); 58 59 return ret;
+2 -1
drivers/gpu/drm/tegra/hdmi.c
··· 741 741 u8 buffer[17]; 742 742 ssize_t err; 743 743 744 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 744 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, 745 + &hdmi->output.connector, mode); 745 746 if (err < 0) { 746 747 dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err); 747 748 return;
+1 -1
drivers/gpu/drm/tegra/hub.c
··· 716 716 if (!state) 717 717 return -ENOMEM; 718 718 719 - drm_atomic_private_obj_init(&hub->base, &state->base, 719 + drm_atomic_private_obj_init(drm, &hub->base, &state->base, 720 720 &tegra_display_hub_state_funcs); 721 721 722 722 tegra->hub = hub;
+2 -1
drivers/gpu/drm/tegra/sor.c
··· 2116 2116 value &= ~INFOFRAME_CTRL_ENABLE; 2117 2117 tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL); 2118 2118 2119 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); 2119 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, 2120 + &sor->output.connector, mode); 2120 2121 if (err < 0) { 2121 2122 dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err); 2122 2123 return err;
+17 -17
drivers/gpu/drm/tilcdc/tilcdc_drv.c
··· 183 183 { 184 184 struct tilcdc_drm_private *priv = dev->dev_private; 185 185 186 + #ifdef CONFIG_CPU_FREQ 187 + if (priv->freq_transition.notifier_call) 188 + cpufreq_unregister_notifier(&priv->freq_transition, 189 + CPUFREQ_TRANSITION_NOTIFIER); 190 + #endif 191 + 186 192 if (priv->crtc) 187 193 tilcdc_crtc_shutdown(priv->crtc); 188 194 ··· 199 193 drm_irq_uninstall(dev); 200 194 drm_mode_config_cleanup(dev); 201 195 tilcdc_remove_external_device(dev); 202 - 203 - #ifdef CONFIG_CPU_FREQ 204 - if (priv->freq_transition.notifier_call) 205 - cpufreq_unregister_notifier(&priv->freq_transition, 206 - CPUFREQ_TRANSITION_NOTIFIER); 207 - #endif 208 196 209 197 if (priv->clk) 210 198 clk_put(priv->clk); ··· 269 269 ret = -ENODEV; 270 270 goto init_failed; 271 271 } 272 - 273 - #ifdef CONFIG_CPU_FREQ 274 - priv->freq_transition.notifier_call = cpufreq_transition; 275 - ret = cpufreq_register_notifier(&priv->freq_transition, 276 - CPUFREQ_TRANSITION_NOTIFIER); 277 - if (ret) { 278 - dev_err(dev, "failed to register cpufreq notifier\n"); 279 - priv->freq_transition.notifier_call = NULL; 280 - goto init_failed; 281 - } 282 - #endif 283 272 284 273 if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth)) 285 274 priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH; ··· 345 356 goto init_failed; 346 357 } 347 358 modeset_init(ddev); 359 + 360 + #ifdef CONFIG_CPU_FREQ 361 + priv->freq_transition.notifier_call = cpufreq_transition; 362 + ret = cpufreq_register_notifier(&priv->freq_transition, 363 + CPUFREQ_TRANSITION_NOTIFIER); 364 + if (ret) { 365 + dev_err(dev, "failed to register cpufreq notifier\n"); 366 + priv->freq_transition.notifier_call = NULL; 367 + goto init_failed; 368 + } 369 + #endif 348 370 349 371 if (priv->is_componentized) { 350 372 ret = component_bind_all(dev, ddev);
+3
drivers/gpu/drm/tinydrm/core/tinydrm-core.c
··· 10 10 #include <drm/drm_atomic.h> 11 11 #include <drm/drm_atomic_helper.h> 12 12 #include <drm/drm_crtc_helper.h> 13 + #include <drm/drm_drv.h> 13 14 #include <drm/drm_fb_helper.h> 14 15 #include <drm/drm_gem_framebuffer_helper.h> 16 + #include <drm/drm_print.h> 15 17 #include <drm/tinydrm/tinydrm.h> 16 18 #include <linux/device.h> 17 19 #include <linux/dma-buf.h> 20 + #include <linux/module.h> 18 21 19 22 /** 20 23 * DOC: overview
+3
drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
··· 9 9 10 10 #include <drm/drm_atomic_helper.h> 11 11 #include <drm/drm_crtc_helper.h> 12 + #include <drm/drm_drv.h> 12 13 #include <drm/drm_gem_framebuffer_helper.h> 13 14 #include <drm/drm_modes.h> 15 + #include <drm/drm_print.h> 16 + #include <drm/drm_vblank.h> 14 17 #include <drm/tinydrm/tinydrm.h> 15 18 16 19 struct tinydrm_connector {
+1
drivers/gpu/drm/tinydrm/hx8357d.c
··· 16 16 #include <linux/property.h> 17 17 #include <linux/spi/spi.h> 18 18 19 + #include <drm/drm_drv.h> 19 20 #include <drm/drm_gem_cma_helper.h> 20 21 #include <drm/drm_gem_framebuffer_helper.h> 21 22 #include <drm/drm_modeset_helper.h>
+2
drivers/gpu/drm/tinydrm/ili9225.c
··· 20 20 #include <linux/spi/spi.h> 21 21 #include <video/mipi_display.h> 22 22 23 + #include <drm/drm_drv.h> 23 24 #include <drm/drm_fb_cma_helper.h> 25 + #include <drm/drm_fourcc.h> 24 26 #include <drm/drm_gem_cma_helper.h> 25 27 #include <drm/drm_gem_framebuffer_helper.h> 26 28 #include <drm/tinydrm/mipi-dbi.h>
+1
drivers/gpu/drm/tinydrm/ili9341.c
··· 15 15 #include <linux/property.h> 16 16 #include <linux/spi/spi.h> 17 17 18 + #include <drm/drm_drv.h> 18 19 #include <drm/drm_gem_cma_helper.h> 19 20 #include <drm/drm_gem_framebuffer_helper.h> 20 21 #include <drm/drm_modeset_helper.h>
+1
drivers/gpu/drm/tinydrm/mi0283qt.c
··· 17 17 #include <linux/regulator/consumer.h> 18 18 #include <linux/spi/spi.h> 19 19 20 + #include <drm/drm_drv.h> 20 21 #include <drm/drm_gem_cma_helper.h> 21 22 #include <drm/drm_gem_framebuffer_helper.h> 22 23 #include <drm/drm_modeset_helper.h>
+3
drivers/gpu/drm/tinydrm/mipi-dbi.c
··· 10 10 */ 11 11 12 12 #include <linux/debugfs.h> 13 + #include <linux/delay.h> 13 14 #include <linux/dma-buf.h> 14 15 #include <linux/gpio/consumer.h> 15 16 #include <linux/module.h> 16 17 #include <linux/regulator/consumer.h> 17 18 #include <linux/spi/spi.h> 18 19 20 + #include <drm/drm_drv.h> 19 21 #include <drm/drm_fb_cma_helper.h> 20 22 #include <drm/drm_gem_cma_helper.h> 23 + #include <drm/drm_fourcc.h> 21 24 #include <drm/drm_gem_framebuffer_helper.h> 22 25 #include <drm/tinydrm/mipi-dbi.h> 23 26 #include <drm/tinydrm/tinydrm-helpers.h>
+1
drivers/gpu/drm/tinydrm/repaper.c
··· 26 26 #include <linux/spi/spi.h> 27 27 #include <linux/thermal.h> 28 28 29 + #include <drm/drm_drv.h> 29 30 #include <drm/drm_fb_cma_helper.h> 30 31 #include <drm/drm_gem_cma_helper.h> 31 32 #include <drm/drm_gem_framebuffer_helper.h>
+1
drivers/gpu/drm/tinydrm/st7586.c
··· 17 17 #include <linux/spi/spi.h> 18 18 #include <video/mipi_display.h> 19 19 20 + #include <drm/drm_drv.h> 20 21 #include <drm/drm_fb_cma_helper.h> 21 22 #include <drm/drm_gem_cma_helper.h> 22 23 #include <drm/drm_gem_framebuffer_helper.h>
+1
drivers/gpu/drm/tinydrm/st7735r.c
··· 14 14 #include <linux/spi/spi.h> 15 15 #include <video/mipi_display.h> 16 16 17 + #include <drm/drm_drv.h> 17 18 #include <drm/drm_gem_cma_helper.h> 18 19 #include <drm/drm_gem_framebuffer_helper.h> 19 20 #include <drm/tinydrm/mipi-dbi.h>
-1
drivers/gpu/drm/v3d/v3d_drv.h
··· 308 308 void v3d_tfu_job_put(struct v3d_tfu_job *exec); 309 309 void v3d_reset(struct v3d_dev *v3d); 310 310 void v3d_invalidate_caches(struct v3d_dev *v3d); 311 - void v3d_flush_caches(struct v3d_dev *v3d); 312 311 313 312 /* v3d_irq.c */ 314 313 void v3d_irq_init(struct v3d_dev *v3d);
+18 -42
drivers/gpu/drm/v3d/v3d_gem.c
··· 130 130 } 131 131 } 132 132 133 - /* Invalidates the (read-only) L2 cache. */ 133 + /* Invalidates the (read-only) L2C cache. This was the L2 cache for 134 + * uniforms and instructions on V3D 3.2. 135 + */ 134 136 static void 135 - v3d_invalidate_l2(struct v3d_dev *v3d, int core) 137 + v3d_invalidate_l2c(struct v3d_dev *v3d, int core) 136 138 { 139 + if (v3d->ver > 32) 140 + return; 141 + 137 142 V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, 138 143 V3D_L2CACTL_L2CCLR | 139 144 V3D_L2CACTL_L2CENA); 140 - } 141 - 142 - static void 143 - v3d_invalidate_l1td(struct v3d_dev *v3d, int core) 144 - { 145 - V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF); 146 - if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & 147 - V3D_L2TCACTL_L2TFLS), 100)) { 148 - DRM_ERROR("Timeout waiting for L1T write combiner flush\n"); 149 - } 150 145 } 151 146 152 147 /* Invalidates texture L2 cachelines */ 153 148 static void 154 149 v3d_flush_l2t(struct v3d_dev *v3d, int core) 155 150 { 156 - v3d_invalidate_l1td(v3d, core); 157 - 151 + /* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't 152 + * need to wait for completion before dispatching the job -- 153 + * L2T accesses will be stalled until the flush has completed. 154 + */ 158 155 V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, 159 156 V3D_L2TCACTL_L2TFLS | 160 157 V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM)); 161 - if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & 162 - V3D_L2TCACTL_L2TFLS), 100)) { 163 - DRM_ERROR("Timeout waiting for L2T flush\n"); 164 - } 165 158 } 166 159 167 160 /* Invalidates the slice caches. These are read-only caches. 
*/ ··· 168 175 V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC)); 169 176 } 170 177 171 - /* Invalidates texture L2 cachelines */ 172 - static void 173 - v3d_invalidate_l2t(struct v3d_dev *v3d, int core) 174 - { 175 - V3D_CORE_WRITE(core, 176 - V3D_CTL_L2TCACTL, 177 - V3D_L2TCACTL_L2TFLS | 178 - V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM)); 179 - if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & 180 - V3D_L2TCACTL_L2TFLS), 100)) { 181 - DRM_ERROR("Timeout waiting for L2T invalidate\n"); 182 - } 183 - } 184 - 185 178 void 186 179 v3d_invalidate_caches(struct v3d_dev *v3d) 187 180 { 181 + /* Invalidate the caches from the outside in. That way if 182 + * another CL's concurrent use of nearby memory were to pull 183 + * an invalidated cacheline back in, we wouldn't leave stale 184 + * data in the inner cache. 185 + */ 188 186 v3d_flush_l3(v3d); 189 - 190 - v3d_invalidate_l2(v3d, 0); 191 - v3d_invalidate_slices(v3d, 0); 187 + v3d_invalidate_l2c(v3d, 0); 192 188 v3d_flush_l2t(v3d, 0); 193 - } 194 - 195 - void 196 - v3d_flush_caches(struct v3d_dev *v3d) 197 - { 198 - v3d_invalidate_l1td(v3d, 0); 199 - v3d_invalidate_l2t(v3d, 0); 189 + v3d_invalidate_slices(v3d, 0); 200 190 } 201 191 202 192 static void
+43
drivers/gpu/drm/vc4/vc4_crtc.c
··· 49 49 struct drm_mm_node mm; 50 50 bool feed_txp; 51 51 bool txp_armed; 52 + 53 + struct { 54 + unsigned int left; 55 + unsigned int right; 56 + unsigned int top; 57 + unsigned int bottom; 58 + } margins; 52 59 }; 53 60 54 61 static inline struct vc4_crtc_state * ··· 631 624 return MODE_OK; 632 625 } 633 626 627 + void vc4_crtc_get_margins(struct drm_crtc_state *state, 628 + unsigned int *left, unsigned int *right, 629 + unsigned int *top, unsigned int *bottom) 630 + { 631 + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); 632 + struct drm_connector_state *conn_state; 633 + struct drm_connector *conn; 634 + int i; 635 + 636 + *left = vc4_state->margins.left; 637 + *right = vc4_state->margins.right; 638 + *top = vc4_state->margins.top; 639 + *bottom = vc4_state->margins.bottom; 640 + 641 + /* We have to interate over all new connector states because 642 + * vc4_crtc_get_margins() might be called before 643 + * vc4_crtc_atomic_check() which means margins info in vc4_crtc_state 644 + * might be outdated. 
645 + */ 646 + for_each_new_connector_in_state(state->state, conn, conn_state, i) { 647 + if (conn_state->crtc != state->crtc) 648 + continue; 649 + 650 + *left = conn_state->tv.margins.left; 651 + *right = conn_state->tv.margins.right; 652 + *top = conn_state->tv.margins.top; 653 + *bottom = conn_state->tv.margins.bottom; 654 + break; 655 + } 656 + } 657 + 634 658 static int vc4_crtc_atomic_check(struct drm_crtc *crtc, 635 659 struct drm_crtc_state *state) 636 660 { ··· 709 671 vc4_state->feed_txp = false; 710 672 } 711 673 674 + vc4_state->margins.left = conn_state->tv.margins.left; 675 + vc4_state->margins.right = conn_state->tv.margins.right; 676 + vc4_state->margins.top = conn_state->tv.margins.top; 677 + vc4_state->margins.bottom = conn_state->tv.margins.bottom; 712 678 break; 713 679 } 714 680 ··· 1014 972 1015 973 old_vc4_state = to_vc4_crtc_state(crtc->state); 1016 974 vc4_state->feed_txp = old_vc4_state->feed_txp; 975 + vc4_state->margins = old_vc4_state->margins; 1017 976 1018 977 __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base); 1019 978 return &vc4_state->base;
+4
drivers/gpu/drm/vc4/vc4_drv.h
··· 9 9 #include <linux/mm_types.h> 10 10 #include <linux/reservation.h> 11 11 #include <drm/drmP.h> 12 + #include <drm/drm_util.h> 12 13 #include <drm/drm_encoder.h> 13 14 #include <drm/drm_gem_cma_helper.h> 14 15 #include <drm/drm_atomic.h> ··· 708 707 const struct drm_display_mode *mode); 709 708 void vc4_crtc_handle_vblank(struct vc4_crtc *crtc); 710 709 void vc4_crtc_txp_armed(struct drm_crtc_state *state); 710 + void vc4_crtc_get_margins(struct drm_crtc_state *state, 711 + unsigned int *right, unsigned int *left, 712 + unsigned int *top, unsigned int *bottom); 711 713 712 714 /* vc4_debugfs.c */ 713 715 int vc4_debugfs_init(struct drm_minor *minor);
+21 -11
drivers/gpu/drm/vc4/vc4_hdmi.c
··· 109 109 struct vc4_encoder base; 110 110 bool hdmi_monitor; 111 111 bool limited_rgb_range; 112 - bool rgb_range_selectable; 113 112 }; 114 113 115 114 static inline struct vc4_hdmi_encoder * ··· 279 280 280 281 vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid); 281 282 282 - if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 283 - vc4_encoder->rgb_range_selectable = 284 - drm_rgb_quant_range_selectable(edid); 285 - } 286 - 287 283 drm_connector_update_edid_property(connector, edid); 288 284 ret = drm_add_edid_modes(connector, edid); 289 285 kfree(edid); ··· 304 310 { 305 311 struct drm_connector *connector; 306 312 struct vc4_hdmi_connector *hdmi_connector; 313 + int ret; 307 314 308 315 hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector), 309 316 GFP_KERNEL); ··· 317 322 drm_connector_init(dev, connector, &vc4_hdmi_connector_funcs, 318 323 DRM_MODE_CONNECTOR_HDMIA); 319 324 drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs); 325 + 326 + /* Create and attach TV margin props to this connector. 
*/ 327 + ret = drm_mode_create_tv_margin_properties(dev); 328 + if (ret) 329 + return ERR_PTR(ret); 330 + 331 + drm_connector_attach_tv_margin_properties(connector); 320 332 321 333 connector->polled = (DRM_CONNECTOR_POLL_CONNECT | 322 334 DRM_CONNECTOR_POLL_DISCONNECT); ··· 410 408 static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) 411 409 { 412 410 struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); 411 + struct vc4_dev *vc4 = encoder->dev->dev_private; 412 + struct vc4_hdmi *hdmi = vc4->hdmi; 413 + struct drm_connector_state *cstate = hdmi->connector->state; 413 414 struct drm_crtc *crtc = encoder->crtc; 414 415 const struct drm_display_mode *mode = &crtc->state->adjusted_mode; 415 416 union hdmi_infoframe frame; 416 417 int ret; 417 418 418 - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); 419 + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 420 + hdmi->connector, mode); 419 421 if (ret < 0) { 420 422 DRM_ERROR("couldn't fill AVI infoframe\n"); 421 423 return; 422 424 } 423 425 424 - drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode, 426 + drm_hdmi_avi_infoframe_quant_range(&frame.avi, 427 + hdmi->connector, mode, 425 428 vc4_encoder->limited_rgb_range ? 426 429 HDMI_QUANTIZATION_RANGE_LIMITED : 427 - HDMI_QUANTIZATION_RANGE_FULL, 428 - vc4_encoder->rgb_range_selectable, 429 - false); 430 + HDMI_QUANTIZATION_RANGE_FULL); 431 + 432 + frame.avi.right_bar = cstate->tv.margins.right; 433 + frame.avi.left_bar = cstate->tv.margins.left; 434 + frame.avi.top_bar = cstate->tv.margins.top; 435 + frame.avi.bottom_bar = cstate->tv.margins.bottom; 430 436 431 437 vc4_hdmi_write_infoframe(encoder, &frame); 432 438 }
+2 -1
drivers/gpu/drm/vc4/vc4_kms.c
··· 432 432 ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL); 433 433 if (!ctm_state) 434 434 return -ENOMEM; 435 - drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base, 435 + 436 + drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base, 436 437 &vc4_ctm_state_funcs); 437 438 438 439 drm_mode_config_reset(dev);
+118 -27
drivers/gpu/drm/vc4/vc4_plane.c
··· 258 258 } 259 259 } 260 260 261 + static int vc4_plane_margins_adj(struct drm_plane_state *pstate) 262 + { 263 + struct vc4_plane_state *vc4_pstate = to_vc4_plane_state(pstate); 264 + unsigned int left, right, top, bottom, adjhdisplay, adjvdisplay; 265 + struct drm_crtc_state *crtc_state; 266 + 267 + crtc_state = drm_atomic_get_new_crtc_state(pstate->state, 268 + pstate->crtc); 269 + 270 + vc4_crtc_get_margins(crtc_state, &left, &right, &top, &bottom); 271 + if (!left && !right && !top && !bottom) 272 + return 0; 273 + 274 + if (left + right >= crtc_state->mode.hdisplay || 275 + top + bottom >= crtc_state->mode.vdisplay) 276 + return -EINVAL; 277 + 278 + adjhdisplay = crtc_state->mode.hdisplay - (left + right); 279 + vc4_pstate->crtc_x = DIV_ROUND_CLOSEST(vc4_pstate->crtc_x * 280 + adjhdisplay, 281 + crtc_state->mode.hdisplay); 282 + vc4_pstate->crtc_x += left; 283 + if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - left) 284 + vc4_pstate->crtc_x = crtc_state->mode.hdisplay - left; 285 + 286 + adjvdisplay = crtc_state->mode.vdisplay - (top + bottom); 287 + vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y * 288 + adjvdisplay, 289 + crtc_state->mode.vdisplay); 290 + vc4_pstate->crtc_y += top; 291 + if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - top) 292 + vc4_pstate->crtc_y = crtc_state->mode.vdisplay - top; 293 + 294 + vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w * 295 + adjhdisplay, 296 + crtc_state->mode.hdisplay); 297 + vc4_pstate->crtc_h = DIV_ROUND_CLOSEST(vc4_pstate->crtc_h * 298 + adjvdisplay, 299 + crtc_state->mode.vdisplay); 300 + 301 + if (!vc4_pstate->crtc_w || !vc4_pstate->crtc_h) 302 + return -EINVAL; 303 + 304 + return 0; 305 + } 306 + 261 307 static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) 262 308 { 263 309 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); ··· 351 305 vc4_state->crtc_y = state->dst.y1; 352 306 vc4_state->crtc_w = state->dst.x2 - state->dst.x1; 353 307 
vc4_state->crtc_h = state->dst.y2 - state->dst.y1; 308 + 309 + ret = vc4_plane_margins_adj(state); 310 + if (ret) 311 + return ret; 354 312 355 313 vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0], 356 314 vc4_state->crtc_w); ··· 542 492 bool mix_plane_alpha; 543 493 bool covers_screen; 544 494 u32 scl0, scl1, pitch0; 545 - u32 tiling; 495 + u32 tiling, src_y; 546 496 u32 hvs_format = format->hvs; 497 + unsigned int rotation; 547 498 int ret, i; 548 499 549 500 if (vc4_state->dlist_initialized) ··· 571 520 h_subsample = drm_format_horz_chroma_subsampling(format->drm); 572 521 v_subsample = drm_format_vert_chroma_subsampling(format->drm); 573 522 523 + rotation = drm_rotation_simplify(state->rotation, 524 + DRM_MODE_ROTATE_0 | 525 + DRM_MODE_REFLECT_X | 526 + DRM_MODE_REFLECT_Y); 527 + 528 + /* We must point to the last line when Y reflection is enabled. */ 529 + src_y = vc4_state->src_y; 530 + if (rotation & DRM_MODE_REFLECT_Y) 531 + src_y += vc4_state->src_h[0] - 1; 532 + 574 533 switch (base_format_mod) { 575 534 case DRM_FORMAT_MOD_LINEAR: 576 535 tiling = SCALER_CTL0_TILING_LINEAR; ··· 590 529 * out. 591 530 */ 592 531 for (i = 0; i < num_planes; i++) { 593 - vc4_state->offsets[i] += vc4_state->src_y / 532 + vc4_state->offsets[i] += src_y / 594 533 (i ? v_subsample : 1) * 595 534 fb->pitches[i]; 535 + 596 536 vc4_state->offsets[i] += vc4_state->src_x / 597 537 (i ? h_subsample : 1) * 598 538 fb->format->cpp[i]; ··· 619 557 u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift); 620 558 u32 tiles_l = vc4_state->src_x >> tile_w_shift; 621 559 u32 tiles_r = tiles_w - tiles_l; 622 - u32 tiles_t = vc4_state->src_y >> tile_h_shift; 560 + u32 tiles_t = src_y >> tile_h_shift; 623 561 /* Intra-tile offsets, which modify the base address (the 624 562 * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that 625 563 * base address). 
626 564 */ 627 - u32 tile_y = (vc4_state->src_y >> 4) & 1; 628 - u32 subtile_y = (vc4_state->src_y >> 2) & 3; 629 - u32 utile_y = vc4_state->src_y & 3; 565 + u32 tile_y = (src_y >> 4) & 1; 566 + u32 subtile_y = (src_y >> 2) & 3; 567 + u32 utile_y = src_y & 3; 630 568 u32 x_off = vc4_state->src_x & tile_w_mask; 631 - u32 y_off = vc4_state->src_y & tile_h_mask; 569 + u32 y_off = src_y & tile_h_mask; 570 + 571 + /* When Y reflection is requested we must set the 572 + * SCALER_PITCH0_TILE_LINE_DIR flag to tell HVS that all lines 573 + * after the initial one should be fetched in descending order, 574 + * which makes sense since we start from the last line and go 575 + * backward. 576 + * Don't know why we need y_off = max_y_off - y_off, but it's 577 + * definitely required (I guess it's also related to the "going 578 + * backward" situation). 579 + */ 580 + if (rotation & DRM_MODE_REFLECT_Y) { 581 + y_off = tile_h_mask - y_off; 582 + pitch0 = SCALER_PITCH0_TILE_LINE_DIR; 583 + } else { 584 + pitch0 = 0; 585 + } 632 586 633 587 tiling = SCALER_CTL0_TILING_256B_OR_T; 634 - pitch0 = (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) | 635 - VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) | 636 - VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) | 637 - VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R)); 588 + pitch0 |= (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) | 589 + VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) | 590 + VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) | 591 + VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R)); 638 592 vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift); 639 593 vc4_state->offsets[0] += subtile_y << 8; 640 594 vc4_state->offsets[0] += utile_y << 4; ··· 673 595 case DRM_FORMAT_MOD_BROADCOM_SAND128: 674 596 case DRM_FORMAT_MOD_BROADCOM_SAND256: { 675 597 uint32_t param = fourcc_mod_broadcom_param(fb->modifier); 598 + u32 tile_w, tile, x_off, pix_per_tile; 676 599 677 - /* Column-based NV12 or RGBA. 
678 - */ 679 - if (fb->format->num_planes > 1) { 680 - if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) { 681 - DRM_DEBUG_KMS("SAND format only valid for NV12/21"); 682 - return -EINVAL; 683 - } 684 - hvs_format = HVS_PIXEL_FORMAT_H264; 685 - } else { 686 - if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) { 687 - DRM_DEBUG_KMS("SAND256 format only valid for H.264"); 688 - return -EINVAL; 689 - } 690 - } 600 + hvs_format = HVS_PIXEL_FORMAT_H264; 691 601 692 602 switch (base_format_mod) { 693 603 case DRM_FORMAT_MOD_BROADCOM_SAND64: 694 604 tiling = SCALER_CTL0_TILING_64B; 605 + tile_w = 64; 695 606 break; 696 607 case DRM_FORMAT_MOD_BROADCOM_SAND128: 697 608 tiling = SCALER_CTL0_TILING_128B; 609 + tile_w = 128; 698 610 break; 699 611 case DRM_FORMAT_MOD_BROADCOM_SAND256: 700 612 tiling = SCALER_CTL0_TILING_256B_OR_T; 613 + tile_w = 256; 701 614 break; 702 615 default: 703 616 break; ··· 697 628 if (param > SCALER_TILE_HEIGHT_MASK) { 698 629 DRM_DEBUG_KMS("SAND height too large (%d)\n", param); 699 630 return -EINVAL; 631 + } 632 + 633 + pix_per_tile = tile_w / fb->format->cpp[0]; 634 + tile = vc4_state->src_x / pix_per_tile; 635 + x_off = vc4_state->src_x % pix_per_tile; 636 + 637 + /* Adjust the base pointer to the first pixel to be scanned 638 + * out. 639 + */ 640 + for (i = 0; i < num_planes; i++) { 641 + vc4_state->offsets[i] += param * tile_w * tile; 642 + vc4_state->offsets[i] += src_y / 643 + (i ? v_subsample : 1) * 644 + tile_w; 645 + vc4_state->offsets[i] += x_off / 646 + (i ? h_subsample : 1) * 647 + fb->format->cpp[i]; 700 648 } 701 649 702 650 pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT); ··· 729 643 /* Control word */ 730 644 vc4_dlist_write(vc4_state, 731 645 SCALER_CTL0_VALID | 646 + (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) | 647 + (rotation & DRM_MODE_REFLECT_Y ? 
SCALER_CTL0_VFLIP : 0) | 732 648 VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) | 733 649 (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) | 734 650 (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) | ··· 1138 1050 switch (fourcc_mod_broadcom_mod(modifier)) { 1139 1051 case DRM_FORMAT_MOD_LINEAR: 1140 1052 case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: 1141 - case DRM_FORMAT_MOD_BROADCOM_SAND64: 1142 - case DRM_FORMAT_MOD_BROADCOM_SAND128: 1143 1053 return true; 1144 1054 default: 1145 1055 return false; ··· 1209 1123 drm_plane_helper_add(plane, &vc4_plane_helper_funcs); 1210 1124 1211 1125 drm_plane_create_alpha_property(plane); 1126 + drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 1127 + DRM_MODE_ROTATE_0 | 1128 + DRM_MODE_ROTATE_180 | 1129 + DRM_MODE_REFLECT_X | 1130 + DRM_MODE_REFLECT_Y); 1212 1131 1213 1132 return plane; 1214 1133 }
+2 -2
drivers/gpu/drm/vgem/vgem_fence.c
··· 53 53 54 54 static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) 55 55 { 56 - snprintf(str, size, "%u", fence->seqno); 56 + snprintf(str, size, "%llu", fence->seqno); 57 57 } 58 58 59 59 static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, 60 60 int size) 61 61 { 62 - snprintf(str, size, "%u", 62 + snprintf(str, size, "%llu", 63 63 dma_fence_is_signaled(fence) ? fence->seqno : 0); 64 64 } 65 65
+1 -1
drivers/gpu/drm/virtio/Makefile
··· 3 3 # Makefile for the drm device driver. This driver provides support for the 4 4 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 5 5 6 - virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \ 6 + virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \ 7 7 virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \ 8 8 virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \ 9 9 virtgpu_ioctl.o virtgpu_prime.o
+1 -7
drivers/gpu/drm/virtio/virtgpu_display.c
··· 243 243 244 244 static void virtio_gpu_conn_destroy(struct drm_connector *connector) 245 245 { 246 - struct virtio_gpu_output *virtio_gpu_output = 247 - drm_connector_to_virtio_gpu_output(connector); 248 - 249 246 drm_connector_unregister(connector); 250 247 drm_connector_cleanup(connector); 251 - kfree(virtio_gpu_output); 252 248 } 253 249 254 250 static const struct drm_connector_funcs virtio_gpu_connector_funcs = { ··· 358 362 .atomic_commit = drm_atomic_helper_commit, 359 363 }; 360 364 361 - int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) 365 + void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) 362 366 { 363 367 int i; 364 368 ··· 377 381 vgdev_output_init(vgdev, i); 378 382 379 383 drm_mode_config_reset(vgdev->ddev); 380 - return 0; 381 384 } 382 385 383 386 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) ··· 385 390 386 391 for (i = 0 ; i < vgdev->num_scanouts; ++i) 387 392 kfree(vgdev->outputs[i].edid); 388 - virtio_gpu_fbdev_fini(vgdev); 389 393 drm_mode_config_cleanup(vgdev->ddev); 390 394 }
-103
drivers/gpu/drm/virtio/virtgpu_drm_bus.c
··· 1 - /* 2 - * Copyright (C) 2015 Red Hat, Inc. 3 - * All Rights Reserved. 4 - * 5 - * Permission is hereby granted, free of charge, to any person obtaining 6 - * a copy of this software and associated documentation files (the 7 - * "Software"), to deal in the Software without restriction, including 8 - * without limitation the rights to use, copy, modify, merge, publish, 9 - * distribute, sublicense, and/or sell copies of the Software, and to 10 - * permit persons to whom the Software is furnished to do so, subject to 11 - * the following conditions: 12 - * 13 - * The above copyright notice and this permission notice (including the 14 - * next paragraph) shall be included in all copies or substantial 15 - * portions of the Software. 16 - * 17 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 20 - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 21 - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 22 - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 23 - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 - */ 25 - 26 - #include <linux/pci.h> 27 - #include <drm/drm_fb_helper.h> 28 - 29 - #include "virtgpu_drv.h" 30 - 31 - int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev) 32 - { 33 - struct drm_device *dev; 34 - int ret; 35 - 36 - dev = drm_dev_alloc(driver, &vdev->dev); 37 - if (IS_ERR(dev)) 38 - return PTR_ERR(dev); 39 - vdev->priv = dev; 40 - 41 - if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) { 42 - struct pci_dev *pdev = to_pci_dev(vdev->dev.parent); 43 - const char *pname = dev_name(&pdev->dev); 44 - bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; 45 - char unique[20]; 46 - 47 - DRM_INFO("pci: %s detected at %s\n", 48 - vga ? 
"virtio-vga" : "virtio-gpu-pci", 49 - pname); 50 - dev->pdev = pdev; 51 - if (vga) 52 - drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 53 - 0, 54 - "virtiodrmfb"); 55 - 56 - /* 57 - * Normally the drm_dev_set_unique() call is done by core DRM. 58 - * The following comment covers, why virtio cannot rely on it. 59 - * 60 - * Unlike the other virtual GPU drivers, virtio abstracts the 61 - * underlying bus type by using struct virtio_device. 62 - * 63 - * Hence the dev_is_pci() check, used in core DRM, will fail 64 - * and the unique returned will be the virtio_device "virtio0", 65 - * while a "pci:..." one is required. 66 - * 67 - * A few other ideas were considered: 68 - * - Extend the dev_is_pci() check [in drm_set_busid] to 69 - * consider virtio. 70 - * Seems like a bigger hack than what we have already. 71 - * 72 - * - Point drm_device::dev to the parent of the virtio_device 73 - * Semantic changes: 74 - * * Using the wrong device for i2c, framebuffer_alloc and 75 - * prime import. 76 - * Visual changes: 77 - * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer, 78 - * will print the wrong information. 79 - * 80 - * We could address the latter issues, by introducing 81 - * drm_device::bus_dev, ... which would be used solely for this. 82 - * 83 - * So for the moment keep things as-is, with a bulky comment 84 - * for the next person who feels like removing this 85 - * drm_dev_set_unique() quirk. 86 - */ 87 - snprintf(unique, sizeof(unique), "pci:%s", pname); 88 - ret = drm_dev_set_unique(dev, unique); 89 - if (ret) 90 - goto err_free; 91 - 92 - } 93 - 94 - ret = drm_dev_register(dev, 0); 95 - if (ret) 96 - goto err_free; 97 - 98 - return 0; 99 - 100 - err_free: 101 - drm_dev_put(dev); 102 - return ret; 103 - }
+81 -3
drivers/gpu/drm/virtio/virtgpu_drv.c
··· 40 40 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); 41 41 module_param_named(modeset, virtio_gpu_modeset, int, 0400); 42 42 43 + static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev) 44 + { 45 + struct pci_dev *pdev = to_pci_dev(vdev->dev.parent); 46 + const char *pname = dev_name(&pdev->dev); 47 + bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; 48 + char unique[20]; 49 + 50 + DRM_INFO("pci: %s detected at %s\n", 51 + vga ? "virtio-vga" : "virtio-gpu-pci", 52 + pname); 53 + dev->pdev = pdev; 54 + if (vga) 55 + drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 56 + 0, 57 + "virtiodrmfb"); 58 + 59 + /* 60 + * Normally the drm_dev_set_unique() call is done by core DRM. 61 + * The following comment covers, why virtio cannot rely on it. 62 + * 63 + * Unlike the other virtual GPU drivers, virtio abstracts the 64 + * underlying bus type by using struct virtio_device. 65 + * 66 + * Hence the dev_is_pci() check, used in core DRM, will fail 67 + * and the unique returned will be the virtio_device "virtio0", 68 + * while a "pci:..." one is required. 69 + * 70 + * A few other ideas were considered: 71 + * - Extend the dev_is_pci() check [in drm_set_busid] to 72 + * consider virtio. 73 + * Seems like a bigger hack than what we have already. 74 + * 75 + * - Point drm_device::dev to the parent of the virtio_device 76 + * Semantic changes: 77 + * * Using the wrong device for i2c, framebuffer_alloc and 78 + * prime import. 79 + * Visual changes: 80 + * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer, 81 + * will print the wrong information. 82 + * 83 + * We could address the latter issues, by introducing 84 + * drm_device::bus_dev, ... which would be used solely for this. 85 + * 86 + * So for the moment keep things as-is, with a bulky comment 87 + * for the next person who feels like removing this 88 + * drm_dev_set_unique() quirk. 
89 + */ 90 + snprintf(unique, sizeof(unique), "pci:%s", pname); 91 + return drm_dev_set_unique(dev, unique); 92 + } 93 + 43 94 static int virtio_gpu_probe(struct virtio_device *vdev) 44 95 { 96 + struct drm_device *dev; 97 + int ret; 98 + 45 99 if (vgacon_text_force() && virtio_gpu_modeset == -1) 46 100 return -EINVAL; 47 101 48 102 if (virtio_gpu_modeset == 0) 49 103 return -EINVAL; 50 104 51 - return drm_virtio_init(&driver, vdev); 105 + dev = drm_dev_alloc(&driver, &vdev->dev); 106 + if (IS_ERR(dev)) 107 + return PTR_ERR(dev); 108 + vdev->priv = dev; 109 + 110 + if (!strcmp(vdev->dev.parent->bus->name, "pci")) { 111 + ret = virtio_gpu_pci_quirk(dev, vdev); 112 + if (ret) 113 + goto err_free; 114 + } 115 + 116 + ret = virtio_gpu_init(dev); 117 + if (ret) 118 + goto err_free; 119 + 120 + ret = drm_dev_register(dev, 0); 121 + if (ret) 122 + goto err_free; 123 + 124 + drm_fbdev_generic_setup(vdev->priv, 32); 125 + return 0; 126 + 127 + err_free: 128 + drm_dev_put(dev); 129 + return ret; 52 130 } 53 131 54 132 static void virtio_gpu_remove(struct virtio_device *vdev) 55 133 { 56 134 struct drm_device *dev = vdev->priv; 57 135 136 + drm_dev_unregister(dev); 137 + virtio_gpu_deinit(dev); 58 138 drm_put_dev(dev); 59 139 } 60 140 ··· 196 116 197 117 static struct drm_driver driver = { 198 118 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, 199 - .load = virtio_gpu_driver_load, 200 - .unload = virtio_gpu_driver_unload, 201 119 .open = virtio_gpu_driver_open, 202 120 .postclose = virtio_gpu_driver_postclose, 203 121
+3 -21
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 50 50 #define DRIVER_MINOR 1 51 51 #define DRIVER_PATCHLEVEL 0 52 52 53 - /* virtgpu_drm_bus.c */ 54 - int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); 55 - 56 53 struct virtio_gpu_object { 57 54 struct drm_gem_object gem_base; 58 55 uint32_t hw_res_handle; ··· 134 137 #define to_virtio_gpu_framebuffer(x) \ 135 138 container_of(x, struct virtio_gpu_framebuffer, base) 136 139 137 - struct virtio_gpu_fbdev { 138 - struct drm_fb_helper helper; 139 - struct virtio_gpu_framebuffer vgfb; 140 - struct virtio_gpu_device *vgdev; 141 - struct delayed_work work; 142 - }; 143 - 144 140 struct virtio_gpu_mman { 145 141 struct ttm_bo_device bdev; 146 142 }; 147 - 148 - struct virtio_gpu_fbdev; 149 143 150 144 struct virtio_gpu_queue { 151 145 struct virtqueue *vq; ··· 168 180 169 181 struct virtio_gpu_mman mman; 170 182 171 - /* pointer to fbdev info structure */ 172 - struct virtio_gpu_fbdev *vgfbdev; 173 183 struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS]; 174 184 uint32_t num_scanouts; 175 185 ··· 206 220 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; 207 221 208 222 /* virtio_kms.c */ 209 - int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags); 210 - void virtio_gpu_driver_unload(struct drm_device *dev); 223 + int virtio_gpu_init(struct drm_device *dev); 224 + void virtio_gpu_deinit(struct drm_device *dev); 211 225 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file); 212 226 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file); 213 227 ··· 235 249 uint32_t handle, uint64_t *offset_p); 236 250 237 251 /* virtio_fb */ 238 - #define VIRTIO_GPUFB_CONN_LIMIT 1 239 - int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev); 240 - void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev); 241 252 int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb, 242 253 struct drm_clip_rect *clips, 243 254 unsigned int num_clips); ··· 317 334 struct 
virtio_gpu_framebuffer *vgfb, 318 335 const struct drm_mode_fb_cmd2 *mode_cmd, 319 336 struct drm_gem_object *obj); 320 - int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); 337 + void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); 321 338 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); 322 339 323 340 /* virtio_gpu_plane.c */ ··· 334 351 /* virtio_gpu_fence.c */ 335 352 struct virtio_gpu_fence *virtio_gpu_fence_alloc( 336 353 struct virtio_gpu_device *vgdev); 337 - void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence); 338 354 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, 339 355 struct virtio_gpu_ctrl_hdr *cmd_hdr, 340 356 struct virtio_gpu_fence *fence);
-191
drivers/gpu/drm/virtio/virtgpu_fb.c
··· 27 27 #include <drm/drm_fb_helper.h> 28 28 #include "virtgpu_drv.h" 29 29 30 - #define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60) 31 - 32 30 static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb, 33 31 bool store, int x, int y, 34 32 int width, int height) ··· 147 149 virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle, 148 150 left, top, right - left, bottom - top); 149 151 return 0; 150 - } 151 - 152 - static void virtio_gpu_fb_dirty_work(struct work_struct *work) 153 - { 154 - struct delayed_work *delayed_work = to_delayed_work(work); 155 - struct virtio_gpu_fbdev *vfbdev = 156 - container_of(delayed_work, struct virtio_gpu_fbdev, work); 157 - struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb; 158 - 159 - virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1, 160 - vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1); 161 - } 162 - 163 - static void virtio_gpu_3d_fillrect(struct fb_info *info, 164 - const struct fb_fillrect *rect) 165 - { 166 - struct virtio_gpu_fbdev *vfbdev = info->par; 167 - 168 - drm_fb_helper_sys_fillrect(info, rect); 169 - virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy, 170 - rect->width, rect->height); 171 - schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); 172 - } 173 - 174 - static void virtio_gpu_3d_copyarea(struct fb_info *info, 175 - const struct fb_copyarea *area) 176 - { 177 - struct virtio_gpu_fbdev *vfbdev = info->par; 178 - 179 - drm_fb_helper_sys_copyarea(info, area); 180 - virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy, 181 - area->width, area->height); 182 - schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); 183 - } 184 - 185 - static void virtio_gpu_3d_imageblit(struct fb_info *info, 186 - const struct fb_image *image) 187 - { 188 - struct virtio_gpu_fbdev *vfbdev = info->par; 189 - 190 - drm_fb_helper_sys_imageblit(info, image); 191 - virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy, 192 - image->width, image->height); 
193 - schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); 194 - } 195 - 196 - static struct fb_ops virtio_gpufb_ops = { 197 - .owner = THIS_MODULE, 198 - DRM_FB_HELPER_DEFAULT_OPS, 199 - .fb_fillrect = virtio_gpu_3d_fillrect, 200 - .fb_copyarea = virtio_gpu_3d_copyarea, 201 - .fb_imageblit = virtio_gpu_3d_imageblit, 202 - }; 203 - 204 - static int virtio_gpufb_create(struct drm_fb_helper *helper, 205 - struct drm_fb_helper_surface_size *sizes) 206 - { 207 - struct virtio_gpu_fbdev *vfbdev = 208 - container_of(helper, struct virtio_gpu_fbdev, helper); 209 - struct drm_device *dev = helper->dev; 210 - struct virtio_gpu_device *vgdev = dev->dev_private; 211 - struct fb_info *info; 212 - struct drm_framebuffer *fb; 213 - struct drm_mode_fb_cmd2 mode_cmd = {}; 214 - struct virtio_gpu_object *obj; 215 - uint32_t format, size; 216 - int ret; 217 - 218 - mode_cmd.width = sizes->surface_width; 219 - mode_cmd.height = sizes->surface_height; 220 - mode_cmd.pitches[0] = mode_cmd.width * 4; 221 - mode_cmd.pixel_format = DRM_FORMAT_HOST_XRGB8888; 222 - 223 - format = virtio_gpu_translate_format(mode_cmd.pixel_format); 224 - if (format == 0) 225 - return -EINVAL; 226 - 227 - size = mode_cmd.pitches[0] * mode_cmd.height; 228 - obj = virtio_gpu_alloc_object(dev, size, false, true); 229 - if (IS_ERR(obj)) 230 - return PTR_ERR(obj); 231 - 232 - virtio_gpu_cmd_create_resource(vgdev, obj, format, 233 - mode_cmd.width, mode_cmd.height); 234 - 235 - ret = virtio_gpu_object_kmap(obj); 236 - if (ret) { 237 - DRM_ERROR("failed to kmap fb %d\n", ret); 238 - goto err_obj_vmap; 239 - } 240 - 241 - /* attach the object to the resource */ 242 - ret = virtio_gpu_object_attach(vgdev, obj, NULL); 243 - if (ret) 244 - goto err_obj_attach; 245 - 246 - info = drm_fb_helper_alloc_fbi(helper); 247 - if (IS_ERR(info)) { 248 - ret = PTR_ERR(info); 249 - goto err_fb_alloc; 250 - } 251 - 252 - info->par = helper; 253 - 254 - ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb, 255 - 
&mode_cmd, &obj->gem_base); 256 - if (ret) 257 - goto err_fb_alloc; 258 - 259 - fb = &vfbdev->vgfb.base; 260 - 261 - vfbdev->helper.fb = fb; 262 - 263 - strcpy(info->fix.id, "virtiodrmfb"); 264 - info->fbops = &virtio_gpufb_ops; 265 - info->pixmap.flags = FB_PIXMAP_SYSTEM; 266 - 267 - info->screen_buffer = obj->vmap; 268 - info->screen_size = obj->gem_base.size; 269 - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); 270 - drm_fb_helper_fill_var(info, &vfbdev->helper, 271 - sizes->fb_width, sizes->fb_height); 272 - 273 - info->fix.mmio_start = 0; 274 - info->fix.mmio_len = 0; 275 - return 0; 276 - 277 - err_fb_alloc: 278 - virtio_gpu_object_detach(vgdev, obj); 279 - err_obj_attach: 280 - err_obj_vmap: 281 - virtio_gpu_gem_free_object(&obj->gem_base); 282 - return ret; 283 - } 284 - 285 - static int virtio_gpu_fbdev_destroy(struct drm_device *dev, 286 - struct virtio_gpu_fbdev *vgfbdev) 287 - { 288 - struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb; 289 - 290 - drm_fb_helper_unregister_fbi(&vgfbdev->helper); 291 - 292 - if (vgfb->base.obj[0]) 293 - vgfb->base.obj[0] = NULL; 294 - drm_fb_helper_fini(&vgfbdev->helper); 295 - drm_framebuffer_cleanup(&vgfb->base); 296 - 297 - return 0; 298 - } 299 - static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = { 300 - .fb_probe = virtio_gpufb_create, 301 - }; 302 - 303 - int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev) 304 - { 305 - struct virtio_gpu_fbdev *vgfbdev; 306 - int bpp_sel = 32; /* TODO: parameter from somewhere? 
*/ 307 - int ret; 308 - 309 - vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL); 310 - if (!vgfbdev) 311 - return -ENOMEM; 312 - 313 - vgfbdev->vgdev = vgdev; 314 - vgdev->vgfbdev = vgfbdev; 315 - INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work); 316 - 317 - drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper, 318 - &virtio_gpu_fb_helper_funcs); 319 - ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper, 320 - VIRTIO_GPUFB_CONN_LIMIT); 321 - if (ret) { 322 - kfree(vgfbdev); 323 - return ret; 324 - } 325 - 326 - drm_fb_helper_single_add_all_connectors(&vgfbdev->helper); 327 - drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel); 328 - return 0; 329 - } 330 - 331 - void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev) 332 - { 333 - if (!vgdev->vgfbdev) 334 - return; 335 - 336 - virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev); 337 - kfree(vgdev->vgfbdev); 338 - vgdev->vgfbdev = NULL; 339 152 }
-8
drivers/gpu/drm/virtio/virtgpu_fence.c
··· 81 81 return fence; 82 82 } 83 83 84 - void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence) 85 - { 86 - if (!fence) 87 - return; 88 - 89 - dma_fence_put(&fence->f); 90 - } 91 - 92 84 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, 93 85 struct virtio_gpu_ctrl_hdr *cmd_hdr, 94 86 struct virtio_gpu_fence *fence)
+1 -1
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 351 351 virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d); 352 352 ret = virtio_gpu_object_attach(vgdev, qobj, fence); 353 353 if (ret) { 354 - virtio_gpu_fence_cleanup(fence); 354 + dma_fence_put(&fence->f); 355 355 goto fail_backoff; 356 356 } 357 357 ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+4 -14
drivers/gpu/drm/virtio/virtgpu_kms.c
··· 28 28 #include <drm/drmP.h> 29 29 #include "virtgpu_drv.h" 30 30 31 - static int virtio_gpu_fbdev = 1; 32 - 33 - MODULE_PARM_DESC(fbdev, "Disable/Enable framebuffer device & console"); 34 - module_param_named(fbdev, virtio_gpu_fbdev, int, 0400); 35 - 36 31 static void virtio_gpu_config_changed_work_func(struct work_struct *work) 37 32 { 38 33 struct virtio_gpu_device *vgdev = ··· 106 111 vgdev->num_capsets = num_capsets; 107 112 } 108 113 109 - int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) 114 + int virtio_gpu_init(struct drm_device *dev) 110 115 { 111 116 static vq_callback_t *callbacks[] = { 112 117 virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack ··· 193 198 num_capsets, &num_capsets); 194 199 DRM_INFO("number of cap sets: %d\n", num_capsets); 195 200 196 - ret = virtio_gpu_modeset_init(vgdev); 197 - if (ret) 198 - goto err_modeset; 201 + virtio_gpu_modeset_init(vgdev); 199 202 200 203 virtio_device_ready(vgdev->vdev); 201 204 vgdev->vqs_ready = true; ··· 205 212 virtio_gpu_cmd_get_display_info(vgdev); 206 213 wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending, 207 214 5 * HZ); 208 - if (virtio_gpu_fbdev) 209 - virtio_gpu_fbdev_init(vgdev); 210 - 211 215 return 0; 212 216 213 - err_modeset: 214 217 err_scanouts: 215 218 virtio_gpu_ttm_fini(vgdev); 216 219 err_ttm: ··· 228 239 } 229 240 } 230 241 231 - void virtio_gpu_driver_unload(struct drm_device *dev) 242 + void virtio_gpu_deinit(struct drm_device *dev) 232 243 { 233 244 struct virtio_gpu_device *vgdev = dev->dev_private; 234 245 ··· 236 247 flush_work(&vgdev->ctrlq.dequeue_work); 237 248 flush_work(&vgdev->cursorq.dequeue_work); 238 249 flush_work(&vgdev->config_changed_work); 250 + vgdev->vdev->config->reset(vgdev->vdev); 239 251 vgdev->vdev->config->del_vqs(vgdev->vdev); 240 252 241 253 virtio_gpu_modeset_fini(vgdev);
+10 -7
drivers/gpu/drm/virtio/virtgpu_plane.c
··· 130 130 plane->state->src_h >> 16, 131 131 plane->state->src_x >> 16, 132 132 plane->state->src_y >> 16); 133 - virtio_gpu_cmd_resource_flush(vgdev, handle, 134 - plane->state->src_x >> 16, 135 - plane->state->src_y >> 16, 136 - plane->state->src_w >> 16, 137 - plane->state->src_h >> 16); 133 + if (handle) 134 + virtio_gpu_cmd_resource_flush(vgdev, handle, 135 + plane->state->src_x >> 16, 136 + plane->state->src_y >> 16, 137 + plane->state->src_w >> 16, 138 + plane->state->src_h >> 16); 138 139 } 139 140 140 141 static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane, ··· 169 168 return; 170 169 171 170 vgfb = to_virtio_gpu_framebuffer(plane->state->fb); 172 - if (vgfb->fence) 173 - virtio_gpu_fence_cleanup(vgfb->fence); 171 + if (vgfb->fence) { 172 + dma_fence_put(&vgfb->fence->f); 173 + vgfb->fence = NULL; 174 + } 174 175 } 175 176 176 177 static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+10 -2
drivers/gpu/drm/virtio/virtgpu_vq.c
··· 192 192 193 193 list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { 194 194 resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf; 195 - if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) 196 - DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type)); 195 + if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) { 196 + if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) { 197 + struct virtio_gpu_ctrl_hdr *cmd; 198 + cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf; 199 + DRM_ERROR("response 0x%x (command 0x%x)\n", 200 + le32_to_cpu(resp->type), 201 + le32_to_cpu(cmd->type)); 202 + } else 203 + DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type)); 204 + } 197 205 if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) { 198 206 u64 f = le64_to_cpu(resp->fence_id); 199 207
+1
drivers/gpu/drm/vkms/vkms_drv.c
··· 95 95 dev->mode_config.min_height = YRES_MIN; 96 96 dev->mode_config.max_width = XRES_MAX; 97 97 dev->mode_config.max_height = YRES_MAX; 98 + dev->mode_config.preferred_depth = 24; 98 99 99 100 return vkms_output_init(vkmsdev); 100 101 }
-1
drivers/gpu/drm/xen/xen_drm_front_conn.c
··· 89 89 }; 90 90 91 91 static const struct drm_connector_funcs connector_funcs = { 92 - .dpms = drm_helper_connector_dpms, 93 92 .fill_modes = drm_helper_probe_single_connector_modes, 94 93 .destroy = drm_connector_cleanup, 95 94 .reset = drm_atomic_helper_connector_reset,
+3 -1
drivers/gpu/drm/zte/zx_hdmi.c
··· 125 125 union hdmi_infoframe frame; 126 126 int ret; 127 127 128 - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); 128 + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 129 + &hdmi->connector, 130 + mode); 129 131 if (ret) { 130 132 DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n", 131 133 ret);
-5
drivers/staging/vboxvideo/vbox_fb.c
··· 95 95 96 96 strcpy(info->fix.id, "vboxdrmfb"); 97 97 98 - /* 99 - * The last flag forces a mode set on VT switches even if the kernel 100 - * does not think it is needed. 101 - */ 102 - info->flags = FBINFO_DEFAULT | FBINFO_MISC_ALWAYS_SETPAR; 103 98 info->fbops = &vboxfb_ops; 104 99 105 100 /*
+4 -2
include/drm/bridge/dw_hdmi.h
··· 10 10 #ifndef __DW_HDMI__ 11 11 #define __DW_HDMI__ 12 12 13 - #include <drm/drmP.h> 14 - 13 + struct drm_connector; 14 + struct drm_display_mode; 15 + struct drm_encoder; 15 16 struct dw_hdmi; 17 + struct platform_device; 16 18 17 19 /** 18 20 * DOC: Supported input formats and encodings
+2 -1
include/drm/bridge/dw_mipi_dsi.h
··· 14 14 15 15 struct dw_mipi_dsi_phy_ops { 16 16 int (*init)(void *priv_data); 17 - int (*get_lane_mbps)(void *priv_data, struct drm_display_mode *mode, 17 + int (*get_lane_mbps)(void *priv_data, 18 + const struct drm_display_mode *mode, 18 19 unsigned long mode_flags, u32 lanes, u32 format, 19 20 unsigned int *lane_mbps); 20 21 };
+6 -20
include/drm/drmP.h
··· 94 94 struct pci_dev; 95 95 struct pci_controller; 96 96 97 - #define DRM_IF_VERSION(maj, min) (maj << 16 | min) 98 - 99 - #define DRM_SWITCH_POWER_ON 0 100 - #define DRM_SWITCH_POWER_OFF 1 101 - #define DRM_SWITCH_POWER_CHANGING 2 102 - #define DRM_SWITCH_POWER_DYNAMIC_OFF 3 103 - 104 - /* returns true if currently okay to sleep */ 105 - static inline bool drm_can_sleep(void) 106 - { 107 - if (in_atomic() || in_dbg_master() || irqs_disabled()) 108 - return false; 109 - return true; 110 - } 111 - 112 - #if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE) 113 - #define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x) 114 - #else 115 - #define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) 116 - #endif 97 + /* 98 + * NOTE: drmP.h is obsolete - do NOT add anything to this file 99 + * 100 + * Do not include drmP.h in new files. 101 + * Work is ongoing to remove drmP.h includes from existing files 102 + */ 117 103 118 104 #endif
+39 -4
include/drm/drm_atomic.h
··· 139 139 /** 140 140 * @abort_completion: 141 141 * 142 - * A flag that's set after drm_atomic_helper_setup_commit takes a second 143 - * reference for the completion of $drm_crtc_state.event. It's used by 144 - * the free code to remove the second reference if commit fails. 142 + * A flag that's set after drm_atomic_helper_setup_commit() takes a 143 + * second reference for the completion of $drm_crtc_state.event. It's 144 + * used by the free code to remove the second reference if commit fails. 145 145 */ 146 146 bool abort_completion; 147 147 }; ··· 228 228 * Currently only tracks the state update functions and the opaque driver 229 229 * private state itself, but in the future might also track which 230 230 * &drm_modeset_lock is required to duplicate and update this object's state. 231 + * 232 + * All private objects must be initialized before the DRM device they are 233 + * attached to is registered to the DRM subsystem (call to drm_dev_register()) 234 + * and should stay around until this DRM device is unregistered (call to 235 + * drm_dev_unregister()). In other words, private objects lifetime is tied 236 + * to the DRM device lifetime. This implies that: 237 + * 238 + * 1/ all calls to drm_atomic_private_obj_init() must be done before calling 239 + * drm_dev_register() 240 + * 2/ all calls to drm_atomic_private_obj_fini() must be done after calling 241 + * drm_dev_unregister() 231 242 */ 232 243 struct drm_private_obj { 244 + /** 245 + * @head: List entry used to attach a private object to a &drm_device 246 + * (queued to &drm_mode_config.privobj_list). 247 + */ 248 + struct list_head head; 249 + 250 + /** 251 + * @lock: Modeset lock to protect the state object. 252 + */ 253 + struct drm_modeset_lock lock; 254 + 233 255 /** 234 256 * @state: Current atomic state for this driver private object. 
235 257 */ ··· 265 243 */ 266 244 const struct drm_private_state_funcs *funcs; 267 245 }; 246 + 247 + /** 248 + * drm_for_each_privobj() - private object iterator 249 + * 250 + * @privobj: pointer to the current private object. Updated after each 251 + * iteration 252 + * @dev: the DRM device we want get private objects from 253 + * 254 + * Allows one to iterate over all private objects attached to @dev 255 + */ 256 + #define drm_for_each_privobj(privobj, dev) \ 257 + list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head) 268 258 269 259 /** 270 260 * struct drm_private_state - base struct for driver private object state ··· 434 400 drm_atomic_get_connector_state(struct drm_atomic_state *state, 435 401 struct drm_connector *connector); 436 402 437 - void drm_atomic_private_obj_init(struct drm_private_obj *obj, 403 + void drm_atomic_private_obj_init(struct drm_device *dev, 404 + struct drm_private_obj *obj, 438 405 struct drm_private_state *state, 439 406 const struct drm_private_state_funcs *funcs); 440 407 void drm_atomic_private_obj_fini(struct drm_private_obj *obj);
+4 -4
include/drm/drm_bridge.h
··· 196 196 * the DRM framework will have to be extended with DRM bridge states. 197 197 */ 198 198 void (*mode_set)(struct drm_bridge *bridge, 199 - struct drm_display_mode *mode, 200 - struct drm_display_mode *adjusted_mode); 199 + const struct drm_display_mode *mode, 200 + const struct drm_display_mode *adjusted_mode); 201 201 /** 202 202 * @pre_enable: 203 203 * ··· 310 310 void drm_bridge_disable(struct drm_bridge *bridge); 311 311 void drm_bridge_post_disable(struct drm_bridge *bridge); 312 312 void drm_bridge_mode_set(struct drm_bridge *bridge, 313 - struct drm_display_mode *mode, 314 - struct drm_display_mode *adjusted_mode); 313 + const struct drm_display_mode *mode, 314 + const struct drm_display_mode *adjusted_mode); 315 315 void drm_bridge_pre_enable(struct drm_bridge *bridge); 316 316 void drm_bridge_enable(struct drm_bridge *bridge); 317 317
+9 -1
include/drm/drm_connector.h
··· 366 366 bool has_hdmi_infoframe; 367 367 368 368 /** 369 + * @rgb_quant_range_selectable: Does the sink support selecting 370 + * the RGB quantization range? 371 + */ 372 + bool rgb_quant_range_selectable; 373 + 374 + /** 369 375 * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even 370 376 * more stuff redundant with @bus_formats. 371 377 */ ··· 400 394 /** 401 395 * struct drm_tv_connector_state - TV connector related states 402 396 * @subconnector: selected subconnector 403 - * @margins: margins 397 + * @margins: margins (all margins are expressed in pixels) 404 398 * @margins.left: left margin 405 399 * @margins.right: right margin 406 400 * @margins.top: top margin ··· 1255 1249 const char *drm_get_content_protection_name(int val); 1256 1250 1257 1251 int drm_mode_create_dvi_i_properties(struct drm_device *dev); 1252 + int drm_mode_create_tv_margin_properties(struct drm_device *dev); 1258 1253 int drm_mode_create_tv_properties(struct drm_device *dev, 1259 1254 unsigned int num_modes, 1260 1255 const char * const modes[]); 1256 + void drm_connector_attach_tv_margin_properties(struct drm_connector *conn); 1261 1257 int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1262 1258 int drm_connector_attach_content_type_property(struct drm_connector *dev); 1263 1259 int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
-3
include/drm/drm_crtc.h
··· 1149 1149 return 1 << drm_crtc_index(crtc); 1150 1150 } 1151 1151 1152 - int drm_crtc_force_disable(struct drm_crtc *crtc); 1153 - int drm_crtc_force_disable_all(struct drm_device *dev); 1154 - 1155 1152 int drm_mode_set_config_internal(struct drm_mode_set *set); 1156 1153 struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); 1157 1154
+1
include/drm/drm_crtc_helper.h
··· 56 56 int drm_helper_connector_dpms(struct drm_connector *connector, int mode); 57 57 58 58 void drm_helper_resume_force_mode(struct drm_device *dev); 59 + int drm_helper_force_disable_all(struct drm_device *dev); 59 60 60 61 /* drm_probe_helper.c */ 61 62 int drm_helper_probe_single_connector_modes(struct drm_connector
+207 -85
include/drm/drm_device.h
··· 24 24 struct pci_dev; 25 25 struct pci_controller; 26 26 27 + 27 28 /** 28 - * DRM device structure. This structure represent a complete card that 29 + * enum drm_switch_power - power state of drm device 30 + */ 31 + 32 + enum switch_power_state { 33 + /** @DRM_SWITCH_POWER_ON: Power state is ON */ 34 + DRM_SWITCH_POWER_ON = 0, 35 + 36 + /** @DRM_SWITCH_POWER_OFF: Power state is OFF */ 37 + DRM_SWITCH_POWER_OFF = 1, 38 + 39 + /** @DRM_SWITCH_POWER_CHANGING: Power state is changing */ 40 + DRM_SWITCH_POWER_CHANGING = 2, 41 + 42 + /** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */ 43 + DRM_SWITCH_POWER_DYNAMIC_OFF = 3, 44 + }; 45 + 46 + /** 47 + * struct drm_device - DRM device structure 48 + * 49 + * This structure represent a complete card that 29 50 * may contain multiple heads. 30 51 */ 31 52 struct drm_device { 32 - struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ 33 - int if_version; /**< Highest interface version set */ 53 + /** 54 + * @legacy_dev_list: 55 + * 56 + * List of devices per driver for stealth attach cleanup 57 + */ 58 + struct list_head legacy_dev_list; 34 59 35 - /** \name Lifetime Management */ 36 - /*@{ */ 37 - struct kref ref; /**< Object ref-count */ 38 - struct device *dev; /**< Device structure of bus-device */ 39 - struct drm_driver *driver; /**< DRM driver managing the device */ 40 - void *dev_private; /**< DRM driver private data */ 41 - struct drm_minor *primary; /**< Primary node */ 42 - struct drm_minor *render; /**< Render node */ 60 + /** @if_version: Highest interface version set */ 61 + int if_version; 62 + 63 + /** @ref: Object ref-count */ 64 + struct kref ref; 65 + 66 + /** @dev: Device structure of bus-device */ 67 + struct device *dev; 68 + 69 + /** @driver: DRM driver managing the device */ 70 + struct drm_driver *driver; 71 + 72 + /** 73 + * @dev_private: 74 + * 75 + * DRM driver private data. 
Instead of using this pointer it is 76 + * recommended that drivers use drm_dev_init() and embed struct 77 + * &drm_device in their larger per-device structure. 78 + */ 79 + void *dev_private; 80 + 81 + /** @primary: Primary node */ 82 + struct drm_minor *primary; 83 + 84 + /** @render: Render node */ 85 + struct drm_minor *render; 86 + 87 + /** 88 + * @registered: 89 + * 90 + * Internally used by drm_dev_register() and drm_connector_register(). 91 + */ 43 92 bool registered; 44 93 45 - /* currently active master for this device. Protected by master_mutex */ 94 + /** 95 + * @master: 96 + * 97 + * Currently active master for this device. 98 + * Protected by &master_mutex 99 + */ 46 100 struct drm_master *master; 47 101 48 102 /** ··· 117 63 */ 118 64 bool unplugged; 119 65 120 - struct inode *anon_inode; /**< inode for private address-space */ 121 - char *unique; /**< unique name of the device */ 122 - /*@} */ 66 + /** @anon_inode: inode for private address-space */ 67 + struct inode *anon_inode; 123 68 124 - /** \name Locks */ 125 - /*@{ */ 126 - struct mutex struct_mutex; /**< For others */ 127 - struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ 128 - /*@} */ 69 + /** @unique: Unique name of the device */ 70 + char *unique; 129 71 130 - /** \name Usage Counters */ 131 - /*@{ */ 132 - int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ 133 - spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. 
*/ 134 - int buf_use; /**< Buffers in use -- cannot alloc */ 135 - atomic_t buf_alloc; /**< Buffer allocation in progress */ 136 - /*@} */ 72 + /** 73 + * @struct_mutex: 74 + * 75 + * Lock for others (not &drm_minor.master and &drm_file.is_master) 76 + */ 77 + struct mutex struct_mutex; 137 78 79 + /** 80 + * @master_mutex: 81 + * 82 + * Lock for &drm_minor.master and &drm_file.is_master 83 + */ 84 + struct mutex master_mutex; 85 + 86 + /** 87 + * @open_count: 88 + * 89 + * Usage counter for outstanding files open, 90 + * protected by drm_global_mutex 91 + */ 92 + int open_count; 93 + 94 + /** @filelist_mutex: Protects @filelist. */ 138 95 struct mutex filelist_mutex; 96 + /** 97 + * @filelist: 98 + * 99 + * List of userspace clients, linked through &drm_file.lhead. 100 + */ 139 101 struct list_head filelist; 140 102 141 103 /** 142 104 * @filelist_internal: 143 105 * 144 - * List of open DRM files for in-kernel clients. Protected by @filelist_mutex. 106 + * List of open DRM files for in-kernel clients. 107 + * Protected by &filelist_mutex. 145 108 */ 146 109 struct list_head filelist_internal; 147 110 148 111 /** 149 112 * @clientlist_mutex: 150 113 * 151 - * Protects @clientlist access. 114 + * Protects &clientlist access. 152 115 */ 153 116 struct mutex clientlist_mutex; 154 117 155 118 /** 156 119 * @clientlist: 157 120 * 158 - * List of in-kernel clients. Protected by @clientlist_mutex. 121 + * List of in-kernel clients. Protected by &clientlist_mutex. 
159 122 */ 160 123 struct list_head clientlist; 161 - 162 - /** \name Memory management */ 163 - /*@{ */ 164 - struct list_head maplist; /**< Linked list of regions */ 165 - struct drm_open_hash map_hash; /**< User token hash table for maps */ 166 - 167 - /** \name Context handle management */ 168 - /*@{ */ 169 - struct list_head ctxlist; /**< Linked list of context handles */ 170 - struct mutex ctxlist_mutex; /**< For ctxlist */ 171 - 172 - struct idr ctx_idr; 173 - 174 - struct list_head vmalist; /**< List of vmas (for debugging) */ 175 - 176 - /*@} */ 177 - 178 - /** \name DMA support */ 179 - /*@{ */ 180 - struct drm_device_dma *dma; /**< Optional pointer for DMA support */ 181 - /*@} */ 182 - 183 - /** \name Context support */ 184 - /*@{ */ 185 - 186 - __volatile__ long context_flag; /**< Context swapping flag */ 187 - int last_context; /**< Last current context */ 188 - /*@} */ 189 124 190 125 /** 191 126 * @irq_enabled: ··· 184 141 * to true manually. 185 142 */ 186 143 bool irq_enabled; 144 + 145 + /** 146 + * @irq: Used by the drm_irq_install() and drm_irq_unistall() helpers. 147 + */ 187 148 int irq; 188 149 189 150 /** ··· 215 168 */ 216 169 struct drm_vblank_crtc *vblank; 217 170 218 - spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ 171 + /** 172 + * @vblank_time_lock: 173 + * 174 + * Protects vblank count and time updates during vblank enable/disable 175 + */ 176 + spinlock_t vblank_time_lock; 177 + /** 178 + * @vbl_lock: Top-level vblank references lock, wraps the low-level 179 + * @vblank_time_lock. 180 + */ 219 181 spinlock_t vbl_lock; 220 182 221 183 /** ··· 240 184 * races and imprecision over longer time periods, hence exposing a 241 185 * hardware vblank counter is always recommended. 242 186 * 243 - * If non-zeor, &drm_crtc_funcs.get_vblank_counter must be set. 187 + * This is the statically configured device wide maximum. 
The driver 188 + * can instead choose to use a runtime configurable per-crtc value 189 + * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count 190 + * must be left at zero. See drm_crtc_set_max_vblank_count() on how 191 + * to use the per-crtc value. 192 + * 193 + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set. 244 194 */ 245 - u32 max_vblank_count; /**< size of vblank counter register */ 195 + u32 max_vblank_count; 196 + 197 + /** @vblank_event_list: List of vblank events */ 198 + struct list_head vblank_event_list; 246 199 247 200 /** 248 - * List of events 201 + * @event_lock: 202 + * 203 + * Protects @vblank_event_list and event delivery in 204 + * general. See drm_send_event() and drm_send_event_locked(). 249 205 */ 250 - struct list_head vblank_event_list; 251 206 spinlock_t event_lock; 252 207 253 - /*@} */ 208 + /** @agp: AGP data */ 209 + struct drm_agp_head *agp; 254 210 255 - struct drm_agp_head *agp; /**< AGP data */ 211 + /** @pdev: PCI device structure */ 212 + struct pci_dev *pdev; 256 213 257 - struct pci_dev *pdev; /**< PCI device structure */ 258 214 #ifdef __alpha__ 215 + /** @hose: PCI hose, only used on ALPHA platforms. */ 259 216 struct pci_controller *hose; 260 217 #endif 218 + /** @num_crtcs: Number of CRTCs on this device */ 219 + unsigned int num_crtcs; 261 220 262 - struct drm_sg_mem *sg; /**< Scatter gather memory */ 263 - unsigned int num_crtcs; /**< Number of CRTCs on this device */ 221 + /** @mode_config: Current mode config */ 222 + struct drm_mode_config mode_config; 223 + 224 + /** @object_name_lock: GEM information */ 225 + struct mutex object_name_lock; 226 + 227 + /** @object_name_idr: GEM information */ 228 + struct idr object_name_idr; 229 + 230 + /** @vma_offset_manager: GEM information */ 231 + struct drm_vma_offset_manager *vma_offset_manager; 232 + 233 + /** 234 + * @switch_power_state: 235 + * 236 + * Power state of the client. 237 + * Used by drivers supporting the switcheroo driver. 
238 + * The state is maintained in the 239 + * &vga_switcheroo_client_ops.set_gpu_state callback 240 + */ 241 + enum switch_power_state switch_power_state; 242 + 243 + /** 244 + * @fb_helper: 245 + * 246 + * Pointer to the fbdev emulation structure. 247 + * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini(). 248 + */ 249 + struct drm_fb_helper *fb_helper; 250 + 251 + /* Everything below here is for legacy driver, never use! */ 252 + /* private: */ 253 + 254 + /* Context handle management - linked list of context handles */ 255 + struct list_head ctxlist; 256 + 257 + /* Context handle management - mutex for &ctxlist */ 258 + struct mutex ctxlist_mutex; 259 + 260 + /* Context handle management */ 261 + struct idr ctx_idr; 262 + 263 + /* Memory management - linked list of regions */ 264 + struct list_head maplist; 265 + 266 + /* Memory management - user token hash table for maps */ 267 + struct drm_open_hash map_hash; 268 + 269 + /* Context handle management - list of vmas (for debugging) */ 270 + struct list_head vmalist; 271 + 272 + /* Optional pointer for DMA support */ 273 + struct drm_device_dma *dma; 274 + 275 + /* Context swapping flag */ 276 + __volatile__ long context_flag; 277 + 278 + /* Last current context */ 279 + int last_context; 280 + 281 + /* Lock for &buf_use and a few other things. 
*/ 282 + spinlock_t buf_lock; 283 + 284 + /* Usage counter for buffers in use -- cannot alloc */ 285 + int buf_use; 286 + 287 + /* Buffer allocation in progress */ 288 + atomic_t buf_alloc; 264 289 265 290 struct { 266 291 int context; ··· 351 214 struct drm_local_map *agp_buffer_map; 352 215 unsigned int agp_buffer_token; 353 216 354 - struct drm_mode_config mode_config; /**< Current mode config */ 355 - 356 - /** \name GEM information */ 357 - /*@{ */ 358 - struct mutex object_name_lock; 359 - struct idr object_name_idr; 360 - struct drm_vma_offset_manager *vma_offset_manager; 361 - /*@} */ 362 - int switch_power_state; 363 - 364 - /** 365 - * @fb_helper: 366 - * 367 - * Pointer to the fbdev emulation structure. 368 - * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini(). 369 - */ 370 - struct drm_fb_helper *fb_helper; 217 + /* Scatter gather memory */ 218 + struct drm_sg_mem *sg; 371 219 }; 372 220 373 221 #endif
+140 -13
include/drm/drm_dp_mst_helper.h
··· 44 44 45 45 /** 46 46 * struct drm_dp_mst_port - MST port 47 - * @kref: reference count for this port. 48 47 * @port_num: port number 49 48 * @input: if this port is an input port. 50 49 * @mcs: message capability status - DP 1.2 spec. ··· 66 67 * in the MST topology. 67 68 */ 68 69 struct drm_dp_mst_port { 69 - struct kref kref; 70 + /** 71 + * @topology_kref: refcount for this port's lifetime in the topology, 72 + * only the DP MST helpers should need to touch this 73 + */ 74 + struct kref topology_kref; 75 + 76 + /** 77 + * @malloc_kref: refcount for the memory allocation containing this 78 + * structure. See drm_dp_mst_get_port_malloc() and 79 + * drm_dp_mst_put_port_malloc(). 80 + */ 81 + struct kref malloc_kref; 70 82 71 83 u8 port_num; 72 84 bool input; ··· 112 102 113 103 /** 114 104 * struct drm_dp_mst_branch - MST branch device. 115 - * @kref: reference count for this port. 116 105 * @rad: Relative Address to talk to this branch device. 117 106 * @lct: Link count total to talk to this branch device. 118 107 * @num_ports: number of ports on the branch. ··· 130 121 * to downstream port of parent branches. 131 122 */ 132 123 struct drm_dp_mst_branch { 133 - struct kref kref; 124 + /** 125 + * @topology_kref: refcount for this branch device's lifetime in the 126 + * topology, only the DP MST helpers should need to touch this 127 + */ 128 + struct kref topology_kref; 129 + 130 + /** 131 + * @malloc_kref: refcount for the memory allocation containing this 132 + * structure. See drm_dp_mst_get_mstb_malloc() and 133 + * drm_dp_mst_put_mstb_malloc(). 
134 + */ 135 + struct kref malloc_kref; 136 + 134 137 u8 rad[8]; 135 138 u8 lct; 136 139 int num_ports; ··· 408 387 void (*register_connector)(struct drm_connector *connector); 409 388 void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, 410 389 struct drm_connector *connector); 411 - void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); 412 - 413 390 }; 414 391 415 392 #define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) ··· 425 406 426 407 #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) 427 408 409 + struct drm_dp_vcpi_allocation { 410 + struct drm_dp_mst_port *port; 411 + int vcpi; 412 + struct list_head next; 413 + }; 414 + 428 415 struct drm_dp_mst_topology_state { 429 416 struct drm_private_state base; 430 - int avail_slots; 417 + struct list_head vcpis; 431 418 struct drm_dp_mst_topology_mgr *mgr; 432 419 }; 433 420 ··· 644 619 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); 645 620 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, 646 621 struct drm_dp_mst_topology_mgr *mgr); 647 - int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, 648 - struct drm_dp_mst_topology_mgr *mgr, 649 - struct drm_dp_mst_port *port, int pbn); 650 - int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, 651 - struct drm_dp_mst_topology_mgr *mgr, 652 - int slots); 622 + int __must_check 623 + drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, 624 + struct drm_dp_mst_topology_mgr *mgr, 625 + struct drm_dp_mst_port *port, int pbn); 626 + int __must_check 627 + drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, 628 + struct drm_dp_mst_topology_mgr *mgr, 629 + struct drm_dp_mst_port *port); 653 630 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, 654 631 struct drm_dp_mst_port *port, bool power_up); 632 + int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); 633 + 634 
+ void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); 635 + void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); 636 + 637 + extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; 638 + 639 + /** 640 + * __drm_dp_mst_state_iter_get - private atomic state iterator function for 641 + * macro-internal use 642 + * @state: &struct drm_atomic_state pointer 643 + * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor 644 + * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state 645 + * iteration cursor 646 + * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state 647 + * iteration cursor 648 + * @i: int iteration cursor, for macro-internal use 649 + * 650 + * Used by for_each_oldnew_mst_mgr_in_state(), 651 + * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't 652 + * call this directly. 653 + * 654 + * Returns: 655 + * True if the current &struct drm_private_obj is a &struct 656 + * drm_dp_mst_topology_mgr, false otherwise. 
657 + */ 658 + static inline bool 659 + __drm_dp_mst_state_iter_get(struct drm_atomic_state *state, 660 + struct drm_dp_mst_topology_mgr **mgr, 661 + struct drm_dp_mst_topology_state **old_state, 662 + struct drm_dp_mst_topology_state **new_state, 663 + int i) 664 + { 665 + struct __drm_private_objs_state *objs_state = &state->private_objs[i]; 666 + 667 + if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs) 668 + return false; 669 + 670 + *mgr = to_dp_mst_topology_mgr(objs_state->ptr); 671 + if (old_state) 672 + *old_state = to_dp_mst_topology_state(objs_state->old_state); 673 + if (new_state) 674 + *new_state = to_dp_mst_topology_state(objs_state->new_state); 675 + 676 + return true; 677 + } 678 + 679 + /** 680 + * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology 681 + * managers in an atomic update 682 + * @__state: &struct drm_atomic_state pointer 683 + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor 684 + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old 685 + * state 686 + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new 687 + * state 688 + * @__i: int iteration cursor, for macro-internal use 689 + * 690 + * This iterates over all DRM DP MST topology managers in an atomic update, 691 + * tracking both old and new state. This is useful in places where the state 692 + * delta needs to be considered, for example in atomic check functions. 
693 + */ 694 + #define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \ 695 + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ 696 + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i))) 697 + 698 + /** 699 + * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers 700 + * in an atomic update 701 + * @__state: &struct drm_atomic_state pointer 702 + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor 703 + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old 704 + * state 705 + * @__i: int iteration cursor, for macro-internal use 706 + * 707 + * This iterates over all DRM DP MST topology managers in an atomic update, 708 + * tracking only the old state. This is useful in disable functions, where we 709 + * need the old state the hardware is still in. 710 + */ 711 + #define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \ 712 + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ 713 + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i))) 714 + 715 + /** 716 + * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers 717 + * in an atomic update 718 + * @__state: &struct drm_atomic_state pointer 719 + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor 720 + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new 721 + * state 722 + * @__i: int iteration cursor, for macro-internal use 723 + * 724 + * This iterates over all DRM DP MST topology managers in an atomic update, 725 + * tracking only the new state. This is useful in enable functions, where we 726 + * need the new state the hardware should be in when the atomic commit 727 + * operation has completed. 
728 + */ 729 + #define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \ 730 + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ 731 + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i))) 655 732 656 733 #endif
+4 -6
include/drm/drm_edid.h
··· 352 352 353 353 int 354 354 drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, 355 - const struct drm_display_mode *mode, 356 - bool is_hdmi2_sink); 355 + struct drm_connector *connector, 356 + const struct drm_display_mode *mode); 357 357 int 358 358 drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, 359 359 struct drm_connector *connector, 360 360 const struct drm_display_mode *mode); 361 361 void 362 362 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, 363 + struct drm_connector *connector, 363 364 const struct drm_display_mode *mode, 364 - enum hdmi_quantization_range rgb_quant_range, 365 - bool rgb_quant_range_selectable, 366 - bool is_hdmi2_sink); 365 + enum hdmi_quantization_range rgb_quant_range); 367 366 368 367 /** 369 368 * drm_eld_mnl - Get ELD monitor name length in bytes. ··· 470 471 enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code); 471 472 bool drm_detect_hdmi_monitor(struct edid *edid); 472 473 bool drm_detect_monitor_audio(struct edid *edid); 473 - bool drm_rgb_quant_range_selectable(struct edid *edid); 474 474 enum hdmi_quantization_range 475 475 drm_default_rgb_quant_range(const struct drm_display_mode *mode); 476 476 int drm_add_modes_noedid(struct drm_connector *connector,
-1
include/drm/drm_encoder_slave.h
··· 27 27 #ifndef __DRM_ENCODER_SLAVE_H__ 28 28 #define __DRM_ENCODER_SLAVE_H__ 29 29 30 - #include <drm/drmP.h> 31 30 #include <drm/drm_crtc.h> 32 31 #include <drm/drm_encoder.h> 33 32
+7 -3
include/drm/drm_framebuffer.h
··· 23 23 #ifndef __DRM_FRAMEBUFFER_H__ 24 24 #define __DRM_FRAMEBUFFER_H__ 25 25 26 - #include <linux/list.h> 27 26 #include <linux/ctype.h> 27 + #include <linux/list.h> 28 + #include <linux/sched.h> 29 + 28 30 #include <drm/drm_mode_object.h> 29 31 30 - struct drm_framebuffer; 31 - struct drm_file; 32 + struct drm_clip_rect; 32 33 struct drm_device; 34 + struct drm_file; 35 + struct drm_framebuffer; 36 + struct drm_gem_object; 33 37 34 38 /** 35 39 * struct drm_framebuffer_funcs - framebuffer hooks
+4 -1
include/drm/drm_gem_cma_helper.h
··· 2 2 #ifndef __DRM_GEM_CMA_HELPER_H__ 3 3 #define __DRM_GEM_CMA_HELPER_H__ 4 4 5 - #include <drm/drmP.h> 5 + #include <drm/drm_file.h> 6 + #include <drm/drm_ioctl.h> 6 7 #include <drm/drm_gem.h> 8 + 9 + struct drm_mode_create_dumb; 7 10 8 11 /** 9 12 * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
+16 -7
include/drm/drm_mode_config.h
··· 391 391 /** 392 392 * @idr_mutex: 393 393 * 394 - * Mutex for KMS ID allocation and management. Protects both @crtc_idr 394 + * Mutex for KMS ID allocation and management. Protects both @object_idr 395 395 * and @tile_idr. 396 396 */ 397 397 struct mutex idr_mutex; 398 398 399 399 /** 400 - * @crtc_idr: 400 + * @object_idr: 401 401 * 402 402 * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc, 403 403 * connector, modes - just makes life easier to have only one. 404 404 */ 405 - struct idr crtc_idr; 405 + struct idr object_idr; 406 406 407 407 /** 408 408 * @tile_idr: ··· 511 511 * locks. 512 512 */ 513 513 struct list_head property_list; 514 + 515 + /** 516 + * @privobj_list: 517 + * 518 + * List of private objects linked with &drm_private_obj.head. This is 519 + * invariant over the lifetime of a device and hence doesn't need any 520 + * locks. 521 + */ 522 + struct list_head privobj_list; 514 523 515 524 int min_width, min_height; 516 525 int max_width, max_height; ··· 697 688 struct drm_property *tv_mode_property; 698 689 /** 699 690 * @tv_left_margin_property: Optional TV property to set the left 700 - * margin. 691 + * margin (expressed in pixels). 701 692 */ 702 693 struct drm_property *tv_left_margin_property; 703 694 /** 704 695 * @tv_right_margin_property: Optional TV property to set the right 705 - * margin. 696 + * margin (expressed in pixels). 706 697 */ 707 698 struct drm_property *tv_right_margin_property; 708 699 /** 709 700 * @tv_top_margin_property: Optional TV property to set the right 710 - * margin. 701 + * margin (expressed in pixels). 711 702 */ 712 703 struct drm_property *tv_top_margin_property; 713 704 /** 714 705 * @tv_bottom_margin_property: Optional TV property to set the right 715 - * margin. 706 + * margin (expressed in pixels). 716 707 */ 717 708 struct drm_property *tv_bottom_margin_property; 718 709 /**
+3 -18
include/drm/drm_modes.h
··· 136 136 .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ 137 137 .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ 138 138 .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ 139 - .vscan = (vs), .flags = (f), \ 140 - .base.type = DRM_MODE_OBJECT_MODE 139 + .vscan = (vs), .flags = (f) 141 140 142 141 #define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ 143 142 #define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ ··· 211 212 * struct list_head for mode lists. 212 213 */ 213 214 struct list_head head; 214 - 215 - /** 216 - * @base: 217 - * 218 - * A display mode is a normal modeset object, possibly including public 219 - * userspace id. 220 - * 221 - * FIXME: 222 - * 223 - * This can probably be removed since the entire concept of userspace 224 - * managing modes explicitly has never landed in upstream kernel mode 225 - * setting support. 226 - */ 227 - struct drm_mode_object base; 228 215 229 216 /** 230 217 * @name: ··· 414 429 /** 415 430 * DRM_MODE_FMT - printf string for &struct drm_display_mode 416 431 */ 417 - #define DRM_MODE_FMT "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" 432 + #define DRM_MODE_FMT "\"%s\": %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" 418 433 419 434 /** 420 435 * DRM_MODE_ARG - printf arguments for &struct drm_display_mode 421 436 * @m: display mode 422 437 */ 423 438 #define DRM_MODE_ARG(m) \ 424 - (m)->base.id, (m)->name, (m)->vrefresh, (m)->clock, \ 439 + (m)->name, (m)->vrefresh, (m)->clock, \ 425 440 (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ 426 441 (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ 427 442 (m)->type, (m)->flags
-21
include/drm/drm_syncobj.h
··· 30 30 31 31 struct drm_file; 32 32 33 - struct drm_syncobj_cb; 34 - 35 33 /** 36 34 * struct drm_syncobj - sync object. 37 35 * ··· 60 62 * @file: A file backing for this syncobj. 61 63 */ 62 64 struct file *file; 63 - }; 64 - 65 - typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj, 66 - struct drm_syncobj_cb *cb); 67 - 68 - /** 69 - * struct drm_syncobj_cb - callback for drm_syncobj_add_callback 70 - * @node: used by drm_syncob_add_callback to append this struct to 71 - * &drm_syncobj.cb_list 72 - * @func: drm_syncobj_func_t to call 73 - * 74 - * This struct will be initialized by drm_syncobj_add_callback, additional 75 - * data can be passed along by embedding drm_syncobj_cb in another struct. 76 - * The callback will get called the next time drm_syncobj_replace_fence is 77 - * called. 78 - */ 79 - struct drm_syncobj_cb { 80 - struct list_head node; 81 - drm_syncobj_func_t func; 82 65 }; 83 66 84 67 void drm_syncobj_free(struct kref *kref);
+52 -1
include/drm/drm_util.h
··· 26 26 #ifndef _DRM_UTIL_H_ 27 27 #define _DRM_UTIL_H_ 28 28 29 - /* helper for handling conditionals in various for_each macros */ 29 + /** 30 + * DOC: drm utils 31 + * 32 + * Macros and inline functions that does not naturally belong in other places 33 + */ 34 + 35 + #include <linux/interrupt.h> 36 + #include <linux/kgdb.h> 37 + #include <linux/preempt.h> 38 + #include <linux/smp.h> 39 + 40 + /* 41 + * Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall 42 + * only be visible for drmselftests. 43 + */ 44 + #if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE) 45 + #define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x) 46 + #else 47 + #define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) 48 + #endif 49 + 50 + /** 51 + * for_each_if - helper for handling conditionals in various for_each macros 52 + * @condition: The condition to check 53 + * 54 + * Typical use:: 55 + * 56 + * #define for_each_foo_bar(x, y) \' 57 + * list_for_each_entry(x, y->list, head) \' 58 + * for_each_if(x->something == SOMETHING) 59 + * 60 + * The for_each_if() macro makes the use of for_each_foo_bar() less error 61 + * prone. 62 + */ 30 63 #define for_each_if(condition) if (!(condition)) {} else 64 + 65 + /** 66 + * drm_can_sleep - returns true if currently okay to sleep 67 + * 68 + * This function shall not be used in new code. 69 + * The check for running in atomic context may not work - see linux/preempt.h. 70 + * 71 + * FIXME: All users of drm_can_sleep should be removed (see todo.rst) 72 + * 73 + * Returns: 74 + * True if kgdb is active or we are in an atomic context or irqs are disabled 75 + */ 76 + static inline bool drm_can_sleep(void) 77 + { 78 + if (in_atomic() || in_dbg_master() || irqs_disabled()) 79 + return false; 80 + return true; 81 + } 31 82 32 83 #endif
+22
include/drm/drm_vblank.h
··· 129 129 */ 130 130 u32 last; 131 131 /** 132 + * @max_vblank_count: 133 + * 134 + * Maximum value of the vblank registers for this crtc. This value +1 135 + * will result in a wrap-around of the vblank register. It is used 136 + * by the vblank core to handle wrap-arounds. 137 + * 138 + * If set to zero the vblank core will try to guess the elapsed vblanks 139 + * between times when the vblank interrupt is disabled through 140 + * high-precision timestamps. That approach is suffering from small 141 + * races and imprecision over longer time periods, hence exposing a 142 + * hardware vblank counter is always recommended. 143 + * 144 + * This is the runtime configurable per-crtc maximum set through 145 + * drm_crtc_set_max_vblank_count(). If this is used the driver 146 + * must leave the device wide &drm_device.max_vblank_count at zero. 147 + * 148 + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set. 149 + */ 150 + u32 max_vblank_count; 151 + /** 132 152 * @inmodeset: Tracks whether the vblank is disabled due to a modeset. 133 153 * For legacy driver bit 2 additionally tracks whether an additional 134 154 * temporary vblank reference has been acquired to paper over the ··· 226 206 void drm_calc_timestamping_constants(struct drm_crtc *crtc, 227 207 const struct drm_display_mode *mode); 228 208 wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc); 209 + void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, 210 + u32 max_vblank_count); 229 211 #endif
+15 -7
include/linux/dma-fence.h
··· 77 77 struct list_head cb_list; 78 78 spinlock_t *lock; 79 79 u64 context; 80 - unsigned seqno; 80 + u64 seqno; 81 81 unsigned long flags; 82 82 ktime_t timestamp; 83 83 int error; ··· 244 244 }; 245 245 246 246 void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, 247 - spinlock_t *lock, u64 context, unsigned seqno); 247 + spinlock_t *lock, u64 context, u64 seqno); 248 248 249 249 void dma_fence_release(struct kref *kref); 250 250 void dma_fence_free(struct dma_fence *fence); ··· 414 414 * Returns true if f1 is chronologically later than f2. Both fences must be 415 415 * from the same context, since a seqno is not common across contexts. 416 416 */ 417 - static inline bool __dma_fence_is_later(u32 f1, u32 f2) 417 + static inline bool __dma_fence_is_later(u64 f1, u64 f2) 418 418 { 419 - return (int)(f1 - f2) > 0; 419 + /* This is for backward compatibility with drivers which can only handle 420 + * 32bit sequence numbers. Use a 64bit compare when any of the higher 421 + * bits are none zero, otherwise use a 32bit compare with wrap around 422 + * handling. 423 + */ 424 + if (upper_32_bits(f1) || upper_32_bits(f2)) 425 + return f1 > f2; 426 + 427 + return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; 420 428 } 421 429 422 430 /** ··· 556 548 do { \ 557 549 struct dma_fence *__ff = (f); \ 558 550 if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ 559 - pr_info("f %llu#%u: " fmt, \ 551 + pr_info("f %llu#%llu: " fmt, \ 560 552 __ff->context, __ff->seqno, ##args); \ 561 553 } while (0) 562 554 563 555 #define DMA_FENCE_WARN(f, fmt, args...) \ 564 556 do { \ 565 557 struct dma_fence *__ff = (f); \ 566 558 pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ 567 559 ##args); \ 568 560 } while (0) 569 561 570 562 #define DMA_FENCE_ERR(f, fmt, args...) \ 571 563 do { \ 572 564 struct dma_fence *__ff = (f); \ 573 - pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ 565 + pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \ 574 566 ##args); \ 575 567 } while (0) 576 568
+23
include/uapi/drm/drm_fourcc.h
··· 581 581 * Indicates the superblock size(s) used for the AFBC buffer. The buffer 582 582 * size (in pixels) must be aligned to a multiple of the superblock size. 583 583 * Four lowest significant bits(LSBs) are reserved for block size. 584 + * 585 + * Where one superblock size is specified, it applies to all planes of the 586 + * buffer (e.g. 16x16, 32x8). When multiple superblock sizes are specified, 587 + * the first applies to the Luma plane and the second applies to the Chroma 588 + * plane(s). e.g. (32x8_64x4 means 32x8 Luma, with 64x4 Chroma). 589 + * Multiple superblock sizes are only valid for multi-plane YCbCr formats. 584 590 */ 585 591 #define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf 586 592 #define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL) 587 593 #define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL) 594 + #define AFBC_FORMAT_MOD_BLOCK_SIZE_64x4 (3ULL) 595 + #define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4 (4ULL) 588 596 589 597 /* 590 598 * AFBC lossless colorspace transform ··· 651 643 * can be reduced if a whole superblock is a single color. 652 644 */ 653 645 #define AFBC_FORMAT_MOD_SC (1ULL << 9) 646 + 647 + /* 648 + * AFBC double-buffer 649 + * 650 + * Indicates that the buffer is allocated in a layout safe for front-buffer 651 + * rendering. 652 + */ 653 + #define AFBC_FORMAT_MOD_DB (1ULL << 10) 654 + 655 + /* 656 + * AFBC buffer content hints 657 + * 658 + * Indicates that the buffer includes per-superblock content hints. 659 + */ 660 + #define AFBC_FORMAT_MOD_BCH (1ULL << 11) 654 661 655 662 #if defined(__cplusplus) 656 663 }
+8
include/uapi/drm/v3d_drm.h
··· 52 52 * 53 53 * This asks the kernel to have the GPU execute an optional binner 54 54 * command list, and a render command list. 55 + * 56 + * The L1T, slice, L2C, L2T, and GCA caches will be flushed before 57 + * each CL executes. The VCD cache should be flushed (if necessary) 58 + * by the submitted CLs. The TLB writes are guaranteed to have been 59 + * flushed by the time the render done IRQ happens, which is the 60 + * trigger for out_sync. Any dirtying of cachelines by the job (only 61 + * possible using TMU writes) must be flushed by the caller using the 62 + * CL's cache flush commands. 55 63 */ 56 64 struct drm_v3d_submit_cl { 57 65 /* Pointer to the binner command list.