Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2017-10-20' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

Final drm-misc feature pull for 4.15:

UAPI Changes:
- new madvise ioctl for vc4 (Boris)

Core Changes:
- plane commit tracking fixes (Maarten)
- vgaarb improvements for fancy new platforms (aka ppc64 and arm64) by
Bjorn Helgaas

Driver Changes:
- pile of new panel drivers: Toshiba LT089AC29000, Innolux AT043TN24
- more sun4i work to support A10/A20 TCON and HDMI outputs
- vc4: fix sleep in irq handler by making it threaded (Eric)
- udl probe/edid read fixes (Robert Tarasov)

And a bunch of misc small cleanups/refactors and doc fixes all over.

* tag 'drm-misc-next-2017-10-20' of git://anongit.freedesktop.org/drm/drm-misc: (32 commits)
drm/vc4: Fix sleeps during the IRQ handler for DSI transactions.
drm/vc4: Add the DRM_IOCTL_VC4_GEM_MADVISE ioctl
drm/panel: simple: add Toshiba LT089AC29000
dma-fence: remove duplicate word in comment
drm/panel: simple: add delays for Innolux AT043TN24
drm/panel: simple: add bus flags for Innolux AT043TN24
drm/panel: simple: fix vertical timings for Innolux AT043TN24
drm/atomic-helper: check that drivers call drm_crtc_vblank_off
drm: some KMS todo ideas
vgaarb: Factor out EFI and fallback default device selection
vgaarb: Select a default VGA device even if there's no legacy VGA
drm/bridge: adv7511: Fix a use after free
drm/sun4i: Add support for A20 display pipeline components
drm/sun4i: Add support for A10 display pipeline components
drm/sun4i: hdmi: Support HDMI controller on A10
drm/sun4i: tcon: Add support for A10 TCON
drm/sun4i: backend: Support output muxing
drm/sun4i: tcon: Move out the tcon0 common setup
drm/sun4i: tcon: Don't rely on encoders to set the TCON mode
drm/sun4i: tcon: Don't rely on encoders to enable the TCON
...

+1160 -362
+8
Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
··· 1 + Toshiba 8.9" WXGA (1280x768) TFT LCD panel 2 + 3 + Required properties: 4 + - compatible: should be "toshiba,lt089ac29000" 5 + - power-supply: as specified in the base binding 6 + 7 + This binding is compatible with the simple-panel binding, which is specified 8 + in simple-panel.txt in this directory.
+9
Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
··· 40 40 41 41 Required properties: 42 42 - compatible: value must be one of: 43 + * allwinner,sun4i-a10-hdmi 43 44 * allwinner,sun5i-a10s-hdmi 44 45 * allwinner,sun6i-a31-hdmi 45 46 - reg: base address and size of memory-mapped region ··· 87 86 88 87 Required properties: 89 88 - compatible: value must be either: 89 + * allwinner,sun4i-a10-tcon 90 90 * allwinner,sun5i-a13-tcon 91 91 * allwinner,sun6i-a31-tcon 92 92 * allwinner,sun6i-a31s-tcon 93 + * allwinner,sun7i-a20-tcon 93 94 * allwinner,sun8i-a33-tcon 94 95 * allwinner,sun8i-v3s-tcon 95 96 - reg: base address and size of memory-mapped region ··· 156 153 157 154 Required properties: 158 155 - compatible: value must be one of: 156 + * allwinner,sun4i-a10-display-backend 159 157 * allwinner,sun5i-a13-display-backend 160 158 * allwinner,sun6i-a31-display-backend 159 + * allwinner,sun7i-a20-display-backend 161 160 * allwinner,sun8i-a33-display-backend 162 161 - reg: base address and size of the memory-mapped region. 163 162 - interrupts: interrupt associated to this IP ··· 190 185 191 186 Required properties: 192 187 - compatible: value must be one of: 188 + * allwinner,sun4i-a10-display-frontend 193 189 * allwinner,sun5i-a13-display-frontend 194 190 * allwinner,sun6i-a31-display-frontend 191 + * allwinner,sun7i-a20-display-frontend 195 192 * allwinner,sun8i-a33-display-frontend 196 193 - reg: base address and size of the memory-mapped region. 197 194 - interrupts: interrupt associated to this IP ··· 238 231 239 232 Required properties: 240 233 - compatible: value must be one of: 234 + * allwinner,sun4i-a10-display-engine 241 235 * allwinner,sun5i-a10s-display-engine 242 236 * allwinner,sun5i-a13-display-engine 243 237 * allwinner,sun6i-a31-display-engine 244 238 * allwinner,sun6i-a31s-display-engine 239 + * allwinner,sun7i-a20-display-engine 245 240 * allwinner,sun8i-a33-display-engine 246 241 * allwinner,sun8i-v3s-display-engine 247 242
+12
Documentation/gpu/todo.rst
··· 304 304 305 305 Contact: Daniel Vetter 306 306 307 + KMS cleanups 308 + ------------ 309 + 310 + Some of these date from the very introduction of KMS in 2008 ... 311 + 312 + - drm_mode_config.crtc_idr is misnamed, since it contains all KMS objects. Should 313 + be renamed to drm_mode_config.object_idr. 314 + 315 + - drm_display_mode doesn't need to be derived from drm_mode_object. That's 316 + leftovers from older (never merged into upstream) KMS designs where modes 317 + were set using their ID, including support to add/remove modes. 318 + 307 319 Better Testing 308 320 ============== 309 321
-12
arch/powerpc/kernel/pci-common.c
··· 1740 1740 } 1741 1741 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); 1742 1742 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); 1743 - 1744 - static void fixup_vga(struct pci_dev *pdev) 1745 - { 1746 - u16 cmd; 1747 - 1748 - pci_read_config_word(pdev, PCI_COMMAND, &cmd); 1749 - if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device()) 1750 - vga_set_default_device(pdev); 1751 - 1752 - } 1753 - DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, 1754 - PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
+11 -11
drivers/gpu/drm/armada/armada_crtc.c
··· 298 298 299 299 if (force) { 300 300 /* Display is disabled, so just drop the old fb */ 301 - drm_framebuffer_unreference(fb); 301 + drm_framebuffer_put(fb); 302 302 return; 303 303 } 304 304 ··· 321 321 * the best. The worst that will happen is the buffer gets 322 322 * reused before it has finished being displayed. 323 323 */ 324 - drm_framebuffer_unreference(fb); 324 + drm_framebuffer_put(fb); 325 325 } 326 326 327 327 static void armada_drm_vblank_off(struct armada_crtc *dcrtc) ··· 577 577 unsigned i; 578 578 bool interlaced; 579 579 580 - drm_framebuffer_reference(crtc->primary->fb); 580 + drm_framebuffer_get(crtc->primary->fb); 581 581 582 582 interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE); 583 583 ··· 718 718 MAX_SCHEDULE_TIMEOUT); 719 719 720 720 /* Take a reference to the new fb as we're using it */ 721 - drm_framebuffer_reference(crtc->primary->fb); 721 + drm_framebuffer_get(crtc->primary->fb); 722 722 723 723 /* Update the base in the CRTC */ 724 724 armada_drm_crtc_update_regs(dcrtc, regs); ··· 742 742 * primary plane. 
743 743 */ 744 744 if (plane->fb) 745 - drm_framebuffer_unreference(plane->fb); 745 + drm_framebuffer_put(plane->fb); 746 746 747 747 /* Power down the Y/U/V FIFOs */ 748 748 sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66; ··· 947 947 948 948 /* Must be a kernel-mapped object */ 949 949 if (!obj->addr) { 950 - drm_gem_object_unreference_unlocked(&obj->obj); 950 + drm_gem_object_put_unlocked(&obj->obj); 951 951 return -EINVAL; 952 952 } 953 953 954 954 if (obj->obj.size < w * h * 4) { 955 955 DRM_ERROR("buffer is too small\n"); 956 - drm_gem_object_unreference_unlocked(&obj->obj); 956 + drm_gem_object_put_unlocked(&obj->obj); 957 957 return -ENOMEM; 958 958 } 959 959 } ··· 961 961 if (dcrtc->cursor_obj) { 962 962 dcrtc->cursor_obj->update = NULL; 963 963 dcrtc->cursor_obj->update_data = NULL; 964 - drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj); 964 + drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj); 965 965 } 966 966 dcrtc->cursor_obj = obj; 967 967 dcrtc->cursor_w = w; ··· 997 997 struct armada_private *priv = crtc->dev->dev_private; 998 998 999 999 if (dcrtc->cursor_obj) 1000 - drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj); 1000 + drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj); 1001 1001 1002 1002 priv->dcrtc[dcrtc->num] = NULL; 1003 1003 drm_crtc_cleanup(&dcrtc->crtc); ··· 1045 1045 * Ensure that we hold a reference on the new framebuffer. 1046 1046 * This has to match the behaviour in mode_set. 1047 1047 */ 1048 - drm_framebuffer_reference(fb); 1048 + drm_framebuffer_get(fb); 1049 1049 1050 1050 ret = armada_drm_crtc_queue_frame_work(dcrtc, work); 1051 1051 if (ret) { 1052 1052 /* Undo our reference above */ 1053 - drm_framebuffer_unreference(fb); 1053 + drm_framebuffer_put(fb); 1054 1054 kfree(work); 1055 1055 return ret; 1056 1056 }
+1 -1
drivers/gpu/drm/armada/armada_drv.c
··· 25 25 struct drm_framebuffer *fb; 26 26 27 27 while (kfifo_get(&priv->fb_unref, &fb)) 28 - drm_framebuffer_unreference(fb); 28 + drm_framebuffer_put(fb); 29 29 } 30 30 31 31 /* Must be called with dev->event_lock held */
+4 -4
drivers/gpu/drm/armada/armada_fb.c
··· 17 17 struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb); 18 18 19 19 drm_framebuffer_cleanup(&dfb->fb); 20 - drm_gem_object_unreference_unlocked(&dfb->obj->obj); 20 + drm_gem_object_put_unlocked(&dfb->obj->obj); 21 21 kfree(dfb); 22 22 } 23 23 ··· 94 94 * the above call, but the caller will drop their reference 95 95 * to it. Hence we need to take our own reference. 96 96 */ 97 - drm_gem_object_reference(&obj->obj); 97 + drm_gem_object_get(&obj->obj); 98 98 99 99 return dfb; 100 100 } ··· 143 143 goto err; 144 144 } 145 145 146 - drm_gem_object_unreference_unlocked(&obj->obj); 146 + drm_gem_object_put_unlocked(&obj->obj); 147 147 148 148 return &dfb->fb; 149 149 150 150 err_unref: 151 - drm_gem_object_unreference_unlocked(&obj->obj); 151 + drm_gem_object_put_unlocked(&obj->obj); 152 152 err: 153 153 DRM_ERROR("failed to initialize framebuffer: %d\n", ret); 154 154 return ERR_PTR(ret);
+3 -3
drivers/gpu/drm/armada/armada_fbdev.c
··· 51 51 52 52 ret = armada_gem_linear_back(dev, obj); 53 53 if (ret) { 54 - drm_gem_object_unreference_unlocked(&obj->obj); 54 + drm_gem_object_put_unlocked(&obj->obj); 55 55 return ret; 56 56 } 57 57 58 58 ptr = armada_gem_map_object(dev, obj); 59 59 if (!ptr) { 60 - drm_gem_object_unreference_unlocked(&obj->obj); 60 + drm_gem_object_put_unlocked(&obj->obj); 61 61 return -ENOMEM; 62 62 } 63 63 ··· 67 67 * A reference is now held by the framebuffer object if 68 68 * successful, otherwise this drops the ref for the error path. 69 69 */ 70 - drm_gem_object_unreference_unlocked(&obj->obj); 70 + drm_gem_object_put_unlocked(&obj->obj); 71 71 72 72 if (IS_ERR(dfb)) 73 73 return PTR_ERR(dfb);
+6 -6
drivers/gpu/drm/armada/armada_gem.c
··· 265 265 /* drop reference from allocate - handle holds it now */ 266 266 DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle); 267 267 err: 268 - drm_gem_object_unreference_unlocked(&dobj->obj); 268 + drm_gem_object_put_unlocked(&dobj->obj); 269 269 return ret; 270 270 } 271 271 ··· 297 297 /* drop reference from allocate - handle holds it now */ 298 298 DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle); 299 299 err: 300 - drm_gem_object_unreference_unlocked(&dobj->obj); 300 + drm_gem_object_put_unlocked(&dobj->obj); 301 301 return ret; 302 302 } 303 303 ··· 314 314 return -ENOENT; 315 315 316 316 if (!dobj->obj.filp) { 317 - drm_gem_object_unreference_unlocked(&dobj->obj); 317 + drm_gem_object_put_unlocked(&dobj->obj); 318 318 return -EINVAL; 319 319 } 320 320 321 321 addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE, 322 322 MAP_SHARED, args->offset); 323 - drm_gem_object_unreference_unlocked(&dobj->obj); 323 + drm_gem_object_put_unlocked(&dobj->obj); 324 324 if (IS_ERR_VALUE(addr)) 325 325 return addr; 326 326 ··· 375 375 } 376 376 377 377 unref: 378 - drm_gem_object_unreference_unlocked(&dobj->obj); 378 + drm_gem_object_put_unlocked(&dobj->obj); 379 379 return ret; 380 380 } 381 381 ··· 524 524 * Importing our own dmabuf(s) increases the 525 525 * refcount on the gem object itself. 526 526 */ 527 - drm_gem_object_reference(obj); 527 + drm_gem_object_get(obj); 528 528 return obj; 529 529 } 530 530 }
+2 -2
drivers/gpu/drm/armada/armada_overlay.c
··· 177 177 * Take a reference on the new framebuffer - we want to 178 178 * hold on to it while the hardware is displaying it. 179 179 */ 180 - drm_framebuffer_reference(fb); 180 + drm_framebuffer_get(fb); 181 181 182 182 if (plane->fb) 183 183 armada_ovl_retire_fb(dplane, plane->fb); ··· 278 278 279 279 fb = xchg(&dplane->old_fb, NULL); 280 280 if (fb) 281 - drm_framebuffer_unreference(fb); 281 + drm_framebuffer_put(fb); 282 282 283 283 return 0; 284 284 }
+2 -2
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 607 607 adv7511_set_config_csc(adv7511, connector, adv7511->rgb, 608 608 drm_detect_hdmi_monitor(edid)); 609 609 610 - kfree(edid); 611 - 612 610 cec_s_phys_addr_from_edid(adv7511->cec_adap, edid); 611 + 612 + kfree(edid); 613 613 614 614 return count; 615 615 }
+18 -10
drivers/gpu/drm/drm_atomic_helper.c
··· 860 860 861 861 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { 862 862 const struct drm_crtc_helper_funcs *funcs; 863 + int ret; 863 864 864 865 /* Shut down everything that needs a full modeset. */ 865 866 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) ··· 884 883 funcs->disable(crtc); 885 884 else 886 885 funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 886 + 887 + if (!(dev->irq_enabled && dev->num_crtcs)) 888 + continue; 889 + 890 + ret = drm_crtc_vblank_get(crtc); 891 + WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n"); 892 + if (ret == 0) 893 + drm_crtc_vblank_put(crtc); 887 894 } 888 895 } 889 896 ··· 1781 1772 } 1782 1773 1783 1774 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { 1784 - /* commit tracked through new_crtc_state->commit, no need to do it explicitly */ 1785 - if (new_conn_state->crtc) 1786 - continue; 1787 - 1788 1775 /* Userspace is not allowed to get ahead of the previous 1789 1776 * commit with nonblocking ones. */ 1790 1777 if (nonblock && old_conn_state->commit && 1791 1778 !try_wait_for_completion(&old_conn_state->commit->flip_done)) 1792 1779 return -EBUSY; 1780 + 1781 + /* commit tracked through new_crtc_state->commit, no need to do it explicitly */ 1782 + if (new_conn_state->crtc) 1783 + continue; 1793 1784 1794 1785 commit = crtc_or_fake_commit(state, old_conn_state->crtc); 1795 1786 if (!commit) ··· 1799 1790 } 1800 1791 1801 1792 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 1802 - /* 1803 - * Unlike connectors, always track planes explicitly for 1804 - * async pageflip support. 1805 - */ 1806 - 1807 1793 /* Userspace is not allowed to get ahead of the previous 1808 1794 * commit with nonblocking ones. 
*/ 1809 1795 if (nonblock && old_plane_state->commit && 1810 1796 !try_wait_for_completion(&old_plane_state->commit->flip_done)) 1811 1797 return -EBUSY; 1812 1798 1813 - commit = crtc_or_fake_commit(state, old_plane_state->crtc); 1799 + /* 1800 + * Unlike connectors, always track planes explicitly for 1801 + * async pageflip support. 1802 + */ 1803 + commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc); 1814 1804 if (!commit) 1815 1805 return -ENOMEM; 1816 1806
+1 -1
drivers/gpu/drm/drm_gem_cma_helper.c
··· 112 112 cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr, 113 113 GFP_KERNEL | __GFP_NOWARN); 114 114 if (!cma_obj->vaddr) { 115 - dev_err(drm->dev, "failed to allocate buffer with size %zu\n", 115 + dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n", 116 116 size); 117 117 ret = -ENOMEM; 118 118 goto error;
+34 -2
drivers/gpu/drm/panel/panel-simple.c
··· 1008 1008 .width = 195, 1009 1009 .height = 117, 1010 1010 }, 1011 + .delay = { 1012 + .enable = 160, 1013 + .disable = 160, 1014 + }, 1011 1015 }; 1012 1016 1013 1017 static const struct drm_display_mode innolux_at043tn24_mode = { ··· 1022 1018 .htotal = 480 + 2 + 41 + 2, 1023 1019 .vdisplay = 272, 1024 1020 .vsync_start = 272 + 2, 1025 - .vsync_end = 272 + 2 + 11, 1026 - .vtotal = 272 + 2 + 11 + 2, 1021 + .vsync_end = 272 + 2 + 10, 1022 + .vtotal = 272 + 2 + 10 + 2, 1027 1023 .vrefresh = 60, 1028 1024 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 1029 1025 }; ··· 1037 1033 .height = 54, 1038 1034 }, 1039 1035 .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 1036 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, 1040 1037 }; 1041 1038 1042 1039 static const struct drm_display_mode innolux_at070tn92_mode = { ··· 1837 1832 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 1838 1833 }; 1839 1834 1835 + static const struct drm_display_mode toshiba_lt089ac29000_mode = { 1836 + .clock = 79500, 1837 + .hdisplay = 1280, 1838 + .hsync_start = 1280 + 192, 1839 + .hsync_end = 1280 + 192 + 128, 1840 + .htotal = 1280 + 192 + 128 + 64, 1841 + .vdisplay = 768, 1842 + .vsync_start = 768 + 20, 1843 + .vsync_end = 768 + 20 + 7, 1844 + .vtotal = 768 + 20 + 7 + 3, 1845 + .vrefresh = 60, 1846 + }; 1847 + 1848 + static const struct panel_desc toshiba_lt089ac29000 = { 1849 + .modes = &toshiba_lt089ac29000_mode, 1850 + .num_modes = 1, 1851 + .size = { 1852 + .width = 194, 1853 + .height = 116, 1854 + }, 1855 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 1856 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, 1857 + }; 1858 + 1840 1859 static const struct drm_display_mode tpk_f07a_0102_mode = { 1841 1860 .clock = 33260, 1842 1861 .hdisplay = 800, ··· 2142 2113 }, { 2143 2114 .compatible = "tianma,tm070jdhg30", 2144 2115 .data = &tianma_tm070jdhg30, 2116 + }, { 2117 + .compatible = "toshiba,lt089ac29000", 2118 + .data = &toshiba_lt089ac29000, 2145 2119 }, { 
2146 2120 .compatible = "tpk,f07a-0102", 2147 2121 .data = &tpk_f07a_0102,
+17 -16
drivers/gpu/drm/sun4i/Makefile
··· 1 - sun4i-drm-y += sun4i_drv.o 2 - sun4i-drm-y += sun4i_framebuffer.o 1 + sun4i-backend-y += sun4i_backend.o sun4i_layer.o 3 2 4 - sun4i-drm-hdmi-y += sun4i_hdmi_enc.o 5 - sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o 6 - sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o 7 - sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o 3 + sun4i-drm-y += sun4i_drv.o 4 + sun4i-drm-y += sun4i_framebuffer.o 8 5 9 - sun4i-tcon-y += sun4i_tcon.o 10 - sun4i-tcon-y += sun4i_rgb.o 11 - sun4i-tcon-y += sun4i_dotclock.o 12 - sun4i-tcon-y += sun4i_crtc.o 6 + sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o 7 + sun4i-drm-hdmi-y += sun4i_hdmi_enc.o 8 + sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o 9 + sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o 13 10 14 - sun4i-backend-y += sun4i_backend.o sun4i_layer.o 11 + sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o 15 12 16 - sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o 13 + sun4i-tcon-y += sun4i_crtc.o 14 + sun4i-tcon-y += sun4i_dotclock.o 15 + sun4i-tcon-y += sun4i_tcon.o 16 + sun4i-tcon-y += sun4i_rgb.o 17 17 18 - obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o 19 - obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o 18 + obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o 19 + obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o 20 20 obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o 21 + obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o 21 22 22 - obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o 23 + obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o 23 24 obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o 24 - obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o 25 + obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
+72 -3
drivers/gpu/drm/sun4i/sun4i_backend.c
··· 20 20 21 21 #include <linux/component.h> 22 22 #include <linux/list.h> 23 + #include <linux/of_device.h> 23 24 #include <linux/of_graph.h> 24 25 #include <linux/reset.h> 25 26 ··· 28 27 #include "sun4i_drv.h" 29 28 #include "sun4i_layer.h" 30 29 #include "sunxi_engine.h" 30 + 31 + struct sun4i_backend_quirks { 32 + /* backend <-> TCON muxing selection done in backend */ 33 + bool needs_output_muxing; 34 + }; 31 35 32 36 static const u32 sunxi_rgb2yuv_coef[12] = { 33 37 0x00000107, 0x00000204, 0x00000064, 0x00000108, ··· 222 216 paddr = drm_fb_cma_get_gem_addr(fb, state, 0); 223 217 DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr); 224 218 219 + /* 220 + * backend DMA accesses DRAM directly, bypassing the system 221 + * bus. As such, the address range is different and the buffer 222 + * address needs to be corrected. 223 + */ 224 + paddr -= PHYS_OFFSET; 225 + 225 226 /* Write the 32 lower bits of the address (in bits) */ 226 227 lo_paddr = paddr << 3; 227 228 DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr); ··· 351 338 struct drm_device *drm = data; 352 339 struct sun4i_drv *drv = drm->dev_private; 353 340 struct sun4i_backend *backend; 341 + const struct sun4i_backend_quirks *quirks; 354 342 struct resource *res; 355 343 void __iomem *regs; 356 344 int i, ret; ··· 446 432 SUN4I_BACKEND_MODCTL_DEBE_EN | 447 433 SUN4I_BACKEND_MODCTL_START_CTL); 448 434 435 + /* Set output selection if needed */ 436 + quirks = of_device_get_match_data(dev); 437 + if (quirks->needs_output_muxing) { 438 + /* 439 + * We assume there is no dynamic muxing of backends 440 + * and TCONs, so we select the backend with same ID. 441 + * 442 + * While dynamic selection might be interesting, since 443 + * the CRTC is tied to the TCON, while the layers are 444 + * tied to the backends, this means, we will need to 445 + * switch between groups of layers. There might not be 446 + * a way to represent this constraint in DRM. 
447 + */ 448 + regmap_update_bits(backend->engine.regs, 449 + SUN4I_BACKEND_MODCTL_REG, 450 + SUN4I_BACKEND_MODCTL_OUT_SEL, 451 + (backend->engine.id 452 + ? SUN4I_BACKEND_MODCTL_OUT_LCD1 453 + : SUN4I_BACKEND_MODCTL_OUT_LCD0)); 454 + } 455 + 449 456 return 0; 450 457 451 458 err_disable_ram_clk: ··· 514 479 return 0; 515 480 } 516 481 482 + static const struct sun4i_backend_quirks sun4i_backend_quirks = { 483 + .needs_output_muxing = true, 484 + }; 485 + 486 + static const struct sun4i_backend_quirks sun5i_backend_quirks = { 487 + }; 488 + 489 + static const struct sun4i_backend_quirks sun6i_backend_quirks = { 490 + }; 491 + 492 + static const struct sun4i_backend_quirks sun7i_backend_quirks = { 493 + .needs_output_muxing = true, 494 + }; 495 + 496 + static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = { 497 + }; 498 + 517 499 static const struct of_device_id sun4i_backend_of_table[] = { 518 - { .compatible = "allwinner,sun5i-a13-display-backend" }, 519 - { .compatible = "allwinner,sun6i-a31-display-backend" }, 520 - { .compatible = "allwinner,sun8i-a33-display-backend" }, 500 + { 501 + .compatible = "allwinner,sun4i-a10-display-backend", 502 + .data = &sun4i_backend_quirks, 503 + }, 504 + { 505 + .compatible = "allwinner,sun5i-a13-display-backend", 506 + .data = &sun5i_backend_quirks, 507 + }, 508 + { 509 + .compatible = "allwinner,sun6i-a31-display-backend", 510 + .data = &sun6i_backend_quirks, 511 + }, 512 + { 513 + .compatible = "allwinner,sun7i-a20-display-backend", 514 + .data = &sun7i_backend_quirks, 515 + }, 516 + { 517 + .compatible = "allwinner,sun8i-a33-display-backend", 518 + .data = &sun8i_a33_backend_quirks, 519 + }, 521 520 { } 522 521 }; 523 522 MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
+2 -1
drivers/gpu/drm/sun4i/sun4i_backend.h
··· 25 25 #define SUN4I_BACKEND_MODCTL_LINE_SEL BIT(29) 26 26 #define SUN4I_BACKEND_MODCTL_ITLMOD_EN BIT(28) 27 27 #define SUN4I_BACKEND_MODCTL_OUT_SEL GENMASK(22, 20) 28 - #define SUN4I_BACKEND_MODCTL_OUT_LCD (0 << 20) 28 + #define SUN4I_BACKEND_MODCTL_OUT_LCD0 (0 << 20) 29 + #define SUN4I_BACKEND_MODCTL_OUT_LCD1 (1 << 20) 29 30 #define SUN4I_BACKEND_MODCTL_OUT_FE0 (6 << 20) 30 31 #define SUN4I_BACKEND_MODCTL_OUT_FE1 (7 << 20) 31 32 #define SUN4I_BACKEND_MODCTL_HWC_EN BIT(16)
+30 -2
drivers/gpu/drm/sun4i/sun4i_crtc.c
··· 30 30 #include "sunxi_engine.h" 31 31 #include "sun4i_tcon.h" 32 32 33 + /* 34 + * While this isn't really working in the DRM theory, in practice we 35 + * can only ever have one encoder per TCON since we have a mux in our 36 + * TCON. 37 + */ 38 + static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc) 39 + { 40 + struct drm_encoder *encoder; 41 + 42 + drm_for_each_encoder(encoder, crtc->dev) 43 + if (encoder->crtc == crtc) 44 + return encoder; 45 + 46 + return NULL; 47 + } 48 + 33 49 static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc, 34 50 struct drm_crtc_state *old_state) 35 51 { ··· 88 72 static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc, 89 73 struct drm_crtc_state *old_state) 90 74 { 75 + struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc); 91 76 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 92 77 93 78 DRM_DEBUG_DRIVER("Disabling the CRTC\n"); 94 79 95 - sun4i_tcon_disable(scrtc->tcon); 80 + sun4i_tcon_set_status(scrtc->tcon, encoder, false); 96 81 97 82 if (crtc->state->event && !crtc->state->active) { 98 83 spin_lock_irq(&crtc->dev->event_lock); ··· 107 90 static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc, 108 91 struct drm_crtc_state *old_state) 109 92 { 93 + struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc); 110 94 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 111 95 112 96 DRM_DEBUG_DRIVER("Enabling the CRTC\n"); 113 97 114 - sun4i_tcon_enable(scrtc->tcon); 98 + sun4i_tcon_set_status(scrtc->tcon, encoder, true); 99 + } 100 + 101 + static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc) 102 + { 103 + struct drm_display_mode *mode = &crtc->state->adjusted_mode; 104 + struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc); 105 + struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 106 + 107 + sun4i_tcon_mode_set(scrtc->tcon, encoder, mode); 115 108 } 116 109 117 110 static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = { ··· 129 102 .atomic_flush 
= sun4i_crtc_atomic_flush, 130 103 .atomic_enable = sun4i_crtc_atomic_enable, 131 104 .atomic_disable = sun4i_crtc_atomic_disable, 105 + .mode_set_nofb = sun4i_crtc_mode_set_nofb, 132 106 }; 133 107 134 108 static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
+21 -60
drivers/gpu/drm/sun4i/sun4i_drv.c
··· 11 11 */ 12 12 13 13 #include <linux/component.h> 14 + #include <linux/kfifo.h> 14 15 #include <linux/of_graph.h> 15 16 #include <linux/of_reserved_mem.h> 16 17 ··· 178 177 179 178 static bool sun4i_drv_node_is_frontend(struct device_node *node) 180 179 { 181 - return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") || 180 + return of_device_is_compatible(node, "allwinner,sun4i-a10-display-frontend") || 181 + of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") || 182 182 of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") || 183 + of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") || 183 184 of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend"); 184 185 } 185 186 186 187 static bool sun4i_drv_node_is_tcon(struct device_node *node) 187 188 { 188 - return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") || 189 + return of_device_is_compatible(node, "allwinner,sun4i-a10-tcon") || 190 + of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") || 189 191 of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") || 190 192 of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") || 193 + of_device_is_compatible(node, "allwinner,sun7i-a20-tcon") || 191 194 of_device_is_compatible(node, "allwinner,sun8i-a33-tcon") || 192 195 of_device_is_compatible(node, "allwinner,sun8i-v3s-tcon"); 193 196 } ··· 227 222 * matching system handles this for us. 
228 223 */ 229 224 struct endpoint_list { 230 - struct device_node *node; 231 - struct list_head list; 225 + DECLARE_KFIFO(fifo, struct device_node *, 16); 232 226 }; 233 227 234 - static bool node_is_in_list(struct list_head *endpoints, 235 - struct device_node *node) 236 - { 237 - struct endpoint_list *endpoint; 238 - 239 - list_for_each_entry(endpoint, endpoints, list) 240 - if (endpoint->node == node) 241 - return true; 242 - 243 - return false; 244 - } 245 - 246 228 static int sun4i_drv_add_endpoints(struct device *dev, 247 - struct list_head *endpoints, 229 + struct endpoint_list *list, 248 230 struct component_match **match, 249 231 struct device_node *node) 250 232 { 251 233 struct device_node *port, *ep, *remote; 252 - struct endpoint_list *endpoint; 253 234 int count = 0; 254 235 255 236 /* ··· 295 304 } 296 305 } 297 306 298 - /* skip downstream node if it is already in the queue */ 299 - if (node_is_in_list(endpoints, remote)) 300 - continue; 301 - 302 - /* Add downstream nodes to the queue */ 303 - endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL); 304 - if (!endpoint) { 305 - of_node_put(remote); 306 - return -ENOMEM; 307 - } 308 - 309 - endpoint->node = remote; 310 - list_add_tail(&endpoint->list, endpoints); 307 + kfifo_put(&list->fifo, remote); 311 308 } 312 309 313 310 return count; ··· 304 325 static int sun4i_drv_probe(struct platform_device *pdev) 305 326 { 306 327 struct component_match *match = NULL; 307 - struct device_node *np = pdev->dev.of_node; 308 - struct endpoint_list *endpoint, *endpoint_temp; 328 + struct device_node *np = pdev->dev.of_node, *endpoint; 329 + struct endpoint_list list; 309 330 int i, ret, count = 0; 310 - LIST_HEAD(endpoints); 331 + 332 + INIT_KFIFO(list.fifo); 311 333 312 334 for (i = 0;; i++) { 313 335 struct device_node *pipeline = of_parse_phandle(np, ··· 317 337 if (!pipeline) 318 338 break; 319 339 320 - endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL); 321 - if (!endpoint) { 322 - ret = -ENOMEM; 323 - goto 
err_free_endpoints; 324 - } 325 - 326 - endpoint->node = pipeline; 327 - list_add_tail(&endpoint->list, &endpoints); 340 + kfifo_put(&list.fifo, pipeline); 328 341 } 329 342 330 - list_for_each_entry_safe(endpoint, endpoint_temp, &endpoints, list) { 343 + while (kfifo_get(&list.fifo, &endpoint)) { 331 344 /* process this endpoint */ 332 - ret = sun4i_drv_add_endpoints(&pdev->dev, &endpoints, &match, 333 - endpoint->node); 345 + ret = sun4i_drv_add_endpoints(&pdev->dev, &list, &match, 346 + endpoint); 334 347 335 348 /* sun4i_drv_add_endpoints can fail to allocate memory */ 336 349 if (ret < 0) 337 - goto err_free_endpoints; 350 + return ret; 338 351 339 352 count += ret; 340 - 341 - /* delete and cleanup the current entry */ 342 - list_del(&endpoint->list); 343 - of_node_put(endpoint->node); 344 - kfree(endpoint); 345 353 } 346 354 347 355 if (count) ··· 338 370 match); 339 371 else 340 372 return 0; 341 - 342 - err_free_endpoints: 343 - list_for_each_entry_safe(endpoint, endpoint_temp, &endpoints, list) { 344 - list_del(&endpoint->list); 345 - of_node_put(endpoint->node); 346 - kfree(endpoint); 347 - } 348 - 349 - return ret; 350 373 } 351 374 352 375 static int sun4i_drv_remove(struct platform_device *pdev) ··· 346 387 } 347 388 348 389 static const struct of_device_id sun4i_drv_of_table[] = { 390 + { .compatible = "allwinner,sun4i-a10-display-engine" }, 349 391 { .compatible = "allwinner,sun5i-a10s-display-engine" }, 350 392 { .compatible = "allwinner,sun5i-a13-display-engine" }, 351 393 { .compatible = "allwinner,sun6i-a31-display-engine" }, 352 394 { .compatible = "allwinner,sun6i-a31s-display-engine" }, 395 + { .compatible = "allwinner,sun7i-a20-display-engine" }, 353 396 { .compatible = "allwinner,sun8i-a33-display-engine" }, 354 397 { .compatible = "allwinner,sun8i-v3s-display-engine" }, 355 398 { }
-1
drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
··· 13 13 #include <linux/clk-provider.h> 14 14 #include <linux/regmap.h> 15 15 16 - #include "sun4i_tcon.h" 17 16 #include "sun4i_hdmi.h" 18 17 19 18 struct sun4i_ddc {
+53 -15
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
··· 30 30 #include "sun4i_crtc.h" 31 31 #include "sun4i_drv.h" 32 32 #include "sun4i_hdmi.h" 33 - #include "sun4i_tcon.h" 34 33 35 34 static inline struct sun4i_hdmi * 36 35 drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder) ··· 85 86 static void sun4i_hdmi_disable(struct drm_encoder *encoder) 86 87 { 87 88 struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); 88 - struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); 89 - struct sun4i_tcon *tcon = crtc->tcon; 90 89 u32 val; 91 90 92 91 DRM_DEBUG_DRIVER("Disabling the HDMI Output\n"); ··· 92 95 val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); 93 96 val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; 94 97 writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); 95 - 96 - sun4i_tcon_channel_disable(tcon, 1); 97 98 } 98 99 99 100 static void sun4i_hdmi_enable(struct drm_encoder *encoder) 100 101 { 101 102 struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; 102 103 struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); 103 - struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); 104 - struct sun4i_tcon *tcon = crtc->tcon; 105 104 u32 val = 0; 106 105 107 106 DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); 108 - 109 - sun4i_tcon_channel_enable(tcon, 1); 110 107 111 108 sun4i_hdmi_setup_avi_infoframes(hdmi, mode); 112 109 val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); ··· 119 128 struct drm_display_mode *adjusted_mode) 120 129 { 121 130 struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); 122 - struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); 123 - struct sun4i_tcon *tcon = crtc->tcon; 124 131 unsigned int x, y; 125 132 u32 val; 126 133 127 - sun4i_tcon1_mode_set(tcon, mode); 128 - sun4i_tcon_set_mux(tcon, 1, encoder); 129 - 130 - clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000); 131 134 clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000); 132 135 clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000); 133 136 ··· 273 288 274 289 #define 
SUN4I_HDMI_PAD_CTRL1_MASK (GENMASK(24, 7) | GENMASK(5, 0)) 275 290 #define SUN4I_HDMI_PLL_CTRL_MASK (GENMASK(31, 8) | GENMASK(3, 0)) 291 + 292 + /* Only difference from sun5i is AMP is 4 instead of 6 */ 293 + static const struct sun4i_hdmi_variant sun4i_variant = { 294 + .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN | 295 + SUN4I_HDMI_PAD_CTRL0_CKEN | 296 + SUN4I_HDMI_PAD_CTRL0_PWENG | 297 + SUN4I_HDMI_PAD_CTRL0_PWEND | 298 + SUN4I_HDMI_PAD_CTRL0_PWENC | 299 + SUN4I_HDMI_PAD_CTRL0_LDODEN | 300 + SUN4I_HDMI_PAD_CTRL0_LDOCEN | 301 + SUN4I_HDMI_PAD_CTRL0_BIASEN, 302 + .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(4) | 303 + SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) | 304 + SUN4I_HDMI_PAD_CTRL1_REG_DENCK | 305 + SUN4I_HDMI_PAD_CTRL1_REG_DEN | 306 + SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT | 307 + SUN4I_HDMI_PAD_CTRL1_EMP_OPT | 308 + SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT | 309 + SUN4I_HDMI_PAD_CTRL1_AMP_OPT, 310 + .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) | 311 + SUN4I_HDMI_PLL_CTRL_CS(7) | 312 + SUN4I_HDMI_PLL_CTRL_CP_S(15) | 313 + SUN4I_HDMI_PLL_CTRL_S(7) | 314 + SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) | 315 + SUN4I_HDMI_PLL_CTRL_SDIV2 | 316 + SUN4I_HDMI_PLL_CTRL_LDO2_EN | 317 + SUN4I_HDMI_PLL_CTRL_LDO1_EN | 318 + SUN4I_HDMI_PLL_CTRL_HV_IS_33 | 319 + SUN4I_HDMI_PLL_CTRL_BWS | 320 + SUN4I_HDMI_PLL_CTRL_PLL_EN, 321 + 322 + .ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6), 323 + .ddc_clk_pre_divider = 2, 324 + .ddc_clk_m_offset = 1, 325 + 326 + .field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31), 327 + .field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30), 328 + .field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0), 329 + .field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31), 330 + .field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6), 331 + .field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8), 332 + .field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31), 333 + .field_ddc_fifo_rx_thres = 
REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7), 334 + .field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3), 335 + .field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9), 336 + .field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2), 337 + .field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9), 338 + .field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8), 339 + 340 + .ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG, 341 + .ddc_fifo_has_dir = true, 342 + }; 276 343 277 344 static const struct sun4i_hdmi_variant sun5i_variant = { 278 345 .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN | ··· 650 613 } 651 614 652 615 static const struct of_device_id sun4i_hdmi_of_table[] = { 616 + { .compatible = "allwinner,sun4i-a10-hdmi", .data = &sun4i_variant, }, 653 617 { .compatible = "allwinner,sun5i-a10s-hdmi", .data = &sun5i_variant, }, 654 618 { .compatible = "allwinner,sun6i-a31-hdmi", .data = &sun6i_variant, }, 655 619 { }
-1
drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
··· 12 12 13 13 #include <linux/clk-provider.h> 14 14 15 - #include "sun4i_tcon.h" 16 15 #include "sun4i_hdmi.h" 17 16 18 17 struct sun4i_tmds {
+4 -25
drivers/gpu/drm/sun4i/sun4i_rgb.c
··· 134 134 135 135 DRM_DEBUG_DRIVER("Enabling RGB output\n"); 136 136 137 - if (!IS_ERR(tcon->panel)) 137 + if (!IS_ERR(tcon->panel)) { 138 138 drm_panel_prepare(tcon->panel); 139 - 140 - sun4i_tcon_channel_enable(tcon, 0); 141 - 142 - if (!IS_ERR(tcon->panel)) 143 139 drm_panel_enable(tcon->panel); 140 + } 144 141 } 145 142 146 143 static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) ··· 147 150 148 151 DRM_DEBUG_DRIVER("Disabling RGB output\n"); 149 152 150 - if (!IS_ERR(tcon->panel)) 153 + if (!IS_ERR(tcon->panel)) { 151 154 drm_panel_disable(tcon->panel); 152 - 153 - sun4i_tcon_channel_disable(tcon, 0); 154 - 155 - if (!IS_ERR(tcon->panel)) 156 155 drm_panel_unprepare(tcon->panel); 157 - } 158 - 159 - static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, 160 - struct drm_display_mode *mode, 161 - struct drm_display_mode *adjusted_mode) 162 - { 163 - struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); 164 - struct sun4i_tcon *tcon = rgb->tcon; 165 - 166 - sun4i_tcon0_mode_set(tcon, mode); 167 - sun4i_tcon_set_mux(tcon, 0, encoder); 168 - 169 - /* FIXME: This seems to be board specific */ 170 - clk_set_phase(tcon->dclk, 120); 156 + } 171 157 } 172 158 173 159 static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = { 174 - .mode_set = sun4i_rgb_encoder_mode_set, 175 160 .disable = sun4i_rgb_encoder_disable, 176 161 .enable = sun4i_rgb_encoder_enable, 177 162 };
+125 -70
drivers/gpu/drm/sun4i/sun4i_tcon.c
··· 35 35 #include "sun4i_tcon.h" 36 36 #include "sunxi_engine.h" 37 37 38 - void sun4i_tcon_disable(struct sun4i_tcon *tcon) 38 + static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel, 39 + bool enabled) 39 40 { 40 - DRM_DEBUG_DRIVER("Disabling TCON\n"); 41 + struct clk *clk; 41 42 42 - /* Disable the TCON */ 43 - regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, 44 - SUN4I_TCON_GCTL_TCON_ENABLE, 0); 45 - } 46 - EXPORT_SYMBOL(sun4i_tcon_disable); 47 - 48 - void sun4i_tcon_enable(struct sun4i_tcon *tcon) 49 - { 50 - DRM_DEBUG_DRIVER("Enabling TCON\n"); 51 - 52 - /* Enable the TCON */ 53 - regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, 54 - SUN4I_TCON_GCTL_TCON_ENABLE, 55 - SUN4I_TCON_GCTL_TCON_ENABLE); 56 - } 57 - EXPORT_SYMBOL(sun4i_tcon_enable); 58 - 59 - void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel) 60 - { 61 - DRM_DEBUG_DRIVER("Disabling TCON channel %d\n", channel); 62 - 63 - /* Disable the TCON's channel */ 64 - if (channel == 0) { 65 - regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, 66 - SUN4I_TCON0_CTL_TCON_ENABLE, 0); 67 - clk_disable_unprepare(tcon->dclk); 68 - return; 69 - } 70 - 71 - WARN_ON(!tcon->quirks->has_channel_1); 72 - regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG, 73 - SUN4I_TCON1_CTL_TCON_ENABLE, 0); 74 - clk_disable_unprepare(tcon->sclk1); 75 - } 76 - EXPORT_SYMBOL(sun4i_tcon_channel_disable); 77 - 78 - void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel) 79 - { 80 - DRM_DEBUG_DRIVER("Enabling TCON channel %d\n", channel); 81 - 82 - /* Enable the TCON's channel */ 83 - if (channel == 0) { 43 + switch (channel) { 44 + case 0: 84 45 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, 85 46 SUN4I_TCON0_CTL_TCON_ENABLE, 86 - SUN4I_TCON0_CTL_TCON_ENABLE); 87 - clk_prepare_enable(tcon->dclk); 47 + enabled ? 
SUN4I_TCON0_CTL_TCON_ENABLE : 0); 48 + clk = tcon->dclk; 49 + break; 50 + case 1: 51 + WARN_ON(!tcon->quirks->has_channel_1); 52 + regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG, 53 + SUN4I_TCON1_CTL_TCON_ENABLE, 54 + enabled ? SUN4I_TCON1_CTL_TCON_ENABLE : 0); 55 + clk = tcon->sclk1; 56 + break; 57 + default: 58 + DRM_WARN("Unknown channel... doing nothing\n"); 88 59 return; 89 60 } 90 61 91 - WARN_ON(!tcon->quirks->has_channel_1); 92 - regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG, 93 - SUN4I_TCON1_CTL_TCON_ENABLE, 94 - SUN4I_TCON1_CTL_TCON_ENABLE); 95 - clk_prepare_enable(tcon->sclk1); 62 + if (enabled) 63 + clk_prepare_enable(clk); 64 + else 65 + clk_disable_unprepare(clk); 96 66 } 97 - EXPORT_SYMBOL(sun4i_tcon_channel_enable); 67 + 68 + void sun4i_tcon_set_status(struct sun4i_tcon *tcon, 69 + const struct drm_encoder *encoder, 70 + bool enabled) 71 + { 72 + int channel; 73 + 74 + switch (encoder->encoder_type) { 75 + case DRM_MODE_ENCODER_NONE: 76 + channel = 0; 77 + break; 78 + case DRM_MODE_ENCODER_TMDS: 79 + case DRM_MODE_ENCODER_TVDAC: 80 + channel = 1; 81 + break; 82 + default: 83 + DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n"); 84 + return; 85 + } 86 + 87 + regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, 88 + SUN4I_TCON_GCTL_TCON_ENABLE, 89 + enabled ? 
SUN4I_TCON_GCTL_TCON_ENABLE : 0); 90 + 91 + sun4i_tcon_channel_set_status(tcon, channel, enabled); 92 + } 98 93 99 94 void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable) 100 95 { ··· 129 134 } 130 135 131 136 void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel, 132 - struct drm_encoder *encoder) 137 + const struct drm_encoder *encoder) 133 138 { 134 139 int ret = -ENOTSUPP; 135 140 ··· 139 144 DRM_DEBUG_DRIVER("Muxing encoder %s to CRTC %s: %d\n", 140 145 encoder->name, encoder->crtc->name, ret); 141 146 } 142 - EXPORT_SYMBOL(sun4i_tcon_set_mux); 143 147 144 - static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode, 148 + static int sun4i_tcon_get_clk_delay(const struct drm_display_mode *mode, 145 149 int channel) 146 150 { 147 151 int delay = mode->vtotal - mode->vdisplay; ··· 158 164 return delay; 159 165 } 160 166 161 - void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon, 162 - struct drm_display_mode *mode) 167 + static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon, 168 + const struct drm_display_mode *mode) 169 + { 170 + /* Configure the dot clock */ 171 + clk_set_rate(tcon->dclk, mode->crtc_clock * 1000); 172 + 173 + /* Set the resolution */ 174 + regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, 175 + SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) | 176 + SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay)); 177 + } 178 + 179 + static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, 180 + const struct drm_display_mode *mode) 163 181 { 164 182 unsigned int bp, hsync, vsync; 165 183 u8 clk_delay; 166 184 u32 val = 0; 167 185 168 - /* Configure the dot clock */ 169 - clk_set_rate(tcon->dclk, mode->crtc_clock * 1000); 186 + sun4i_tcon0_mode_set_common(tcon, mode); 170 187 171 188 /* Adjust clock delay */ 172 189 clk_delay = sun4i_tcon_get_clk_delay(mode, 0); 173 190 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, 174 191 SUN4I_TCON0_CTL_CLK_DELAY_MASK, 175 192 SUN4I_TCON0_CTL_CLK_DELAY(clk_delay)); 176 - 177 - /* Set the 
resolution */ 178 - regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, 179 - SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) | 180 - SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay)); 181 193 182 194 /* 183 195 * This is called a backporch in the register documentation, ··· 238 238 /* Enable the output on the pins */ 239 239 regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0); 240 240 } 241 - EXPORT_SYMBOL(sun4i_tcon0_mode_set); 242 241 243 - void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, 244 - struct drm_display_mode *mode) 242 + static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, 243 + const struct drm_display_mode *mode) 245 244 { 246 245 unsigned int bp, hsync, vsync, vtotal; 247 246 u8 clk_delay; ··· 328 329 SUN4I_TCON_GCTL_IOMAP_MASK, 329 330 SUN4I_TCON_GCTL_IOMAP_TCON1); 330 331 } 331 - EXPORT_SYMBOL(sun4i_tcon1_mode_set); 332 + 333 + void sun4i_tcon_mode_set(struct sun4i_tcon *tcon, 334 + const struct drm_encoder *encoder, 335 + const struct drm_display_mode *mode) 336 + { 337 + switch (encoder->encoder_type) { 338 + case DRM_MODE_ENCODER_NONE: 339 + sun4i_tcon0_mode_set_rgb(tcon, mode); 340 + sun4i_tcon_set_mux(tcon, 0, encoder); 341 + break; 342 + case DRM_MODE_ENCODER_TVDAC: 343 + case DRM_MODE_ENCODER_TMDS: 344 + sun4i_tcon1_mode_set(tcon, mode); 345 + sun4i_tcon_set_mux(tcon, 1, encoder); 346 + break; 347 + default: 348 + DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n"); 349 + } 350 + } 351 + EXPORT_SYMBOL(sun4i_tcon_mode_set); 332 352 333 353 static void sun4i_tcon_finish_page_flip(struct drm_device *dev, 334 354 struct sun4i_crtc *scrtc) ··· 800 782 } 801 783 802 784 /* platform specific TCON muxing callbacks */ 785 + static int sun4i_a10_tcon_set_mux(struct sun4i_tcon *tcon, 786 + const struct drm_encoder *encoder) 787 + { 788 + struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev); 789 + u32 shift; 790 + 791 + if (!tcon0) 792 + return -EINVAL; 793 + 794 + switch (encoder->encoder_type) { 795 + case DRM_MODE_ENCODER_TMDS: 796 + /* HDMI */ 
797 + shift = 8; 798 + break; 799 + default: 800 + return -EINVAL; 801 + } 802 + 803 + regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG, 804 + 0x3 << shift, tcon->id << shift); 805 + 806 + return 0; 807 + } 808 + 803 809 static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon, 804 - struct drm_encoder *encoder) 810 + const struct drm_encoder *encoder) 805 811 { 806 812 u32 val; 807 813 ··· 841 799 } 842 800 843 801 static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon, 844 - struct drm_encoder *encoder) 802 + const struct drm_encoder *encoder) 845 803 { 846 804 struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev); 847 805 u32 shift; ··· 865 823 return 0; 866 824 } 867 825 826 + static const struct sun4i_tcon_quirks sun4i_a10_quirks = { 827 + .has_channel_1 = true, 828 + .set_mux = sun4i_a10_tcon_set_mux, 829 + }; 830 + 868 831 static const struct sun4i_tcon_quirks sun5i_a13_quirks = { 869 832 .has_channel_1 = true, 870 833 .set_mux = sun5i_a13_tcon_set_mux, ··· 886 839 .needs_de_be_mux = true, 887 840 }; 888 841 842 + static const struct sun4i_tcon_quirks sun7i_a20_quirks = { 843 + .has_channel_1 = true, 844 + /* Same display pipeline structure as A10 */ 845 + .set_mux = sun4i_a10_tcon_set_mux, 846 + }; 847 + 889 848 static const struct sun4i_tcon_quirks sun8i_a33_quirks = { 890 849 /* nothing is supported */ 891 850 }; ··· 901 848 }; 902 849 903 850 static const struct of_device_id sun4i_tcon_of_table[] = { 851 + { .compatible = "allwinner,sun4i-a10-tcon", .data = &sun4i_a10_quirks }, 904 852 { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks }, 905 853 { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks }, 906 854 { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks }, 855 + { .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks }, 907 856 { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks }, 908 857 { .compatible = "allwinner,sun8i-v3s-tcon", .data = 
&sun8i_v3s_quirks }, 909 858 { }
+6 -17
drivers/gpu/drm/sun4i/sun4i_tcon.h
··· 152 152 bool needs_de_be_mux; /* sun6i needs mux to select backend */ 153 153 154 154 /* callback to handle tcon muxing options */ 155 - int (*set_mux)(struct sun4i_tcon *, struct drm_encoder *); 155 + int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); 156 156 }; 157 157 158 158 struct sun4i_tcon { ··· 190 190 struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node); 191 191 struct drm_panel *sun4i_tcon_find_panel(struct device_node *node); 192 192 193 - /* Global Control */ 194 - void sun4i_tcon_disable(struct sun4i_tcon *tcon); 195 - void sun4i_tcon_enable(struct sun4i_tcon *tcon); 196 - 197 - /* Channel Control */ 198 - void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel); 199 - void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel); 200 - 201 193 void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable); 202 - 203 - /* Mode Related Controls */ 204 - void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel, 205 - struct drm_encoder *encoder); 206 - void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon, 207 - struct drm_display_mode *mode); 208 - void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, 209 - struct drm_display_mode *mode); 194 + void sun4i_tcon_mode_set(struct sun4i_tcon *tcon, 195 + const struct drm_encoder *encoder, 196 + const struct drm_display_mode *mode); 197 + void sun4i_tcon_set_status(struct sun4i_tcon *crtc, 198 + const struct drm_encoder *encoder, bool enable); 210 199 211 200 #endif /* __SUN4I_TCON_H__ */
-12
drivers/gpu/drm/sun4i/sun4i_tv.c
··· 24 24 25 25 #include "sun4i_crtc.h" 26 26 #include "sun4i_drv.h" 27 - #include "sun4i_tcon.h" 28 27 #include "sunxi_engine.h" 29 28 30 29 #define SUN4I_TVE_EN_REG 0x000 ··· 344 345 { 345 346 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); 346 347 struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); 347 - struct sun4i_tcon *tcon = crtc->tcon; 348 348 349 349 DRM_DEBUG_DRIVER("Disabling the TV Output\n"); 350 - 351 - sun4i_tcon_channel_disable(tcon, 1); 352 350 353 351 regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, 354 352 SUN4I_TVE_EN_ENABLE, ··· 358 362 { 359 363 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); 360 364 struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); 361 - struct sun4i_tcon *tcon = crtc->tcon; 362 365 363 366 DRM_DEBUG_DRIVER("Enabling the TV Output\n"); 364 367 ··· 366 371 regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, 367 372 SUN4I_TVE_EN_ENABLE, 368 373 SUN4I_TVE_EN_ENABLE); 369 - 370 - sun4i_tcon_channel_enable(tcon, 1); 371 374 } 372 375 373 376 static void sun4i_tv_mode_set(struct drm_encoder *encoder, ··· 373 380 struct drm_display_mode *adjusted_mode) 374 381 { 375 382 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); 376 - struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); 377 - struct sun4i_tcon *tcon = crtc->tcon; 378 383 const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode); 379 - 380 - sun4i_tcon1_mode_set(tcon, mode); 381 - sun4i_tcon_set_mux(tcon, 1, encoder); 382 384 383 385 /* Enable and map the DAC to the output */ 384 386 regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+103 -49
drivers/gpu/drm/udl/udl_connector.c
··· 14 14 #include <drm/drm_crtc.h> 15 15 #include <drm/drm_edid.h> 16 16 #include <drm/drm_crtc_helper.h> 17 + #include "udl_connector.h" 17 18 #include "udl_drv.h" 18 19 19 - /* dummy connector to just get EDID, 20 - all UDL appear to have a DVI-D */ 21 - 22 - static u8 *udl_get_edid(struct udl_device *udl) 20 + static bool udl_get_edid_block(struct udl_device *udl, int block_idx, 21 + u8 *buff) 23 22 { 24 - u8 *block; 25 - char *rbuf; 26 23 int ret, i; 24 + u8 *read_buff; 27 25 28 - block = kmalloc(EDID_LENGTH, GFP_KERNEL); 29 - if (block == NULL) 30 - return NULL; 31 - 32 - rbuf = kmalloc(2, GFP_KERNEL); 33 - if (rbuf == NULL) 34 - goto error; 26 + read_buff = kmalloc(2, GFP_KERNEL); 27 + if (!read_buff) 28 + return false; 35 29 36 30 for (i = 0; i < EDID_LENGTH; i++) { 31 + int bval = (i + block_idx * EDID_LENGTH) << 8; 37 32 ret = usb_control_msg(udl->udev, 38 - usb_rcvctrlpipe(udl->udev, 0), (0x02), 39 - (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2, 40 - HZ); 33 + usb_rcvctrlpipe(udl->udev, 0), 34 + (0x02), (0x80 | (0x02 << 5)), bval, 35 + 0xA1, read_buff, 2, HZ); 41 36 if (ret < 1) { 42 37 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); 43 - goto error; 38 + kfree(read_buff); 39 + return false; 44 40 } 45 - block[i] = rbuf[1]; 41 + buff[i] = read_buff[1]; 46 42 } 47 43 48 - kfree(rbuf); 49 - return block; 44 + kfree(read_buff); 45 + return true; 46 + } 50 47 51 - error: 52 - kfree(block); 53 - kfree(rbuf); 54 - return NULL; 48 + static bool udl_get_edid(struct udl_device *udl, u8 **result_buff, 49 + int *result_buff_size) 50 + { 51 + int i, extensions; 52 + u8 *block_buff = NULL, *buff_ptr; 53 + 54 + block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL); 55 + if (block_buff == NULL) 56 + return false; 57 + 58 + if (udl_get_edid_block(udl, 0, block_buff) && 59 + memchr_inv(block_buff, 0, EDID_LENGTH)) { 60 + extensions = ((struct edid *)block_buff)->extensions; 61 + if (extensions > 0) { 62 + /* we have to read all extensions one by one */ 63 + 
*result_buff_size = EDID_LENGTH * (extensions + 1); 64 + *result_buff = kmalloc(*result_buff_size, GFP_KERNEL); 65 + buff_ptr = *result_buff; 66 + if (buff_ptr == NULL) { 67 + kfree(block_buff); 68 + return false; 69 + } 70 + memcpy(buff_ptr, block_buff, EDID_LENGTH); 71 + kfree(block_buff); 72 + buff_ptr += EDID_LENGTH; 73 + for (i = 1; i < extensions; ++i) { 74 + if (udl_get_edid_block(udl, i, buff_ptr)) { 75 + buff_ptr += EDID_LENGTH; 76 + } else { 77 + kfree(*result_buff); 78 + *result_buff = NULL; 79 + return false; 80 + } 81 + } 82 + return true; 83 + } 84 + /* we have only base edid block */ 85 + *result_buff = block_buff; 86 + *result_buff_size = EDID_LENGTH; 87 + return true; 88 + } 89 + 90 + kfree(block_buff); 91 + 92 + return false; 55 93 } 56 94 57 95 static int udl_get_modes(struct drm_connector *connector) 58 96 { 59 - struct udl_device *udl = connector->dev->dev_private; 60 - struct edid *edid; 61 - int ret; 97 + struct udl_drm_connector *udl_connector = 98 + container_of(connector, 99 + struct udl_drm_connector, 100 + connector); 62 101 63 - edid = (struct edid *)udl_get_edid(udl); 64 - if (!edid) { 65 - drm_mode_connector_update_edid_property(connector, NULL); 66 - return 0; 67 - } 68 - 69 - /* 70 - * We only read the main block, but if the monitor reports extension 71 - * blocks then the drm edid code expects them to be present, so patch 72 - * the extension count to 0. 
73 - */ 74 - edid->checksum += edid->extensions; 75 - edid->extensions = 0; 76 - 77 - drm_mode_connector_update_edid_property(connector, edid); 78 - ret = drm_add_edid_modes(connector, edid); 79 - kfree(edid); 80 - return ret; 102 + drm_mode_connector_update_edid_property(connector, udl_connector->edid); 103 + if (udl_connector->edid) 104 + return drm_add_edid_modes(connector, udl_connector->edid); 105 + return 0; 81 106 } 82 107 83 108 static int udl_mode_valid(struct drm_connector *connector, ··· 121 96 static enum drm_connector_status 122 97 udl_detect(struct drm_connector *connector, bool force) 123 98 { 124 - if (drm_dev_is_unplugged(connector->dev)) 99 + u8 *edid_buff = NULL; 100 + int edid_buff_size = 0; 101 + struct udl_device *udl = connector->dev->dev_private; 102 + struct udl_drm_connector *udl_connector = 103 + container_of(connector, 104 + struct udl_drm_connector, 105 + connector); 106 + 107 + /* cleanup previous edid */ 108 + if (udl_connector->edid != NULL) { 109 + kfree(udl_connector->edid); 110 + udl_connector->edid = NULL; 111 + } 112 + 113 + 114 + if (!udl_get_edid(udl, &edid_buff, &edid_buff_size)) 125 115 return connector_status_disconnected; 116 + 117 + udl_connector->edid = (struct edid *)edid_buff; 118 + 126 119 return connector_status_connected; 127 120 } 128 121 ··· 160 117 161 118 static void udl_connector_destroy(struct drm_connector *connector) 162 119 { 120 + struct udl_drm_connector *udl_connector = 121 + container_of(connector, 122 + struct udl_drm_connector, 123 + connector); 124 + 163 125 drm_connector_unregister(connector); 164 126 drm_connector_cleanup(connector); 127 + kfree(udl_connector->edid); 165 128 kfree(connector); 166 129 } 167 130 ··· 187 138 188 139 int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) 189 140 { 141 + struct udl_drm_connector *udl_connector; 190 142 struct drm_connector *connector; 191 143 192 - connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL); 193 - if (!connector) 
144 + udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL); 145 + if (!udl_connector) 194 146 return -ENOMEM; 195 147 196 - drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII); 148 + connector = &udl_connector->connector; 149 + drm_connector_init(dev, connector, &udl_connector_funcs, 150 + DRM_MODE_CONNECTOR_DVII); 197 151 drm_connector_helper_add(connector, &udl_connector_helper_funcs); 198 152 199 153 drm_connector_register(connector); 200 154 drm_mode_connector_attach_encoder(connector, encoder); 155 + connector->polled = DRM_CONNECTOR_POLL_HPD | 156 + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 201 157 202 158 return 0; 203 159 }
+13
drivers/gpu/drm/udl/udl_connector.h
··· 1 + #ifndef __UDL_CONNECTOR_H__ 2 + #define __UDL_CONNECTOR_H__ 3 + 4 + #include <drm/drm_crtc.h> 5 + 6 + struct udl_drm_connector { 7 + struct drm_connector connector; 8 + /* last udl_detect edid */ 9 + struct edid *edid; 10 + }; 11 + 12 + 13 + #endif //__UDL_CONNECTOR_H__
+4
drivers/gpu/drm/udl/udl_drv.c
··· 14 14 static int udl_usb_suspend(struct usb_interface *interface, 15 15 pm_message_t message) 16 16 { 17 + struct drm_device *dev = usb_get_intfdata(interface); 18 + 19 + drm_kms_helper_poll_disable(dev); 17 20 return 0; 18 21 } 19 22 ··· 24 21 { 25 22 struct drm_device *dev = usb_get_intfdata(interface); 26 23 24 + drm_kms_helper_poll_enable(dev); 27 25 udl_modeset_restore(dev); 28 26 return 0; 29 27 }
+5
drivers/gpu/drm/udl/udl_main.c
··· 11 11 * more details. 12 12 */ 13 13 #include <drm/drmP.h> 14 + #include <drm/drm_crtc_helper.h> 14 15 #include "udl_drv.h" 15 16 16 17 /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */ ··· 351 350 if (ret) 352 351 goto err_fb; 353 352 353 + drm_kms_helper_poll_init(dev); 354 + 354 355 return 0; 355 356 err_fb: 356 357 udl_fbdev_cleanup(dev); ··· 373 370 void udl_driver_unload(struct drm_device *dev) 374 371 { 375 372 struct udl_device *udl = dev->dev_private; 373 + 374 + drm_kms_helper_poll_fini(dev); 376 375 377 376 if (udl->urbs.count) 378 377 udl_free_urb_list(dev);
+280 -9
drivers/gpu/drm/vc4/vc4_bo.c
··· 53 53 vc4->bo_labels[i].size_allocated / 1024, 54 54 vc4->bo_labels[i].num_allocated); 55 55 } 56 + 57 + mutex_lock(&vc4->purgeable.lock); 58 + if (vc4->purgeable.num) 59 + DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache", 60 + vc4->purgeable.size / 1024, vc4->purgeable.num); 61 + 62 + if (vc4->purgeable.purged_num) 63 + DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO", 64 + vc4->purgeable.purged_size / 1024, 65 + vc4->purgeable.purged_num); 66 + mutex_unlock(&vc4->purgeable.lock); 56 67 } 57 68 58 69 #ifdef CONFIG_DEBUG_FS ··· 85 74 vc4->bo_labels[i].num_allocated); 86 75 } 87 76 mutex_unlock(&vc4->bo_lock); 77 + 78 + mutex_lock(&vc4->purgeable.lock); 79 + if (vc4->purgeable.num) 80 + seq_printf(m, "%30s: %6dkb BOs (%d)\n", "userspace BO cache", 81 + vc4->purgeable.size / 1024, vc4->purgeable.num); 82 + 83 + if (vc4->purgeable.purged_num) 84 + seq_printf(m, "%30s: %6dkb BOs (%d)\n", "total purged BO", 85 + vc4->purgeable.purged_size / 1024, 86 + vc4->purgeable.purged_num); 87 + mutex_unlock(&vc4->purgeable.lock); 88 88 89 89 return 0; 90 90 } ··· 269 247 mutex_unlock(&vc4->bo_lock); 270 248 } 271 249 250 + void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo) 251 + { 252 + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); 253 + 254 + mutex_lock(&vc4->purgeable.lock); 255 + list_add_tail(&bo->size_head, &vc4->purgeable.list); 256 + vc4->purgeable.num++; 257 + vc4->purgeable.size += bo->base.base.size; 258 + mutex_unlock(&vc4->purgeable.lock); 259 + } 260 + 261 + static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo) 262 + { 263 + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); 264 + 265 + /* list_del_init() is used here because the caller might release 266 + * the purgeable lock in order to acquire the madv one and update the 267 + * madv status. 
268 + * During this short period of time a user might decide to mark 269 + * the BO as unpurgeable, and if bo->madv is set to 270 + * VC4_MADV_DONTNEED it will try to remove the BO from the 271 + * purgeable list which will fail if the ->next/prev fields 272 + * are set to LIST_POISON1/LIST_POISON2 (which is what 273 + * list_del() does). 274 + * Re-initializing the list element guarantees that list_del() 275 + * will work correctly even if it's a NOP. 276 + */ 277 + list_del_init(&bo->size_head); 278 + vc4->purgeable.num--; 279 + vc4->purgeable.size -= bo->base.base.size; 280 + } 281 + 282 + void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo) 283 + { 284 + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); 285 + 286 + mutex_lock(&vc4->purgeable.lock); 287 + vc4_bo_remove_from_purgeable_pool_locked(bo); 288 + mutex_unlock(&vc4->purgeable.lock); 289 + } 290 + 291 + static void vc4_bo_purge(struct drm_gem_object *obj) 292 + { 293 + struct vc4_bo *bo = to_vc4_bo(obj); 294 + struct drm_device *dev = obj->dev; 295 + 296 + WARN_ON(!mutex_is_locked(&bo->madv_lock)); 297 + WARN_ON(bo->madv != VC4_MADV_DONTNEED); 298 + 299 + drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 300 + 301 + dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr); 302 + bo->base.vaddr = NULL; 303 + bo->madv = __VC4_MADV_PURGED; 304 + } 305 + 306 + static void vc4_bo_userspace_cache_purge(struct drm_device *dev) 307 + { 308 + struct vc4_dev *vc4 = to_vc4_dev(dev); 309 + 310 + mutex_lock(&vc4->purgeable.lock); 311 + while (!list_empty(&vc4->purgeable.list)) { 312 + struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list, 313 + struct vc4_bo, size_head); 314 + struct drm_gem_object *obj = &bo->base.base; 315 + size_t purged_size = 0; 316 + 317 + vc4_bo_remove_from_purgeable_pool_locked(bo); 318 + 319 + /* Release the purgeable lock while we're purging the BO so 320 + * that other people can continue inserting things in the 321 + * purgeable pool without having to 
wait for all BOs to be 322 + * purged. 323 + */ 324 + mutex_unlock(&vc4->purgeable.lock); 325 + mutex_lock(&bo->madv_lock); 326 + 327 + /* Since we released the purgeable pool lock before acquiring 328 + * the BO madv one, the user may have marked the BO as WILLNEED 329 + * and re-used it in the meantime. 330 + * Before purging the BO we need to make sure 331 + * - it is still marked as DONTNEED 332 + * - it has not been re-inserted in the purgeable list 333 + * - it is not used by HW blocks 334 + * If one of these conditions is not met, just skip the entry. 335 + */ 336 + if (bo->madv == VC4_MADV_DONTNEED && 337 + list_empty(&bo->size_head) && 338 + !refcount_read(&bo->usecnt)) { 339 + purged_size = bo->base.base.size; 340 + vc4_bo_purge(obj); 341 + } 342 + mutex_unlock(&bo->madv_lock); 343 + mutex_lock(&vc4->purgeable.lock); 344 + 345 + if (purged_size) { 346 + vc4->purgeable.purged_size += purged_size; 347 + vc4->purgeable.purged_num++; 348 + } 349 + } 350 + mutex_unlock(&vc4->purgeable.lock); 351 + } 352 + 272 353 static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev, 273 354 uint32_t size, 274 355 enum vc4_kernel_bo_type type) ··· 418 293 if (!bo) 419 294 return ERR_PTR(-ENOMEM); 420 295 296 + bo->madv = VC4_MADV_WILLNEED; 297 + refcount_set(&bo->usecnt, 0); 298 + mutex_init(&bo->madv_lock); 421 299 mutex_lock(&vc4->bo_lock); 422 300 bo->label = VC4_BO_TYPE_KERNEL; 423 301 vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++; ··· 458 330 * CMA allocations we've got laying around and try again. 459 331 */ 460 332 vc4_bo_cache_purge(dev); 461 - 462 333 cma_obj = drm_gem_cma_create(dev, size); 463 - if (IS_ERR(cma_obj)) { 464 - DRM_ERROR("Failed to allocate from CMA:\n"); 465 - vc4_bo_stats_dump(vc4); 466 - return ERR_PTR(-ENOMEM); 467 - } 334 + } 335 + 336 + if (IS_ERR(cma_obj)) { 337 + /* 338 + * Still not enough CMA memory, purge the userspace BO 339 + * cache and retry. 
340 + * This is sub-optimal since we purge the whole userspace 341 + * BO cache which forces user that want to re-use the BO to 342 + * restore its initial content. 343 + * Ideally, we should purge entries one by one and retry 344 + * after each to see if CMA allocation succeeds. Or even 345 + * better, try to find an entry with at least the same 346 + * size. 347 + */ 348 + vc4_bo_userspace_cache_purge(dev); 349 + cma_obj = drm_gem_cma_create(dev, size); 350 + } 351 + 352 + if (IS_ERR(cma_obj)) { 353 + DRM_ERROR("Failed to allocate from CMA:\n"); 354 + vc4_bo_stats_dump(vc4); 355 + return ERR_PTR(-ENOMEM); 468 356 } 469 357 bo = to_vc4_bo(&cma_obj->base); 358 + 359 + /* By default, BOs do not support the MADV ioctl. This will be enabled 360 + * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB 361 + * BOs). 362 + */ 363 + bo->madv = __VC4_MADV_NOTSUPP; 470 364 471 365 mutex_lock(&vc4->bo_lock); 472 366 vc4_bo_set_label(&cma_obj->base, type); ··· 514 364 bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB); 515 365 if (IS_ERR(bo)) 516 366 return PTR_ERR(bo); 367 + 368 + bo->madv = VC4_MADV_WILLNEED; 517 369 518 370 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 519 371 drm_gem_object_put_unlocked(&bo->base.base); ··· 555 403 struct vc4_bo *bo = to_vc4_bo(gem_bo); 556 404 struct list_head *cache_list; 557 405 406 + /* Remove the BO from the purgeable list. */ 407 + mutex_lock(&bo->madv_lock); 408 + if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt)) 409 + vc4_bo_remove_from_purgeable_pool(bo); 410 + mutex_unlock(&bo->madv_lock); 411 + 558 412 mutex_lock(&vc4->bo_lock); 559 413 /* If the object references someone else's memory, we can't cache it. 560 414 */ ··· 576 418 } 577 419 578 420 /* If this object was partially constructed but CMA allocation 579 - * had failed, just free it. 421 + * had failed, just free it. Can also happen when the BO has been 422 + * purged. 
580 423 */ 581 424 if (!bo->base.vaddr) { 582 425 vc4_bo_destroy(bo); ··· 595 436 kfree(bo->validated_shader); 596 437 bo->validated_shader = NULL; 597 438 } 439 + 440 + /* Reset madv and usecnt before adding the BO to the cache. */ 441 + bo->madv = __VC4_MADV_NOTSUPP; 442 + refcount_set(&bo->usecnt, 0); 598 443 599 444 bo->t_format = false; 600 445 bo->free_time = jiffies; ··· 624 461 mutex_unlock(&vc4->bo_lock); 625 462 } 626 463 464 + int vc4_bo_inc_usecnt(struct vc4_bo *bo) 465 + { 466 + int ret; 467 + 468 + /* Fast path: if the BO is already retained by someone, no need to 469 + * check the madv status. 470 + */ 471 + if (refcount_inc_not_zero(&bo->usecnt)) 472 + return 0; 473 + 474 + mutex_lock(&bo->madv_lock); 475 + switch (bo->madv) { 476 + case VC4_MADV_WILLNEED: 477 + refcount_inc(&bo->usecnt); 478 + ret = 0; 479 + break; 480 + case VC4_MADV_DONTNEED: 481 + /* We shouldn't use a BO marked as purgeable if at least 482 + * someone else retained its content by incrementing usecnt. 483 + * Luckily the BO hasn't been purged yet, but something wrong 484 + * is happening here. Just throw an error instead of 485 + * authorizing this use case. 486 + */ 487 + case __VC4_MADV_PURGED: 488 + /* We can't use a purged BO. */ 489 + default: 490 + /* Invalid madv value. */ 491 + ret = -EINVAL; 492 + break; 493 + } 494 + mutex_unlock(&bo->madv_lock); 495 + 496 + return ret; 497 + } 498 + 499 + void vc4_bo_dec_usecnt(struct vc4_bo *bo) 500 + { 501 + /* Fast path: if the BO is still retained by someone, no need to test 502 + * the madv value. 
503 + */ 504 + if (refcount_dec_not_one(&bo->usecnt)) 505 + return; 506 + 507 + mutex_lock(&bo->madv_lock); 508 + if (refcount_dec_and_test(&bo->usecnt) && 509 + bo->madv == VC4_MADV_DONTNEED) 510 + vc4_bo_add_to_purgeable_pool(bo); 511 + mutex_unlock(&bo->madv_lock); 512 + } 513 + 627 514 static void vc4_bo_cache_time_timer(unsigned long data) 628 515 { 629 516 struct drm_device *dev = (struct drm_device *)data; ··· 693 480 vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) 694 481 { 695 482 struct vc4_bo *bo = to_vc4_bo(obj); 483 + struct dma_buf *dmabuf; 484 + int ret; 696 485 697 486 if (bo->validated_shader) { 698 487 DRM_DEBUG("Attempting to export shader BO\n"); 699 488 return ERR_PTR(-EINVAL); 700 489 } 701 490 702 - return drm_gem_prime_export(dev, obj, flags); 491 + /* Note: as soon as the BO is exported it becomes unpurgeable, because 492 + * noone ever decrements the usecnt even if the reference held by the 493 + * exported BO is released. This shouldn't be a problem since we don't 494 + * expect exported BOs to be marked as purgeable. 495 + */ 496 + ret = vc4_bo_inc_usecnt(bo); 497 + if (ret) { 498 + DRM_ERROR("Failed to increment BO usecnt\n"); 499 + return ERR_PTR(ret); 500 + } 501 + 502 + dmabuf = drm_gem_prime_export(dev, obj, flags); 503 + if (IS_ERR(dmabuf)) 504 + vc4_bo_dec_usecnt(bo); 505 + 506 + return dmabuf; 507 + } 508 + 509 + int vc4_fault(struct vm_fault *vmf) 510 + { 511 + struct vm_area_struct *vma = vmf->vma; 512 + struct drm_gem_object *obj = vma->vm_private_data; 513 + struct vc4_bo *bo = to_vc4_bo(obj); 514 + 515 + /* The only reason we would end up here is when user-space accesses 516 + * BO's memory after it's been purged. 
517 + */ 518 + mutex_lock(&bo->madv_lock); 519 + WARN_ON(bo->madv != __VC4_MADV_PURGED); 520 + mutex_unlock(&bo->madv_lock); 521 + 522 + return VM_FAULT_SIGBUS; 703 523 } 704 524 705 525 int vc4_mmap(struct file *filp, struct vm_area_struct *vma) 706 526 { 707 527 struct drm_gem_object *gem_obj; 528 + unsigned long vm_pgoff; 708 529 struct vc4_bo *bo; 709 530 int ret; 710 531 ··· 754 507 return -EINVAL; 755 508 } 756 509 510 + if (bo->madv != VC4_MADV_WILLNEED) { 511 + DRM_DEBUG("mmaping of %s BO not allowed\n", 512 + bo->madv == VC4_MADV_DONTNEED ? 513 + "purgeable" : "purged"); 514 + return -EINVAL; 515 + } 516 + 757 517 /* 758 518 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the 759 519 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map 760 520 * the whole buffer. 761 521 */ 762 522 vma->vm_flags &= ~VM_PFNMAP; 763 - vma->vm_pgoff = 0; 764 523 524 + /* This ->vm_pgoff dance is needed to make all parties happy: 525 + * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated 526 + * mem-region, hence the need to set it to zero (the value set by 527 + * the DRM core is a virtual offset encoding the GEM object-id) 528 + * - the mmap() core logic needs ->vm_pgoff to be restored to its 529 + * initial value before returning from this function because it 530 + * encodes the offset of this GEM in the dev->anon_inode pseudo-file 531 + * and this information will be used when we invalidate userspace 532 + * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()). 
533 + */ 534 + vm_pgoff = vma->vm_pgoff; 535 + vma->vm_pgoff = 0; 765 536 ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr, 766 537 bo->base.paddr, vma->vm_end - vma->vm_start); 538 + vma->vm_pgoff = vm_pgoff; 539 + 767 540 if (ret) 768 541 drm_gem_vm_close(vma); 769 542 ··· 847 580 if (IS_ERR(bo)) 848 581 return PTR_ERR(bo); 849 582 583 + bo->madv = VC4_MADV_WILLNEED; 584 + 850 585 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 851 586 drm_gem_object_put_unlocked(&bo->base.base); 852 587 ··· 901 632 bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER); 902 633 if (IS_ERR(bo)) 903 634 return PTR_ERR(bo); 635 + 636 + bo->madv = VC4_MADV_WILLNEED; 904 637 905 638 if (copy_from_user(bo->base.vaddr, 906 639 (void __user *)(uintptr_t)args->data,
+9 -1
drivers/gpu/drm/vc4/vc4_drv.c
··· 100 100 case DRM_VC4_PARAM_SUPPORTS_ETC1: 101 101 case DRM_VC4_PARAM_SUPPORTS_THREADED_FS: 102 102 case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER: 103 + case DRM_VC4_PARAM_SUPPORTS_MADVISE: 103 104 args->value = true; 104 105 break; 105 106 default: ··· 117 116 118 117 drm_fbdev_cma_restore_mode(vc4->fbdev); 119 118 } 119 + 120 + static const struct vm_operations_struct vc4_vm_ops = { 121 + .fault = vc4_fault, 122 + .open = drm_gem_vm_open, 123 + .close = drm_gem_vm_close, 124 + }; 120 125 121 126 static const struct file_operations vc4_drm_fops = { 122 127 .owner = THIS_MODULE, ··· 149 142 DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW), 150 143 DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW), 151 144 DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW), 145 + DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW), 152 146 }; 153 147 154 148 static struct drm_driver vc4_drm_driver = { ··· 174 166 175 167 .gem_create_object = vc4_create_object, 176 168 .gem_free_object_unlocked = vc4_free_object, 177 - .gem_vm_ops = &drm_gem_cma_vm_ops, 169 + .gem_vm_ops = &vc4_vm_ops, 178 170 179 171 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 180 172 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+30
drivers/gpu/drm/vc4/vc4_drv.h
··· 74 74 /* Protects bo_cache and bo_labels. */ 75 75 struct mutex bo_lock; 76 76 77 + /* Purgeable BO pool. All BOs in this pool can have their memory 78 + * reclaimed if the driver is unable to allocate new BOs. We also 79 + * keep stats related to the purge mechanism here. 80 + */ 81 + struct { 82 + struct list_head list; 83 + unsigned int num; 84 + size_t size; 85 + unsigned int purged_num; 86 + size_t purged_size; 87 + struct mutex lock; 88 + } purgeable; 89 + 77 90 uint64_t dma_fence_context; 78 91 79 92 /* Sequence number for the last job queued in bin_job_list. ··· 205 192 * for user-allocated labels. 206 193 */ 207 194 int label; 195 + 196 + /* Count the number of active users. This is needed to determine 197 + * whether we can move the BO to the purgeable list or not (when the BO 198 + * is used by the GPU or the display engine we can't purge it). 199 + */ 200 + refcount_t usecnt; 201 + 202 + /* Store purgeable/purged state here */ 203 + u32 madv; 204 + struct mutex madv_lock; 208 205 }; 209 206 210 207 static inline struct vc4_bo * ··· 526 503 struct drm_file *file_priv); 527 504 int vc4_label_bo_ioctl(struct drm_device *dev, void *data, 528 505 struct drm_file *file_priv); 506 + int vc4_fault(struct vm_fault *vmf); 529 507 int vc4_mmap(struct file *filp, struct vm_area_struct *vma); 530 508 struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj); 531 509 int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); ··· 537 513 int vc4_bo_cache_init(struct drm_device *dev); 538 514 void vc4_bo_cache_destroy(struct drm_device *dev); 539 515 int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); 516 + int vc4_bo_inc_usecnt(struct vc4_bo *bo); 517 + void vc4_bo_dec_usecnt(struct vc4_bo *bo); 518 + void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo); 519 + void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo); 540 520 541 521 /* vc4_crtc.c */ 542 522 extern struct platform_driver vc4_crtc_driver; ··· 585 557 int 
vc4_queue_seqno_cb(struct drm_device *dev, 586 558 struct vc4_seqno_cb *cb, uint64_t seqno, 587 559 void (*func)(struct vc4_seqno_cb *cb)); 560 + int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, 561 + struct drm_file *file_priv); 588 562 589 563 /* vc4_hdmi.c */ 590 564 extern struct platform_driver vc4_hdmi_driver;
+30 -2
drivers/gpu/drm/vc4/vc4_dsi.c
··· 1360 1360 *ret = IRQ_HANDLED; 1361 1361 } 1362 1362 1363 + /* 1364 + * Initial handler for port 1 where we need the reg_dma workaround. 1365 + * The register DMA writes sleep, so we can't do it in the top half. 1366 + * Instead we use IRQF_ONESHOT so that the IRQ gets disabled in the 1367 + * parent interrupt contrller until our interrupt thread is done. 1368 + */ 1369 + static irqreturn_t vc4_dsi_irq_defer_to_thread_handler(int irq, void *data) 1370 + { 1371 + struct vc4_dsi *dsi = data; 1372 + u32 stat = DSI_PORT_READ(INT_STAT); 1373 + 1374 + if (!stat) 1375 + return IRQ_NONE; 1376 + 1377 + return IRQ_WAKE_THREAD; 1378 + } 1379 + 1380 + /* 1381 + * Normal IRQ handler for port 0, or the threaded IRQ handler for port 1382 + * 1 where we need the reg_dma workaround. 1383 + */ 1363 1384 static irqreturn_t vc4_dsi_irq_handler(int irq, void *data) 1364 1385 { 1365 1386 struct vc4_dsi *dsi = data; ··· 1560 1539 /* Clear any existing interrupt state. */ 1561 1540 DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT)); 1562 1541 1563 - ret = devm_request_irq(dev, platform_get_irq(pdev, 0), 1564 - vc4_dsi_irq_handler, 0, "vc4 dsi", dsi); 1542 + if (dsi->reg_dma_mem) 1543 + ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), 1544 + vc4_dsi_irq_defer_to_thread_handler, 1545 + vc4_dsi_irq_handler, 1546 + IRQF_ONESHOT, 1547 + "vc4 dsi", dsi); 1548 + else 1549 + ret = devm_request_irq(dev, platform_get_irq(pdev, 0), 1550 + vc4_dsi_irq_handler, 0, "vc4 dsi", dsi); 1565 1551 if (ret) { 1566 1552 if (ret != -EPROBE_DEFER) 1567 1553 dev_err(dev, "Failed to get interrupt: %d\n", ret);
+150 -6
drivers/gpu/drm/vc4/vc4_gem.c
··· 188 188 continue; 189 189 190 190 for (j = 0; j < exec[i]->bo_count; j++) { 191 + bo = to_vc4_bo(&exec[i]->bo[j]->base); 192 + 193 + /* Retain BOs just in case they were marked purgeable. 194 + * This prevents the BO from being purged before 195 + * someone had a chance to dump the hang state. 196 + */ 197 + WARN_ON(!refcount_read(&bo->usecnt)); 198 + refcount_inc(&bo->usecnt); 191 199 drm_gem_object_get(&exec[i]->bo[j]->base); 192 200 kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; 193 201 } 194 202 195 203 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { 204 + /* No need to retain BOs coming from the ->unref_list 205 + * because they are naturally unpurgeable. 206 + */ 196 207 drm_gem_object_get(&bo->base.base); 197 208 kernel_state->bo[j + prev_idx] = &bo->base.base; 198 209 j++; ··· 243 232 state->fdbgr = V3D_READ(V3D_FDBGR); 244 233 state->fdbgs = V3D_READ(V3D_FDBGS); 245 234 state->errstat = V3D_READ(V3D_ERRSTAT); 235 + 236 + /* We need to turn purgeable BOs into unpurgeable ones so that 237 + * userspace has a chance to dump the hang state before the kernel 238 + * decides to purge those BOs. 239 + * Note that BO consistency at dump time cannot be guaranteed. For 240 + * example, if the owner of these BOs decides to re-use them or mark 241 + * them purgeable again there's nothing we can do to prevent it. 242 + */ 243 + for (i = 0; i < kernel_state->user_state.bo_count; i++) { 244 + struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]); 245 + 246 + if (bo->madv == __VC4_MADV_NOTSUPP) 247 + continue; 248 + 249 + mutex_lock(&bo->madv_lock); 250 + if (!WARN_ON(bo->madv == __VC4_MADV_PURGED)) 251 + bo->madv = VC4_MADV_WILLNEED; 252 + refcount_dec(&bo->usecnt); 253 + mutex_unlock(&bo->madv_lock); 254 + } 246 255 247 256 spin_lock_irqsave(&vc4->job_lock, irqflags); 248 257 if (vc4->hang_state) { ··· 670 639 * The command validator needs to reference BOs by their index within 671 640 * the submitted job's BO list. 
This does the validation of the job's 672 641 * BO list and reference counting for the lifetime of the job. 673 - * 674 - * Note that this function doesn't need to unreference the BOs on 675 - * failure, because that will happen at vc4_complete_exec() time. 676 642 */ 677 643 static int 678 644 vc4_cl_lookup_bos(struct drm_device *dev, ··· 721 693 DRM_DEBUG("Failed to look up GEM BO %d: %d\n", 722 694 i, handles[i]); 723 695 ret = -EINVAL; 724 - spin_unlock(&file_priv->table_lock); 725 - goto fail; 696 + break; 726 697 } 698 + 727 699 drm_gem_object_get(bo); 728 700 exec->bo[i] = (struct drm_gem_cma_object *)bo; 729 701 } 730 702 spin_unlock(&file_priv->table_lock); 731 703 704 + if (ret) 705 + goto fail_put_bo; 706 + 707 + for (i = 0; i < exec->bo_count; i++) { 708 + ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base)); 709 + if (ret) 710 + goto fail_dec_usecnt; 711 + } 712 + 713 + kvfree(handles); 714 + return 0; 715 + 716 + fail_dec_usecnt: 717 + /* Decrease usecnt on acquired objects. 718 + * We cannot rely on vc4_complete_exec() to release resources here, 719 + * because vc4_complete_exec() has no information about which BO has 720 + * had its ->usecnt incremented. 721 + * To make things easier we just free everything explicitly and set 722 + * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release' 723 + * step. 724 + */ 725 + for (i-- ; i >= 0; i--) 726 + vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base)); 727 + 728 + fail_put_bo: 729 + /* Release any reference to acquired objects. 
*/ 730 + for (i = 0; i < exec->bo_count && exec->bo[i]; i++) 731 + drm_gem_object_put_unlocked(&exec->bo[i]->base); 732 + 732 733 fail: 733 734 kvfree(handles); 735 + kvfree(exec->bo); 736 + exec->bo = NULL; 734 737 return ret; 735 738 } 736 739 ··· 892 833 dma_fence_signal(exec->fence); 893 834 894 835 if (exec->bo) { 895 - for (i = 0; i < exec->bo_count; i++) 836 + for (i = 0; i < exec->bo_count; i++) { 837 + struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); 838 + 839 + vc4_bo_dec_usecnt(bo); 896 840 drm_gem_object_put_unlocked(&exec->bo[i]->base); 841 + } 897 842 kvfree(exec->bo); 898 843 } 899 844 ··· 1161 1098 INIT_WORK(&vc4->job_done_work, vc4_job_done_work); 1162 1099 1163 1100 mutex_init(&vc4->power_lock); 1101 + 1102 + INIT_LIST_HEAD(&vc4->purgeable.list); 1103 + mutex_init(&vc4->purgeable.lock); 1164 1104 } 1165 1105 1166 1106 void ··· 1186 1120 1187 1121 if (vc4->hang_state) 1188 1122 vc4_free_hang_state(dev, vc4->hang_state); 1123 + } 1124 + 1125 + int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, 1126 + struct drm_file *file_priv) 1127 + { 1128 + struct drm_vc4_gem_madvise *args = data; 1129 + struct drm_gem_object *gem_obj; 1130 + struct vc4_bo *bo; 1131 + int ret; 1132 + 1133 + switch (args->madv) { 1134 + case VC4_MADV_DONTNEED: 1135 + case VC4_MADV_WILLNEED: 1136 + break; 1137 + default: 1138 + return -EINVAL; 1139 + } 1140 + 1141 + if (args->pad != 0) 1142 + return -EINVAL; 1143 + 1144 + gem_obj = drm_gem_object_lookup(file_priv, args->handle); 1145 + if (!gem_obj) { 1146 + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 1147 + return -ENOENT; 1148 + } 1149 + 1150 + bo = to_vc4_bo(gem_obj); 1151 + 1152 + /* Only BOs exposed to userspace can be purged. */ 1153 + if (bo->madv == __VC4_MADV_NOTSUPP) { 1154 + DRM_DEBUG("madvise not supported on this BO\n"); 1155 + ret = -EINVAL; 1156 + goto out_put_gem; 1157 + } 1158 + 1159 + /* Not sure it's safe to purge imported BOs. Let's just assume it's 1160 + * not until proven otherwise. 
1161 + */ 1162 + if (gem_obj->import_attach) { 1163 + DRM_DEBUG("madvise not supported on imported BOs\n"); 1164 + ret = -EINVAL; 1165 + goto out_put_gem; 1166 + } 1167 + 1168 + mutex_lock(&bo->madv_lock); 1169 + 1170 + if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED && 1171 + !refcount_read(&bo->usecnt)) { 1172 + /* If the BO is about to be marked as purgeable, is not used 1173 + * and is not already purgeable or purged, add it to the 1174 + * purgeable list. 1175 + */ 1176 + vc4_bo_add_to_purgeable_pool(bo); 1177 + } else if (args->madv == VC4_MADV_WILLNEED && 1178 + bo->madv == VC4_MADV_DONTNEED && 1179 + !refcount_read(&bo->usecnt)) { 1180 + /* The BO has not been purged yet, just remove it from 1181 + * the purgeable list. 1182 + */ 1183 + vc4_bo_remove_from_purgeable_pool(bo); 1184 + } 1185 + 1186 + /* Save the purged state. */ 1187 + args->retained = bo->madv != __VC4_MADV_PURGED; 1188 + 1189 + /* Update internal madv state only if the bo was not purged. */ 1190 + if (bo->madv != __VC4_MADV_PURGED) 1191 + bo->madv = args->madv; 1192 + 1193 + mutex_unlock(&bo->madv_lock); 1194 + 1195 + ret = 0; 1196 + 1197 + out_put_gem: 1198 + drm_gem_object_put_unlocked(gem_obj); 1199 + 1200 + return ret; 1189 1201 }
+20
drivers/gpu/drm/vc4/vc4_plane.c
··· 23 23 #include <drm/drm_fb_cma_helper.h> 24 24 #include <drm/drm_plane_helper.h> 25 25 26 + #include "uapi/drm/vc4_drm.h" 26 27 #include "vc4_drv.h" 27 28 #include "vc4_regs.h" 28 29 ··· 775 774 { 776 775 struct vc4_bo *bo; 777 776 struct dma_fence *fence; 777 + int ret; 778 778 779 779 if ((plane->state->fb == state->fb) || !state->fb) 780 780 return 0; 781 781 782 782 bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); 783 + 784 + ret = vc4_bo_inc_usecnt(bo); 785 + if (ret) 786 + return ret; 787 + 783 788 fence = reservation_object_get_excl_rcu(bo->resv); 784 789 drm_atomic_set_fence_for_plane(state, fence); 785 790 786 791 return 0; 787 792 } 788 793 794 + static void vc4_cleanup_fb(struct drm_plane *plane, 795 + struct drm_plane_state *state) 796 + { 797 + struct vc4_bo *bo; 798 + 799 + if (plane->state->fb == state->fb || !state->fb) 800 + return; 801 + 802 + bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); 803 + vc4_bo_dec_usecnt(bo); 804 + } 805 + 789 806 static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { 790 807 .atomic_check = vc4_plane_atomic_check, 791 808 .atomic_update = vc4_plane_atomic_update, 792 809 .prepare_fb = vc4_prepare_fb, 810 + .cleanup_fb = vc4_cleanup_fb, 793 811 }; 794 812 795 813 static void vc4_plane_destroy(struct drm_plane *plane)
+55 -17
drivers/gpu/vga/vgaarb.c
··· 1402 1402 MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops 1403 1403 }; 1404 1404 1405 - static int __init vga_arb_device_init(void) 1405 + static void __init vga_arb_select_default_device(void) 1406 1406 { 1407 - int rc; 1408 1407 struct pci_dev *pdev; 1409 1408 struct vga_device *vgadev; 1410 1409 1411 - rc = misc_register(&vga_arb_device); 1412 - if (rc < 0) 1413 - pr_err("error %d registering device\n", rc); 1414 - 1415 - bus_register_notifier(&pci_bus_type, &pci_notifier); 1416 - 1417 - /* We add all pci devices satisfying vga class in the arbiter by 1418 - * default */ 1419 - pdev = NULL; 1420 - while ((pdev = 1421 - pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 1422 - PCI_ANY_ID, pdev)) != NULL) 1423 - vga_arbiter_add_pci_device(pdev); 1424 - 1410 + #if defined(CONFIG_X86) || defined(CONFIG_IA64) 1425 1411 list_for_each_entry(vgadev, &vga_list, list) { 1426 1412 struct device *dev = &vgadev->pdev->dev; 1427 - #if defined(CONFIG_X86) || defined(CONFIG_IA64) 1428 1413 /* 1429 1414 * Override vga_arbiter_add_pci_device()'s I/O based detection 1430 1415 * as it may take the wrong device (e.g. 
on Apple system under ··· 1446 1461 vgaarb_info(dev, "overriding boot device\n"); 1447 1462 vga_set_default_device(vgadev->pdev); 1448 1463 } 1464 + } 1449 1465 #endif 1466 + 1467 + if (!vga_default_device()) { 1468 + list_for_each_entry(vgadev, &vga_list, list) { 1469 + struct device *dev = &vgadev->pdev->dev; 1470 + u16 cmd; 1471 + 1472 + pdev = vgadev->pdev; 1473 + pci_read_config_word(pdev, PCI_COMMAND, &cmd); 1474 + if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { 1475 + vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n"); 1476 + vga_set_default_device(pdev); 1477 + break; 1478 + } 1479 + } 1480 + } 1481 + 1482 + if (!vga_default_device()) { 1483 + vgadev = list_first_entry_or_null(&vga_list, 1484 + struct vga_device, list); 1485 + if (vgadev) { 1486 + struct device *dev = &vgadev->pdev->dev; 1487 + vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n"); 1488 + vga_set_default_device(vgadev->pdev); 1489 + } 1490 + } 1491 + } 1492 + 1493 + static int __init vga_arb_device_init(void) 1494 + { 1495 + int rc; 1496 + struct pci_dev *pdev; 1497 + struct vga_device *vgadev; 1498 + 1499 + rc = misc_register(&vga_arb_device); 1500 + if (rc < 0) 1501 + pr_err("error %d registering device\n", rc); 1502 + 1503 + bus_register_notifier(&pci_bus_type, &pci_notifier); 1504 + 1505 + /* We add all PCI devices satisfying VGA class in the arbiter by 1506 + * default */ 1507 + pdev = NULL; 1508 + while ((pdev = 1509 + pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 1510 + PCI_ANY_ID, pdev)) != NULL) 1511 + vga_arbiter_add_pci_device(pdev); 1512 + 1513 + list_for_each_entry(vgadev, &vga_list, list) { 1514 + struct device *dev = &vgadev->pdev->dev; 1515 + 1450 1516 if (vgadev->bridge_has_one_vga) 1451 1517 vgaarb_info(dev, "bridge control possible\n"); 1452 1518 else 1453 1519 vgaarb_info(dev, "no bridge control possible\n"); 1454 1520 } 1521 + 1522 + vga_arb_select_default_device(); 1455 1523 1456 1524 
pr_info("loaded\n"); 1457 1525 return rc;
+1 -1
include/linux/dma-fence.h
··· 128 128 * implementation know that there is another driver waiting on 129 129 * the signal (ie. hw->sw case). 130 130 * 131 - * This function can be called called from atomic context, but not 131 + * This function can be called from atomic context, but not 132 132 * from irq context, so normal spinlocks can be used. 133 133 * 134 134 * A return value of false indicates the fence already passed,
+19
include/uapi/drm/vc4_drm.h
··· 41 41 #define DRM_VC4_SET_TILING 0x08 42 42 #define DRM_VC4_GET_TILING 0x09 43 43 #define DRM_VC4_LABEL_BO 0x0a 44 + #define DRM_VC4_GEM_MADVISE 0x0b 44 45 45 46 #define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) 46 47 #define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) ··· 54 53 #define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling) 55 54 #define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling) 56 55 #define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo) 56 + #define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise) 57 57 58 58 struct drm_vc4_submit_rcl_surface { 59 59 __u32 hindex; /* Handle index, or ~0 if not present. */ ··· 307 305 #define DRM_VC4_PARAM_SUPPORTS_ETC1 4 308 306 #define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5 309 307 #define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6 308 + #define DRM_VC4_PARAM_SUPPORTS_MADVISE 7 310 309 311 310 struct drm_vc4_get_param { 312 311 __u32 param; ··· 334 331 __u32 handle; 335 332 __u32 len; 336 333 __u64 name; 334 + }; 335 + 336 + /* 337 + * States prefixed with '__' are internal states and cannot be passed to the 338 + * DRM_IOCTL_VC4_GEM_MADVISE ioctl. 339 + */ 340 + #define VC4_MADV_WILLNEED 0 341 + #define VC4_MADV_DONTNEED 1 342 + #define __VC4_MADV_PURGED 2 343 + #define __VC4_MADV_NOTSUPP 3 344 + 345 + struct drm_vc4_gem_madvise { 346 + __u32 handle; 347 + __u32 madv; 348 + __u32 retained; 349 + __u32 pad; 337 350 }; 338 351 339 352 #if defined(__cplusplus)