Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2019-08-19' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.4:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
- dma-buf: add reservation_object_fences helper, relax
reservation_object_add_shared_fence, remove
reservation_object seq number (and then
restored it)
- dma-fence: Shrinkage of the dma_fence structure,
Merge dma_fence_signal and dma_fence_signal_locked,
Store the timestamp in struct dma_fence in a union with
cb_list

Driver Changes:
- More dt-bindings YAML conversions
- More removal of drmP.h includes
- dw-hdmi: Support get_eld and various i2s improvements
- gm12u320: Few fixes
- meson: Global cleanup
- panfrost: Few refactors, Support for GPU heap allocations
- sun4i: Support for DDC enable GPIO
- New panels: TI nspire, NEC NL8048HL11, LG Philips LB035Q02,
Sharp LS037V7DW01, Sony ACX565AKM, Toppoly TD028TTEC1,
Toppoly TD043MTEA1

Signed-off-by: Dave Airlie <airlied@redhat.com>
[airlied: fixup dma_resv rename fallout]

From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819141923.7l2adietcr2pioct@flea

+5070 -3852
-119
Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
··· 1 - Amlogic specific extensions to the Synopsys Designware HDMI Controller 2 - ====================================================================== 3 - 4 - The Amlogic Meson Synopsys Designware Integration is composed of : 5 - - A Synopsys DesignWare HDMI Controller IP 6 - - A TOP control block controlling the Clocks and PHY 7 - - A custom HDMI PHY in order to convert video to TMDS signal 8 - ___________________________________ 9 - | HDMI TOP |<= HPD 10 - |___________________________________| 11 - | | | 12 - | Synopsys HDMI | HDMI PHY |=> TMDS 13 - | Controller |________________| 14 - |___________________________________|<=> DDC 15 - 16 - The HDMI TOP block only supports HPD sensing. 17 - The Synopsys HDMI Controller interrupt is routed through the 18 - TOP Block interrupt. 19 - Communication to the TOP Block and the Synopsys HDMI Controller is done 20 - via a pair of dedicated addr+read/write registers. 21 - The HDMI PHY is configured by registers in the HHI register block. 22 - 23 - Pixel data arrives in 4:4:4 format from the VENC block and the VPU HDMI mux 24 - selects either the ENCI encoder for the 576i or 480i formats or the ENCP 25 - encoder for all the other formats including interlaced HD formats. 26 - 27 - The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate 28 - DVI timings for the HDMI controller. 29 - 30 - Amlogic Meson GXBB, GXL and GXM SoCs families embeds the Synopsys DesignWare 31 - HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF 32 - audio source interfaces. 33 - 34 - Required properties: 35 - - compatible: value should be different for each SoC family as : 36 - - GXBB (S905) : "amlogic,meson-gxbb-dw-hdmi" 37 - - GXL (S905X, S905D) : "amlogic,meson-gxl-dw-hdmi" 38 - - GXM (S912) : "amlogic,meson-gxm-dw-hdmi" 39 - followed by the common "amlogic,meson-gx-dw-hdmi" 40 - - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-dw-hdmi" 41 - - reg: Physical base address and length of the controller's registers. 
42 - - interrupts: The HDMI interrupt number 43 - - clocks, clock-names : must have the phandles to the HDMI iahb and isfr clocks, 44 - and the Amlogic Meson venci clocks as described in 45 - Documentation/devicetree/bindings/clock/clock-bindings.txt, 46 - the clocks are soc specific, the clock-names should be "iahb", "isfr", "venci" 47 - - resets, resets-names: must have the phandles to the HDMI apb, glue and phy 48 - resets as described in : 49 - Documentation/devicetree/bindings/reset/reset.txt, 50 - the reset-names should be "hdmitx_apb", "hdmitx", "hdmitx_phy" 51 - 52 - Optional properties: 53 - - hdmi-supply: Optional phandle to an external 5V regulator to power the HDMI 54 - logic, as described in the file ../regulator/regulator.txt 55 - 56 - Required nodes: 57 - 58 - The connections to the HDMI ports are modeled using the OF graph 59 - bindings specified in Documentation/devicetree/bindings/graph.txt. 60 - 61 - The following table lists for each supported model the port number 62 - corresponding to each HDMI output and input. 
63 - 64 - Port 0 Port 1 65 - ----------------------------------------- 66 - S905 (GXBB) VENC Input TMDS Output 67 - S905X (GXL) VENC Input TMDS Output 68 - S905D (GXL) VENC Input TMDS Output 69 - S912 (GXM) VENC Input TMDS Output 70 - S905X2 (G12A) VENC Input TMDS Output 71 - S905Y2 (G12A) VENC Input TMDS Output 72 - S905D2 (G12A) VENC Input TMDS Output 73 - 74 - Example: 75 - 76 - hdmi-connector { 77 - compatible = "hdmi-connector"; 78 - type = "a"; 79 - 80 - port { 81 - hdmi_connector_in: endpoint { 82 - remote-endpoint = <&hdmi_tx_tmds_out>; 83 - }; 84 - }; 85 - }; 86 - 87 - hdmi_tx: hdmi-tx@c883a000 { 88 - compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; 89 - reg = <0x0 0xc883a000 0x0 0x1c>; 90 - interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>; 91 - resets = <&reset RESET_HDMITX_CAPB3>, 92 - <&reset RESET_HDMI_SYSTEM_RESET>, 93 - <&reset RESET_HDMI_TX>; 94 - reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy"; 95 - clocks = <&clkc CLKID_HDMI_PCLK>, 96 - <&clkc CLKID_CLK81>, 97 - <&clkc CLKID_GCLK_VENCI_INT0>; 98 - clock-names = "isfr", "iahb", "venci"; 99 - #address-cells = <1>; 100 - #size-cells = <0>; 101 - 102 - /* VPU VENC Input */ 103 - hdmi_tx_venc_port: port@0 { 104 - reg = <0>; 105 - 106 - hdmi_tx_in: endpoint { 107 - remote-endpoint = <&hdmi_tx_out>; 108 - }; 109 - }; 110 - 111 - /* TMDS Output */ 112 - hdmi_tx_tmds_port: port@1 { 113 - reg = <1>; 114 - 115 - hdmi_tx_tmds_out: endpoint { 116 - remote-endpoint = <&hdmi_connector_in>; 117 - }; 118 - }; 119 - };
+150
Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + # Copyright 2019 BayLibre, SAS 3 + %YAML 1.2 4 + --- 5 + $id: "http://devicetree.org/schemas/display/amlogic,meson-dw-hdmi.yaml#" 6 + $schema: "http://devicetree.org/meta-schemas/core.yaml#" 7 + 8 + title: Amlogic specific extensions to the Synopsys Designware HDMI Controller 9 + 10 + maintainers: 11 + - Neil Armstrong <narmstrong@baylibre.com> 12 + 13 + description: | 14 + The Amlogic Meson Synopsys Designware Integration is composed of 15 + - A Synopsys DesignWare HDMI Controller IP 16 + - A TOP control block controlling the Clocks and PHY 17 + - A custom HDMI PHY in order to convert video to TMDS signal 18 + ___________________________________ 19 + | HDMI TOP |<= HPD 20 + |___________________________________| 21 + | | | 22 + | Synopsys HDMI | HDMI PHY |=> TMDS 23 + | Controller |________________| 24 + |___________________________________|<=> DDC 25 + 26 + The HDMI TOP block only supports HPD sensing. 27 + The Synopsys HDMI Controller interrupt is routed through the 28 + TOP Block interrupt. 29 + Communication to the TOP Block and the Synopsys HDMI Controller is done 30 + via a pair of dedicated addr+read/write registers. 31 + The HDMI PHY is configured by registers in the HHI register block. 32 + 33 + Pixel data arrives in "4:4:4" format from the VENC block and the VPU HDMI mux 34 + selects either the ENCI encoder for the 576i or 480i formats or the ENCP 35 + encoder for all the other formats including interlaced HD formats. 36 + 37 + The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate 38 + DVI timings for the HDMI controller. 39 + 40 + Amlogic Meson GXBB, GXL and GXM SoCs families embeds the Synopsys DesignWare 41 + HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF 42 + audio source interfaces. 
43 + 44 + properties: 45 + compatible: 46 + oneOf: 47 + - items: 48 + - enum: 49 + - amlogic,meson-gxbb-dw-hdmi # GXBB (S905) 50 + - amlogic,meson-gxl-dw-hdmi # GXL (S905X, S905D) 51 + - amlogic,meson-gxm-dw-hdmi # GXM (S912) 52 + - const: amlogic,meson-gx-dw-hdmi 53 + - enum: 54 + - amlogic,meson-g12a-dw-hdmi # G12A (S905X2, S905Y2, S905D2) 55 + 56 + reg: 57 + maxItems: 1 58 + 59 + interrupts: 60 + maxItems: 1 61 + 62 + clocks: 63 + minItems: 3 64 + 65 + clock-names: 66 + items: 67 + - const: isfr 68 + - const: iahb 69 + - const: venci 70 + 71 + resets: 72 + minItems: 3 73 + 74 + reset-names: 75 + items: 76 + - const: hdmitx_apb 77 + - const: hdmitx 78 + - const: hdmitx_phy 79 + 80 + hdmi-supply: 81 + description: phandle to an external 5V regulator to power the HDMI logic 82 + allOf: 83 + - $ref: /schemas/types.yaml#/definitions/phandle 84 + 85 + port@0: 86 + type: object 87 + description: 88 + A port node pointing to the VENC Input port node. 89 + 90 + port@1: 91 + type: object 92 + description: 93 + A port node pointing to the TMDS Output port node. 
94 + 95 + "#address-cells": 96 + const: 1 97 + 98 + "#size-cells": 99 + const: 0 100 + 101 + "#sound-dai-cells": 102 + const: 0 103 + 104 + required: 105 + - compatible 106 + - reg 107 + - interrupts 108 + - clocks 109 + - clock-names 110 + - resets 111 + - reset-names 112 + - port@0 113 + - port@1 114 + - "#address-cells" 115 + - "#size-cells" 116 + 117 + additionalProperties: false 118 + 119 + examples: 120 + - | 121 + hdmi_tx: hdmi-tx@c883a000 { 122 + compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; 123 + reg = <0xc883a000 0x1c>; 124 + interrupts = <57>; 125 + resets = <&reset_apb>, <&reset_hdmitx>, <&reset_hdmitx_phy>; 126 + reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy"; 127 + clocks = <&clk_isfr>, <&clk_iahb>, <&clk_venci>; 128 + clock-names = "isfr", "iahb", "venci"; 129 + #address-cells = <1>; 130 + #size-cells = <0>; 131 + 132 + /* VPU VENC Input */ 133 + hdmi_tx_venc_port: port@0 { 134 + reg = <0>; 135 + 136 + hdmi_tx_in: endpoint { 137 + remote-endpoint = <&hdmi_tx_out>; 138 + }; 139 + }; 140 + 141 + /* TMDS Output */ 142 + hdmi_tx_tmds_port: port@1 { 143 + reg = <1>; 144 + 145 + hdmi_tx_tmds_out: endpoint { 146 + remote-endpoint = <&hdmi_connector_in>; 147 + }; 148 + }; 149 + }; 150 +
-121
Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
··· 1 - Amlogic Meson Display Controller 2 - ================================ 3 - 4 - The Amlogic Meson Display controller is composed of several components 5 - that are going to be documented below: 6 - 7 - DMC|---------------VPU (Video Processing Unit)----------------|------HHI------| 8 - | vd1 _______ _____________ _________________ | | 9 - D |-------| |----| | | | | HDMI PLL | 10 - D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK | 11 - R |-------| |----| Processing | | | | | 12 - | osd2 | | | |---| Enci ----------|----|-----VDAC------| 13 - R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----| 14 - A | osd1 | | | Blenders | | Encl ----------|----|---------------| 15 - M |-------|______|----|____________| |________________| | | 16 - ___|__________________________________________________________|_______________| 17 - 18 - 19 - VIU: Video Input Unit 20 - --------------------- 21 - 22 - The Video Input Unit is in charge of the pixel scanout from the DDR memory. 23 - It fetches the frames addresses, stride and parameters from the "Canvas" memory. 24 - This part is also in charge of the CSC (Colorspace Conversion). 25 - It can handle 2 OSD Planes and 2 Video Planes. 26 - 27 - VPP: Video Post Processing 28 - -------------------------- 29 - 30 - The Video Post Processing is in charge of the scaling and blending of the 31 - various planes into a single pixel stream. 32 - There is a special "pre-blending" used by the video planes with a dedicated 33 - scaler and a "post-blending" to merge with the OSD Planes. 34 - The OSD planes also have a dedicated scaler for one of the OSD. 
35 - 36 - VENC: Video Encoders 37 - -------------------- 38 - 39 - The VENC is composed of the multiple pixel encoders : 40 - - ENCI : Interlace Video encoder for CVBS and Interlace HDMI 41 - - ENCP : Progressive Video Encoder for HDMI 42 - - ENCL : LCD LVDS Encoder 43 - The VENC Unit gets a Pixel Clocks (VCLK) from a dedicated HDMI PLL and clock 44 - tree and provides the scanout clock to the VPP and VIU. 45 - The ENCI is connected to a single VDAC for Composite Output. 46 - The ENCI and ENCP are connected to an on-chip HDMI Transceiver. 47 - 48 - Device Tree Bindings: 49 - --------------------- 50 - 51 - VPU: Video Processing Unit 52 - -------------------------- 53 - 54 - Required properties: 55 - - compatible: value should be different for each SoC family as : 56 - - GXBB (S905) : "amlogic,meson-gxbb-vpu" 57 - - GXL (S905X, S905D) : "amlogic,meson-gxl-vpu" 58 - - GXM (S912) : "amlogic,meson-gxm-vpu" 59 - followed by the common "amlogic,meson-gx-vpu" 60 - - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-vpu" 61 - - reg: base address and size of he following memory-mapped regions : 62 - - vpu 63 - - hhi 64 - - reg-names: should contain the names of the previous memory regions 65 - - interrupts: should contain the VENC Vsync interrupt number 66 - - amlogic,canvas: phandle to canvas provider node as described in the file 67 - ../soc/amlogic/amlogic,canvas.txt 68 - 69 - Optional properties: 70 - - power-domains: Optional phandle to associated power domain as described in 71 - the file ../power/power_domain.txt 72 - 73 - Required nodes: 74 - 75 - The connections to the VPU output video ports are modeled using the OF graph 76 - bindings specified in Documentation/devicetree/bindings/graph.txt. 77 - 78 - The following table lists for each supported model the port number 79 - corresponding to each VPU output. 
80 - 81 - Port 0 Port 1 82 - ----------------------------------------- 83 - S905 (GXBB) CVBS VDAC HDMI-TX 84 - S905X (GXL) CVBS VDAC HDMI-TX 85 - S905D (GXL) CVBS VDAC HDMI-TX 86 - S912 (GXM) CVBS VDAC HDMI-TX 87 - S905X2 (G12A) CVBS VDAC HDMI-TX 88 - S905Y2 (G12A) CVBS VDAC HDMI-TX 89 - S905D2 (G12A) CVBS VDAC HDMI-TX 90 - 91 - Example: 92 - 93 - tv-connector { 94 - compatible = "composite-video-connector"; 95 - 96 - port { 97 - tv_connector_in: endpoint { 98 - remote-endpoint = <&cvbs_vdac_out>; 99 - }; 100 - }; 101 - }; 102 - 103 - vpu: vpu@d0100000 { 104 - compatible = "amlogic,meson-gxbb-vpu"; 105 - reg = <0x0 0xd0100000 0x0 0x100000>, 106 - <0x0 0xc883c000 0x0 0x1000>, 107 - <0x0 0xc8838000 0x0 0x1000>; 108 - reg-names = "vpu", "hhi", "dmc"; 109 - interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>; 110 - #address-cells = <1>; 111 - #size-cells = <0>; 112 - 113 - /* CVBS VDAC output port */ 114 - port@0 { 115 - reg = <0>; 116 - 117 - cvbs_vdac_out: endpoint { 118 - remote-endpoint = <&tv_connector_in>; 119 - }; 120 - }; 121 - };
+137
Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + # Copyright 2019 BayLibre, SAS 3 + %YAML 1.2 4 + --- 5 + $id: "http://devicetree.org/schemas/display/amlogic,meson-vpu.yaml#" 6 + $schema: "http://devicetree.org/meta-schemas/core.yaml#" 7 + 8 + title: Amlogic Meson Display Controller 9 + 10 + maintainers: 11 + - Neil Armstrong <narmstrong@baylibre.com> 12 + 13 + description: | 14 + The Amlogic Meson Display controller is composed of several components 15 + that are going to be documented below 16 + 17 + DMC|---------------VPU (Video Processing Unit)----------------|------HHI------| 18 + | vd1 _______ _____________ _________________ | | 19 + D |-------| |----| | | | | HDMI PLL | 20 + D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK | 21 + R |-------| |----| Processing | | | | | 22 + | osd2 | | | |---| Enci ----------|----|-----VDAC------| 23 + R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----| 24 + A | osd1 | | | Blenders | | Encl ----------|----|---------------| 25 + M |-------|______|----|____________| |________________| | | 26 + ___|__________________________________________________________|_______________| 27 + 28 + 29 + VIU: Video Input Unit 30 + --------------------- 31 + 32 + The Video Input Unit is in charge of the pixel scanout from the DDR memory. 33 + It fetches the frames addresses, stride and parameters from the "Canvas" memory. 34 + This part is also in charge of the CSC (Colorspace Conversion). 35 + It can handle 2 OSD Planes and 2 Video Planes. 36 + 37 + VPP: Video Post Processing 38 + -------------------------- 39 + 40 + The Video Post Processing is in charge of the scaling and blending of the 41 + various planes into a single pixel stream. 42 + There is a special "pre-blending" used by the video planes with a dedicated 43 + scaler and a "post-blending" to merge with the OSD Planes. 44 + The OSD planes also have a dedicated scaler for one of the OSD. 
45 + 46 + VENC: Video Encoders 47 + -------------------- 48 + 49 + The VENC is composed of the multiple pixel encoders 50 + - ENCI : Interlace Video encoder for CVBS and Interlace HDMI 51 + - ENCP : Progressive Video Encoder for HDMI 52 + - ENCL : LCD LVDS Encoder 53 + The VENC Unit gets a Pixel Clocks (VCLK) from a dedicated HDMI PLL and clock 54 + tree and provides the scanout clock to the VPP and VIU. 55 + The ENCI is connected to a single VDAC for Composite Output. 56 + The ENCI and ENCP are connected to an on-chip HDMI Transceiver. 57 + 58 + properties: 59 + compatible: 60 + oneOf: 61 + - items: 62 + - enum: 63 + - amlogic,meson-gxbb-vpu # GXBB (S905) 64 + - amlogic,meson-gxl-vpu # GXL (S905X, S905D) 65 + - amlogic,meson-gxm-vpu # GXM (S912) 66 + - const: amlogic,meson-gx-vpu 67 + - enum: 68 + - amlogic,meson-g12a-vpu # G12A (S905X2, S905Y2, S905D2) 69 + 70 + reg: 71 + maxItems: 2 72 + 73 + reg-names: 74 + items: 75 + - const: vpu 76 + - const: hhi 77 + 78 + interrupts: 79 + maxItems: 1 80 + 81 + power-domains: 82 + maxItems: 1 83 + description: phandle to the associated power domain 84 + 85 + port@0: 86 + type: object 87 + description: 88 + A port node pointing to the CVBS VDAC port node. 89 + 90 + port@1: 91 + type: object 92 + description: 93 + A port node pointing to the HDMI-TX port node. 
94 + 95 + "#address-cells": 96 + const: 1 97 + 98 + "#size-cells": 99 + const: 0 100 + 101 + required: 102 + - compatible 103 + - reg 104 + - interrupts 105 + - port@0 106 + - port@1 107 + - "#address-cells" 108 + - "#size-cells" 109 + 110 + examples: 111 + - | 112 + vpu: vpu@d0100000 { 113 + compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu"; 114 + reg = <0xd0100000 0x100000>, <0xc883c000 0x1000>; 115 + reg-names = "vpu", "hhi"; 116 + interrupts = <3>; 117 + #address-cells = <1>; 118 + #size-cells = <0>; 119 + 120 + /* CVBS VDAC output port */ 121 + port@0 { 122 + reg = <0>; 123 + 124 + cvbs_vdac_out: endpoint { 125 + remote-endpoint = <&tv_connector_in>; 126 + }; 127 + }; 128 + 129 + /* HDMI TX output port */ 130 + port@1 { 131 + reg = <1>; 132 + 133 + hdmi_tx_out: endpoint { 134 + remote-endpoint = <&hdmi_tx_in>; 135 + }; 136 + }; 137 + };
+1
Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
··· 9 9 - label: a symbolic name for the connector 10 10 - hpd-gpios: HPD GPIO number 11 11 - ddc-i2c-bus: phandle link to the I2C controller used for DDC EDID probing 12 + - ddc-en-gpios: signal to enable DDC bus 12 13 13 14 Required nodes: 14 15 - Video port for HDMI input
+62
Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/nec,nl8048hl11.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: NEC NL8048HL11 4.1" WVGA TFT LCD panel 8 + 9 + description: 10 + The NEC NL8048HL11 is a 4.1" WVGA TFT LCD panel with a 24-bit RGB parallel 11 + data interface and an SPI control interface. 12 + 13 + maintainers: 14 + - Laurent Pinchart <laurent.pinchart@ideasonboard.com> 15 + 16 + allOf: 17 + - $ref: panel-common.yaml# 18 + 19 + properties: 20 + compatible: 21 + const: nec,nl8048hl11 22 + 23 + label: true 24 + port: true 25 + reg: true 26 + reset-gpios: true 27 + 28 + spi-max-frequency: 29 + maximum: 10000000 30 + 31 + required: 32 + - compatible 33 + - reg 34 + - reset-gpios 35 + - port 36 + 37 + additionalProperties: false 38 + 39 + examples: 40 + - | 41 + #include <dt-bindings/gpio/gpio.h> 42 + 43 + spi0 { 44 + #address-cells = <1>; 45 + #size-cells = <0>; 46 + 47 + lcd_panel: panel@0 { 48 + compatible = "nec,nl8048hl11"; 49 + reg = <0>; 50 + spi-max-frequency = <10000000>; 51 + 52 + reset-gpios = <&gpio7 7 GPIO_ACTIVE_LOW>; 53 + 54 + port { 55 + lcd_in: endpoint { 56 + remote-endpoint = <&dpi_out>; 57 + }; 58 + }; 59 + }; 60 + }; 61 + 62 + ...
+36
Documentation/devicetree/bindings/display/panel/ti,nspire.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/ti,nspire.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Texas Instruments NSPIRE Display Panels 8 + 9 + maintainers: 10 + - Linus Walleij <linus.walleij@linaro.org> 11 + 12 + allOf: 13 + - $ref: panel-common.yaml# 14 + 15 + properties: 16 + compatible: 17 + enum: 18 + - ti,nspire-cx-lcd-panel 19 + - ti,nspire-classic-lcd-panel 20 + port: true 21 + 22 + required: 23 + - compatible 24 + 25 + additionalProperties: false 26 + 27 + examples: 28 + - | 29 + panel { 30 + compatible = "ti,nspire-cx-lcd-panel"; 31 + port { 32 + panel_in: endpoint { 33 + remote-endpoint = <&pads>; 34 + }; 35 + }; 36 + };
+5
Documentation/devicetree/bindings/vendor-prefixes.yaml
··· 511 511 description: Lenovo Group Ltd. 512 512 "^lg,.*": 513 513 description: LG Corporation 514 + "^lgphilips,.*": 515 + description: LG Display 514 516 "^libretech,.*": 515 517 description: Shenzhen Libre Technology Co., Ltd 516 518 "^licheepi,.*": ··· 935 933 description: Tecon Microprocessor Technologies, LLC. 936 934 "^topeet,.*": 937 935 description: Topeet 936 + "^toppoly,.*": 937 + description: TPO (deprecated, use tpo) 938 + deprecated: true 938 939 "^toradex,.*": 939 940 description: Toradex AG 940 941 "^toshiba,.*":
+2 -2
MAINTAINERS
··· 5334 5334 W: http://linux-meson.com/ 5335 5335 S: Supported 5336 5336 F: drivers/gpu/drm/meson/ 5337 - F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt 5338 - F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt 5337 + F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml 5338 + F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml 5339 5339 F: Documentation/gpu/meson.rst 5340 5340 T: git git://anongit.freedesktop.org/drm/drm-misc 5341 5341
+1 -1
drivers/dma-buf/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ 3 - reservation.o seqno-fence.o 3 + dma-resv.o seqno-fence.o 4 4 obj-$(CONFIG_SYNC_FILE) += sync_file.o 5 5 obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o 6 6 obj-$(CONFIG_UDMABUF) += udmabuf.o
+14 -14
drivers/dma-buf/dma-buf.c
··· 21 21 #include <linux/module.h> 22 22 #include <linux/seq_file.h> 23 23 #include <linux/poll.h> 24 - #include <linux/reservation.h> 24 + #include <linux/dma-resv.h> 25 25 #include <linux/mm.h> 26 26 #include <linux/mount.h> 27 27 #include <linux/pseudo_fs.h> ··· 104 104 list_del(&dmabuf->list_node); 105 105 mutex_unlock(&db_list.lock); 106 106 107 - if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) 108 - reservation_object_fini(dmabuf->resv); 107 + if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) 108 + dma_resv_fini(dmabuf->resv); 109 109 110 110 module_put(dmabuf->owner); 111 111 kfree(dmabuf); ··· 165 165 * To support cross-device and cross-driver synchronization of buffer access 166 166 * implicit fences (represented internally in the kernel with &struct fence) can 167 167 * be attached to a &dma_buf. The glue for that and a few related things are 168 - * provided in the &reservation_object structure. 168 + * provided in the &dma_resv structure. 169 169 * 170 170 * Userspace can query the state of these implicitly tracked fences using poll() 171 171 * and related system calls: ··· 195 195 static __poll_t dma_buf_poll(struct file *file, poll_table *poll) 196 196 { 197 197 struct dma_buf *dmabuf; 198 - struct reservation_object *resv; 199 - struct reservation_object_list *fobj; 198 + struct dma_resv *resv; 199 + struct dma_resv_list *fobj; 200 200 struct dma_fence *fence_excl; 201 201 __poll_t events; 202 202 unsigned shared_count, seq; ··· 506 506 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) 507 507 { 508 508 struct dma_buf *dmabuf; 509 - struct reservation_object *resv = exp_info->resv; 509 + struct dma_resv *resv = exp_info->resv; 510 510 struct file *file; 511 511 size_t alloc_size = sizeof(struct dma_buf); 512 512 int ret; 513 513 514 514 if (!exp_info->resv) 515 - alloc_size += sizeof(struct reservation_object); 515 + alloc_size += sizeof(struct dma_resv); 516 516 else 517 517 /* prevent &dma_buf[1] == 
dma_buf->resv */ 518 518 alloc_size += 1; ··· 544 544 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; 545 545 546 546 if (!resv) { 547 - resv = (struct reservation_object *)&dmabuf[1]; 548 - reservation_object_init(resv); 547 + resv = (struct dma_resv *)&dmabuf[1]; 548 + dma_resv_init(resv); 549 549 } 550 550 dmabuf->resv = resv; 551 551 ··· 909 909 { 910 910 bool write = (direction == DMA_BIDIRECTIONAL || 911 911 direction == DMA_TO_DEVICE); 912 - struct reservation_object *resv = dmabuf->resv; 912 + struct dma_resv *resv = dmabuf->resv; 913 913 long ret; 914 914 915 915 /* Wait on any implicit rendering fences */ 916 - ret = reservation_object_wait_timeout_rcu(resv, write, true, 916 + ret = dma_resv_wait_timeout_rcu(resv, write, true, 917 917 MAX_SCHEDULE_TIMEOUT); 918 918 if (ret < 0) 919 919 return ret; ··· 1154 1154 int ret; 1155 1155 struct dma_buf *buf_obj; 1156 1156 struct dma_buf_attachment *attach_obj; 1157 - struct reservation_object *robj; 1158 - struct reservation_object_list *fobj; 1157 + struct dma_resv *robj; 1158 + struct dma_resv_list *fobj; 1159 1159 struct dma_fence *fence; 1160 1160 unsigned seq; 1161 1161 int count = 0, attach_count, shared_count, i;
+31 -1
drivers/dma-buf/dma-fence-array.c
··· 13 13 #include <linux/slab.h> 14 14 #include <linux/dma-fence-array.h> 15 15 16 + #define PENDING_ERROR 1 17 + 16 18 static const char *dma_fence_array_get_driver_name(struct dma_fence *fence) 17 19 { 18 20 return "dma_fence_array"; ··· 25 23 return "unbound"; 26 24 } 27 25 26 + static void dma_fence_array_set_pending_error(struct dma_fence_array *array, 27 + int error) 28 + { 29 + /* 30 + * Propagate the first error reported by any of our fences, but only 31 + * before we ourselves are signaled. 32 + */ 33 + if (error) 34 + cmpxchg(&array->base.error, PENDING_ERROR, error); 35 + } 36 + 37 + static void dma_fence_array_clear_pending_error(struct dma_fence_array *array) 38 + { 39 + /* Clear the error flag if not actually set. */ 40 + cmpxchg(&array->base.error, PENDING_ERROR, 0); 41 + } 42 + 28 43 static void irq_dma_fence_array_work(struct irq_work *wrk) 29 44 { 30 45 struct dma_fence_array *array = container_of(wrk, typeof(*array), work); 46 + 47 + dma_fence_array_clear_pending_error(array); 31 48 32 49 dma_fence_signal(&array->base); 33 50 dma_fence_put(&array->base); ··· 58 37 struct dma_fence_array_cb *array_cb = 59 38 container_of(cb, struct dma_fence_array_cb, cb); 60 39 struct dma_fence_array *array = array_cb->array; 40 + 41 + dma_fence_array_set_pending_error(array, f->error); 61 42 62 43 if (atomic_dec_and_test(&array->num_pending)) 63 44 irq_work_queue(&array->work); ··· 86 63 dma_fence_get(&array->base); 87 64 if (dma_fence_add_callback(array->fences[i], &cb[i].cb, 88 65 dma_fence_array_cb_func)) { 66 + int error = array->fences[i]->error; 67 + 68 + dma_fence_array_set_pending_error(array, error); 89 69 dma_fence_put(&array->base); 90 - if (atomic_dec_and_test(&array->num_pending)) 70 + if (atomic_dec_and_test(&array->num_pending)) { 71 + dma_fence_array_clear_pending_error(array); 91 72 return false; 73 + } 92 74 } 93 75 } 94 76 ··· 169 141 array->num_fences = num_fences; 170 142 atomic_set(&array->num_pending, signal_on_any ? 
1 : num_fences); 171 143 array->fences = fences; 144 + 145 + array->base.error = PENDING_ERROR; 172 146 173 147 return array; 174 148 }
+20 -35
drivers/dma-buf/dma-fence.c
··· 60 60 * 61 61 * - Then there's also implicit fencing, where the synchronization points are 62 62 * implicitly passed around as part of shared &dma_buf instances. Such 63 - * implicit fences are stored in &struct reservation_object through the 63 + * implicit fences are stored in &struct dma_resv through the 64 64 * &dma_buf.resv pointer. 65 65 */ 66 66 ··· 129 129 int dma_fence_signal_locked(struct dma_fence *fence) 130 130 { 131 131 struct dma_fence_cb *cur, *tmp; 132 - int ret = 0; 132 + struct list_head cb_list; 133 133 134 134 lockdep_assert_held(fence->lock); 135 135 136 - if (WARN_ON(!fence)) 136 + if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 137 + &fence->flags))) 137 138 return -EINVAL; 138 139 139 - if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 140 - ret = -EINVAL; 140 + /* Stash the cb_list before replacing it with the timestamp */ 141 + list_replace(&fence->cb_list, &cb_list); 141 142 142 - /* 143 - * we might have raced with the unlocked dma_fence_signal, 144 - * still run through all callbacks 145 - */ 146 - } else { 147 - fence->timestamp = ktime_get(); 148 - set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); 149 - trace_dma_fence_signaled(fence); 150 - } 143 + fence->timestamp = ktime_get(); 144 + set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); 145 + trace_dma_fence_signaled(fence); 151 146 152 - list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { 153 - list_del_init(&cur->node); 147 + list_for_each_entry_safe(cur, tmp, &cb_list, node) { 148 + INIT_LIST_HEAD(&cur->node); 154 149 cur->func(fence, cur); 155 150 } 156 - return ret; 151 + 152 + return 0; 157 153 } 158 154 EXPORT_SYMBOL(dma_fence_signal_locked); 159 155 ··· 169 173 int dma_fence_signal(struct dma_fence *fence) 170 174 { 171 175 unsigned long flags; 176 + int ret; 172 177 173 178 if (!fence) 174 179 return -EINVAL; 175 180 176 - if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 177 - return -EINVAL; 181 + 
spin_lock_irqsave(fence->lock, flags); 182 + ret = dma_fence_signal_locked(fence); 183 + spin_unlock_irqrestore(fence->lock, flags); 178 184 179 - fence->timestamp = ktime_get(); 180 - set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); 181 - trace_dma_fence_signaled(fence); 182 - 183 - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { 184 - struct dma_fence_cb *cur, *tmp; 185 - 186 - spin_lock_irqsave(fence->lock, flags); 187 - list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { 188 - list_del_init(&cur->node); 189 - cur->func(fence, cur); 190 - } 191 - spin_unlock_irqrestore(fence->lock, flags); 192 - } 193 - return 0; 185 + return ret; 194 186 } 195 187 EXPORT_SYMBOL(dma_fence_signal); 196 188 ··· 232 248 233 249 trace_dma_fence_destroy(fence); 234 250 235 - if (WARN(!list_empty(&fence->cb_list), 251 + if (WARN(!list_empty(&fence->cb_list) && 252 + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags), 236 253 "Fence %s:%s:%llx:%llx released with pending signals!\n", 237 254 fence->ops->get_driver_name(fence), 238 255 fence->ops->get_timeline_name(fence),
+73 -83
drivers/dma-buf/reservation.c drivers/dma-buf/dma-resv.c
··· 32 32 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 33 33 */ 34 34 35 - #include <linux/reservation.h> 35 + #include <linux/dma-resv.h> 36 36 #include <linux/export.h> 37 37 38 38 /** ··· 56 56 EXPORT_SYMBOL(reservation_seqcount_string); 57 57 58 58 /** 59 - * reservation_object_list_alloc - allocate fence list 59 + * dma_resv_list_alloc - allocate fence list 60 60 * @shared_max: number of fences we need space for 61 61 * 62 - * Allocate a new reservation_object_list and make sure to correctly initialize 62 + * Allocate a new dma_resv_list and make sure to correctly initialize 63 63 * shared_max. 64 64 */ 65 - static struct reservation_object_list * 66 - reservation_object_list_alloc(unsigned int shared_max) 65 + static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max) 67 66 { 68 - struct reservation_object_list *list; 67 + struct dma_resv_list *list; 69 68 70 69 list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL); 71 70 if (!list) ··· 77 78 } 78 79 79 80 /** 80 - * reservation_object_list_free - free fence list 81 + * dma_resv_list_free - free fence list 81 82 * @list: list to free 82 83 * 83 - * Free a reservation_object_list and make sure to drop all references. 84 + * Free a dma_resv_list and make sure to drop all references. 
84 85 */ 85 - static void reservation_object_list_free(struct reservation_object_list *list) 86 + static void dma_resv_list_free(struct dma_resv_list *list) 86 87 { 87 88 unsigned int i; 88 89 ··· 96 97 } 97 98 98 99 /** 99 - * reservation_object_init - initialize a reservation object 100 + * dma_resv_init - initialize a reservation object 100 101 * @obj: the reservation object 101 102 */ 102 - void reservation_object_init(struct reservation_object *obj) 103 + void dma_resv_init(struct dma_resv *obj) 103 104 { 104 105 ww_mutex_init(&obj->lock, &reservation_ww_class); 105 106 ··· 108 109 RCU_INIT_POINTER(obj->fence, NULL); 109 110 RCU_INIT_POINTER(obj->fence_excl, NULL); 110 111 } 111 - EXPORT_SYMBOL(reservation_object_init); 112 + EXPORT_SYMBOL(dma_resv_init); 112 113 113 114 /** 114 - * reservation_object_fini - destroys a reservation object 115 + * dma_resv_fini - destroys a reservation object 115 116 * @obj: the reservation object 116 117 */ 117 - void reservation_object_fini(struct reservation_object *obj) 118 + void dma_resv_fini(struct dma_resv *obj) 118 119 { 119 - struct reservation_object_list *fobj; 120 + struct dma_resv_list *fobj; 120 121 struct dma_fence *excl; 121 122 122 123 /* ··· 128 129 dma_fence_put(excl); 129 130 130 131 fobj = rcu_dereference_protected(obj->fence, 1); 131 - reservation_object_list_free(fobj); 132 + dma_resv_list_free(fobj); 132 133 ww_mutex_destroy(&obj->lock); 133 134 } 134 - EXPORT_SYMBOL(reservation_object_fini); 135 + EXPORT_SYMBOL(dma_resv_fini); 135 136 136 137 /** 137 - * reservation_object_reserve_shared - Reserve space to add shared fences to 138 - * a reservation_object. 138 + * dma_resv_reserve_shared - Reserve space to add shared fences to 139 + * a dma_resv. 139 140 * @obj: reservation object 140 141 * @num_fences: number of fences we want to add 141 142 * 142 - * Should be called before reservation_object_add_shared_fence(). Must 143 + * Should be called before dma_resv_add_shared_fence(). 
Must 143 144 * be called with obj->lock held. 144 145 * 145 146 * RETURNS 146 147 * Zero for success, or -errno 147 148 */ 148 - int reservation_object_reserve_shared(struct reservation_object *obj, 149 - unsigned int num_fences) 149 + int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) 150 150 { 151 - struct reservation_object_list *old, *new; 151 + struct dma_resv_list *old, *new; 152 152 unsigned int i, j, k, max; 153 153 154 - reservation_object_assert_held(obj); 154 + dma_resv_assert_held(obj); 155 155 156 - old = reservation_object_get_list(obj); 156 + old = dma_resv_get_list(obj); 157 157 158 158 if (old && old->shared_max) { 159 159 if ((old->shared_count + num_fences) <= old->shared_max) ··· 164 166 max = 4; 165 167 } 166 168 167 - new = reservation_object_list_alloc(max); 169 + new = dma_resv_list_alloc(max); 168 170 if (!new) 169 171 return -ENOMEM; 170 172 ··· 178 180 struct dma_fence *fence; 179 181 180 182 fence = rcu_dereference_protected(old->shared[i], 181 - reservation_object_held(obj)); 183 + dma_resv_held(obj)); 182 184 if (dma_fence_is_signaled(fence)) 183 185 RCU_INIT_POINTER(new->shared[--k], fence); 184 186 else ··· 204 206 struct dma_fence *fence; 205 207 206 208 fence = rcu_dereference_protected(new->shared[i], 207 - reservation_object_held(obj)); 209 + dma_resv_held(obj)); 208 210 dma_fence_put(fence); 209 211 } 210 212 kfree_rcu(old, rcu); 211 213 212 214 return 0; 213 215 } 214 - EXPORT_SYMBOL(reservation_object_reserve_shared); 216 + EXPORT_SYMBOL(dma_resv_reserve_shared); 215 217 216 218 /** 217 - * reservation_object_add_shared_fence - Add a fence to a shared slot 219 + * dma_resv_add_shared_fence - Add a fence to a shared slot 218 220 * @obj: the reservation object 219 221 * @fence: the shared fence to add 220 222 * 221 223 * Add a fence to a shared slot, obj->lock must be held, and 222 - * reservation_object_reserve_shared() has been called. 224 + * dma_resv_reserve_shared() has been called. 
223 225 */ 224 - void reservation_object_add_shared_fence(struct reservation_object *obj, 225 - struct dma_fence *fence) 226 + void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) 226 227 { 227 - struct reservation_object_list *fobj; 228 + struct dma_resv_list *fobj; 228 229 struct dma_fence *old; 229 230 unsigned int i, count; 230 231 231 232 dma_fence_get(fence); 232 233 233 - reservation_object_assert_held(obj); 234 + dma_resv_assert_held(obj); 234 235 235 - fobj = reservation_object_get_list(obj); 236 + fobj = dma_resv_get_list(obj); 236 237 count = fobj->shared_count; 237 238 238 239 preempt_disable(); ··· 240 243 for (i = 0; i < count; ++i) { 241 244 242 245 old = rcu_dereference_protected(fobj->shared[i], 243 - reservation_object_held(obj)); 246 + dma_resv_held(obj)); 244 247 if (old->context == fence->context || 245 248 dma_fence_is_signaled(old)) 246 249 goto replace; ··· 259 262 preempt_enable(); 260 263 dma_fence_put(old); 261 264 } 262 - EXPORT_SYMBOL(reservation_object_add_shared_fence); 265 + EXPORT_SYMBOL(dma_resv_add_shared_fence); 263 266 264 267 /** 265 - * reservation_object_add_excl_fence - Add an exclusive fence. 268 + * dma_resv_add_excl_fence - Add an exclusive fence. 266 269 * @obj: the reservation object 267 270 * @fence: the shared fence to add 268 271 * 269 272 * Add a fence to the exclusive slot. The obj->lock must be held. 
270 273 */ 271 - void reservation_object_add_excl_fence(struct reservation_object *obj, 272 - struct dma_fence *fence) 274 + void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) 273 275 { 274 - struct dma_fence *old_fence = reservation_object_get_excl(obj); 275 - struct reservation_object_list *old; 276 + struct dma_fence *old_fence = dma_resv_get_excl(obj); 277 + struct dma_resv_list *old; 276 278 u32 i = 0; 277 279 278 - reservation_object_assert_held(obj); 280 + dma_resv_assert_held(obj); 279 281 280 - old = reservation_object_get_list(obj); 282 + old = dma_resv_get_list(obj); 281 283 if (old) 282 284 i = old->shared_count; 283 285 ··· 295 299 /* inplace update, no shared fences */ 296 300 while (i--) 297 301 dma_fence_put(rcu_dereference_protected(old->shared[i], 298 - reservation_object_held(obj))); 302 + dma_resv_held(obj))); 299 303 300 304 dma_fence_put(old_fence); 301 305 } 302 - EXPORT_SYMBOL(reservation_object_add_excl_fence); 306 + EXPORT_SYMBOL(dma_resv_add_excl_fence); 303 307 304 308 /** 305 - * reservation_object_copy_fences - Copy all fences from src to dst. 309 + * dma_resv_copy_fences - Copy all fences from src to dst. 306 310 * @dst: the destination reservation object 307 311 * @src: the source reservation object 308 312 * 309 313 * Copy all fences from src to dst. dst-lock must be held. 
310 314 */ 311 - int reservation_object_copy_fences(struct reservation_object *dst, 312 - struct reservation_object *src) 315 + int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) 313 316 { 314 - struct reservation_object_list *src_list, *dst_list; 317 + struct dma_resv_list *src_list, *dst_list; 315 318 struct dma_fence *old, *new; 316 319 unsigned i; 317 320 318 - reservation_object_assert_held(dst); 321 + dma_resv_assert_held(dst); 319 322 320 323 rcu_read_lock(); 321 324 src_list = rcu_dereference(src->fence); ··· 325 330 326 331 rcu_read_unlock(); 327 332 328 - dst_list = reservation_object_list_alloc(shared_count); 333 + dst_list = dma_resv_list_alloc(shared_count); 329 334 if (!dst_list) 330 335 return -ENOMEM; 331 336 ··· 346 351 continue; 347 352 348 353 if (!dma_fence_get_rcu(fence)) { 349 - reservation_object_list_free(dst_list); 354 + dma_resv_list_free(dst_list); 350 355 src_list = rcu_dereference(src->fence); 351 356 goto retry; 352 357 } ··· 365 370 new = dma_fence_get_rcu_safe(&src->fence_excl); 366 371 rcu_read_unlock(); 367 372 368 - src_list = reservation_object_get_list(dst); 369 - old = reservation_object_get_excl(dst); 373 + src_list = dma_resv_get_list(dst); 374 + old = dma_resv_get_excl(dst); 370 375 371 376 preempt_disable(); 372 377 write_seqcount_begin(&dst->seq); ··· 376 381 write_seqcount_end(&dst->seq); 377 382 preempt_enable(); 378 383 379 - reservation_object_list_free(src_list); 384 + dma_resv_list_free(src_list); 380 385 dma_fence_put(old); 381 386 382 387 return 0; 383 388 } 384 - EXPORT_SYMBOL(reservation_object_copy_fences); 389 + EXPORT_SYMBOL(dma_resv_copy_fences); 385 390 386 391 /** 387 - * reservation_object_get_fences_rcu - Get an object's shared and exclusive 392 + * dma_resv_get_fences_rcu - Get an object's shared and exclusive 388 393 * fences without update side lock held 389 394 * @obj: the reservation object 390 395 * @pfence_excl: the returned exclusive fence (or NULL) ··· 396 401 * exclusive fence 
is not specified the fence is put into the array of the 397 402 * shared fences as well. Returns either zero or -ENOMEM. 398 403 */ 399 - int reservation_object_get_fences_rcu(struct reservation_object *obj, 400 - struct dma_fence **pfence_excl, 401 - unsigned *pshared_count, 402 - struct dma_fence ***pshared) 404 + int dma_resv_get_fences_rcu(struct dma_resv *obj, 405 + struct dma_fence **pfence_excl, 406 + unsigned *pshared_count, 407 + struct dma_fence ***pshared) 403 408 { 404 409 struct dma_fence **shared = NULL; 405 410 struct dma_fence *fence_excl; ··· 407 412 int ret = 1; 408 413 409 414 do { 410 - struct reservation_object_list *fobj; 415 + struct dma_resv_list *fobj; 411 416 unsigned int i, seq; 412 417 size_t sz = 0; 413 418 ··· 482 487 *pshared = shared; 483 488 return ret; 484 489 } 485 - EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); 490 + EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); 486 491 487 492 /** 488 - * reservation_object_wait_timeout_rcu - Wait on reservation's objects 493 + * dma_resv_wait_timeout_rcu - Wait on reservation's objects 489 494 * shared and/or exclusive fences. 490 495 * @obj: the reservation object 491 496 * @wait_all: if true, wait on all fences, else wait on just exclusive fence ··· 496 501 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 497 502 * greater than zer on success. 
498 503 */ 499 - long reservation_object_wait_timeout_rcu(struct reservation_object *obj, 500 - bool wait_all, bool intr, 501 - unsigned long timeout) 504 + long dma_resv_wait_timeout_rcu(struct dma_resv *obj, 505 + bool wait_all, bool intr, 506 + unsigned long timeout) 502 507 { 503 508 struct dma_fence *fence; 504 509 unsigned seq, shared_count; ··· 526 531 } 527 532 528 533 if (wait_all) { 529 - struct reservation_object_list *fobj = 530 - rcu_dereference(obj->fence); 534 + struct dma_resv_list *fobj = rcu_dereference(obj->fence); 531 535 532 536 if (fobj) 533 537 shared_count = fobj->shared_count; ··· 569 575 rcu_read_unlock(); 570 576 goto retry; 571 577 } 572 - EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); 578 + EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); 573 579 574 580 575 - static inline int 576 - reservation_object_test_signaled_single(struct dma_fence *passed_fence) 581 + static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) 577 582 { 578 583 struct dma_fence *fence, *lfence = passed_fence; 579 584 int ret = 1; ··· 589 596 } 590 597 591 598 /** 592 - * reservation_object_test_signaled_rcu - Test if a reservation object's 599 + * dma_resv_test_signaled_rcu - Test if a reservation object's 593 600 * fences have been signaled. 
594 601 * @obj: the reservation object 595 602 * @test_all: if true, test all fences, otherwise only test the exclusive ··· 598 605 * RETURNS 599 606 * true if all fences signaled, else false 600 607 */ 601 - bool reservation_object_test_signaled_rcu(struct reservation_object *obj, 602 - bool test_all) 608 + bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) 603 609 { 604 610 unsigned seq, shared_count; 605 611 int ret; ··· 612 620 if (test_all) { 613 621 unsigned i; 614 622 615 - struct reservation_object_list *fobj = 616 - rcu_dereference(obj->fence); 623 + struct dma_resv_list *fobj = rcu_dereference(obj->fence); 617 624 618 625 if (fobj) 619 626 shared_count = fobj->shared_count; ··· 620 629 for (i = 0; i < shared_count; ++i) { 621 630 struct dma_fence *fence = rcu_dereference(fobj->shared[i]); 622 631 623 - ret = reservation_object_test_signaled_single(fence); 632 + ret = dma_resv_test_signaled_single(fence); 624 633 if (ret < 0) 625 634 goto retry; 626 635 else if (!ret) ··· 635 644 struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); 636 645 637 646 if (fence_excl) { 638 - ret = reservation_object_test_signaled_single( 639 - fence_excl); 647 + ret = dma_resv_test_signaled_single(fence_excl); 640 648 if (ret < 0) 641 649 goto retry; 642 650 ··· 647 657 rcu_read_unlock(); 648 658 return ret; 649 659 } 650 - EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu); 660 + EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+7 -9
drivers/dma-buf/sw_sync.c
··· 132 132 { 133 133 struct sync_pt *pt = dma_fence_to_sync_pt(fence); 134 134 struct sync_timeline *parent = dma_fence_parent(fence); 135 + unsigned long flags; 135 136 137 + spin_lock_irqsave(fence->lock, flags); 136 138 if (!list_empty(&pt->link)) { 137 - unsigned long flags; 138 - 139 - spin_lock_irqsave(fence->lock, flags); 140 - if (!list_empty(&pt->link)) { 141 - list_del(&pt->link); 142 - rb_erase(&pt->node, &parent->pt_tree); 143 - } 144 - spin_unlock_irqrestore(fence->lock, flags); 139 + list_del(&pt->link); 140 + rb_erase(&pt->node, &parent->pt_tree); 145 141 } 142 + spin_unlock_irqrestore(fence->lock, flags); 146 143 147 144 sync_timeline_put(parent); 148 145 dma_fence_free(fence); ··· 262 265 p = &parent->rb_left; 263 266 } else { 264 267 if (dma_fence_get_rcu(&other->base)) { 265 - dma_fence_put(&pt->base); 268 + sync_timeline_put(obj); 269 + kfree(pt); 266 270 pt = other; 267 271 goto unlock; 268 272 }
+1 -1
drivers/dma-buf/sync_file.c
··· 419 419 * info->num_fences. 420 420 */ 421 421 if (!info.num_fences) { 422 - info.status = dma_fence_is_signaled(sync_file->fence); 422 + info.status = dma_fence_get_status(sync_file->fence); 423 423 goto no_fences; 424 424 } else { 425 425 info.status = 1;
+7 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 218 218 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, 219 219 struct amdgpu_amdkfd_fence *ef) 220 220 { 221 - struct reservation_object *resv = bo->tbo.base.resv; 222 - struct reservation_object_list *old, *new; 221 + struct dma_resv *resv = bo->tbo.base.resv; 222 + struct dma_resv_list *old, *new; 223 223 unsigned int i, j, k; 224 224 225 225 if (!ef) 226 226 return -EINVAL; 227 227 228 - old = reservation_object_get_list(resv); 228 + old = dma_resv_get_list(resv); 229 229 if (!old) 230 230 return 0; 231 231 ··· 241 241 struct dma_fence *f; 242 242 243 243 f = rcu_dereference_protected(old->shared[i], 244 - reservation_object_held(resv)); 244 + dma_resv_held(resv)); 245 245 246 246 if (f->context == ef->base.context) 247 247 RCU_INIT_POINTER(new->shared[--j], f); ··· 263 263 struct dma_fence *f; 264 264 265 265 f = rcu_dereference_protected(new->shared[i], 266 - reservation_object_held(resv)); 266 + dma_resv_held(resv)); 267 267 dma_fence_put(f); 268 268 } 269 269 kfree_rcu(old, rcu); ··· 887 887 AMDGPU_FENCE_OWNER_KFD, false); 888 888 if (ret) 889 889 goto wait_pd_fail; 890 - ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); 890 + ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); 891 891 if (ret) 892 892 goto reserve_shared_fail; 893 893 amdgpu_bo_fence(vm->root.base.bo, ··· 2133 2133 * Add process eviction fence to bo so they can 2134 2134 * evict each other. 2135 2135 */ 2136 - ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1); 2136 + ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1); 2137 2137 if (ret) 2138 2138 goto reserve_shared_fail; 2139 2139 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 730 730 731 731 list_for_each_entry(e, &p->validated, tv.head) { 732 732 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 733 - struct reservation_object *resv = bo->tbo.base.resv; 733 + struct dma_resv *resv = bo->tbo.base.resv; 734 734 735 735 r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, 736 736 amdgpu_bo_explicit_sync(bo)); ··· 1727 1727 *map = mapping; 1728 1728 1729 1729 /* Double check that the BO is reserved by this CS */ 1730 - if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) 1730 + if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) 1731 1731 return -EINVAL; 1732 1732 1733 1733 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 205 205 goto unpin; 206 206 } 207 207 208 - r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, 208 + r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, 209 209 &work->shared_count, 210 210 &work->shared); 211 211 if (unlikely(r != 0)) {
+10 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 137 137 } 138 138 139 139 static int 140 - __reservation_object_make_exclusive(struct reservation_object *obj) 140 + __dma_resv_make_exclusive(struct dma_resv *obj) 141 141 { 142 142 struct dma_fence **fences; 143 143 unsigned int count; 144 144 int r; 145 145 146 - if (!reservation_object_get_list(obj)) /* no shared fences to convert */ 146 + if (!dma_resv_get_list(obj)) /* no shared fences to convert */ 147 147 return 0; 148 148 149 - r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); 149 + r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); 150 150 if (r) 151 151 return r; 152 152 153 153 if (count == 0) { 154 154 /* Now that was unexpected. */ 155 155 } else if (count == 1) { 156 - reservation_object_add_excl_fence(obj, fences[0]); 156 + dma_resv_add_excl_fence(obj, fences[0]); 157 157 dma_fence_put(fences[0]); 158 158 kfree(fences); 159 159 } else { ··· 165 165 if (!array) 166 166 goto err_fences_put; 167 167 168 - reservation_object_add_excl_fence(obj, &array->base); 168 + dma_resv_add_excl_fence(obj, &array->base); 169 169 dma_fence_put(&array->base); 170 170 } 171 171 ··· 216 216 * fences on the reservation object into a single exclusive 217 217 * fence. 
218 218 */ 219 - r = __reservation_object_make_exclusive(bo->tbo.base.resv); 219 + r = __dma_resv_make_exclusive(bo->tbo.base.resv); 220 220 if (r) 221 221 goto error_unreserve; 222 222 } ··· 367 367 struct dma_buf_attachment *attach, 368 368 struct sg_table *sg) 369 369 { 370 - struct reservation_object *resv = attach->dmabuf->resv; 370 + struct dma_resv *resv = attach->dmabuf->resv; 371 371 struct amdgpu_device *adev = dev->dev_private; 372 372 struct amdgpu_bo *bo; 373 373 struct amdgpu_bo_param bp; ··· 380 380 bp.flags = 0; 381 381 bp.type = ttm_bo_type_sg; 382 382 bp.resv = resv; 383 - reservation_object_lock(resv, NULL); 383 + dma_resv_lock(resv, NULL); 384 384 ret = amdgpu_bo_create(adev, &bp, &bo); 385 385 if (ret) 386 386 goto error; ··· 392 392 if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) 393 393 bo->prime_shared_count = 1; 394 394 395 - reservation_object_unlock(resv); 395 + dma_resv_unlock(resv); 396 396 return &bo->tbo.base; 397 397 398 398 error: 399 - reservation_object_unlock(resv); 399 + dma_resv_unlock(resv); 400 400 return ERR_PTR(ret); 401 401 } 402 402
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 50 50 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 51 51 int alignment, u32 initial_domain, 52 52 u64 flags, enum ttm_bo_type type, 53 - struct reservation_object *resv, 53 + struct dma_resv *resv, 54 54 struct drm_gem_object **obj) 55 55 { 56 56 struct amdgpu_bo *bo; ··· 215 215 union drm_amdgpu_gem_create *args = data; 216 216 uint64_t flags = args->in.domain_flags; 217 217 uint64_t size = args->in.bo_size; 218 - struct reservation_object *resv = NULL; 218 + struct dma_resv *resv = NULL; 219 219 struct drm_gem_object *gobj; 220 220 uint32_t handle; 221 221 int r; ··· 433 433 return -ENOENT; 434 434 } 435 435 robj = gem_to_amdgpu_bo(gobj); 436 - ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 436 + ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 437 437 timeout); 438 438 439 439 /* ret == 0 means not signaled,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
··· 47 47 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 48 48 int alignment, u32 initial_domain, 49 49 u64 flags, enum ttm_bo_type type, 50 - struct reservation_object *resv, 50 + struct dma_resv *resv, 51 51 struct drm_gem_object **obj); 52 52 53 53 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 104 104 * 105 105 * Free the pasid only after all the fences in resv are signaled. 106 106 */ 107 - void amdgpu_pasid_free_delayed(struct reservation_object *resv, 107 + void amdgpu_pasid_free_delayed(struct dma_resv *resv, 108 108 unsigned int pasid) 109 109 { 110 110 struct dma_fence *fence, **fences; ··· 112 112 unsigned count; 113 113 int r; 114 114 115 - r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences); 115 + r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); 116 116 if (r) 117 117 goto fallback; 118 118 ··· 156 156 /* Not enough memory for the delayed delete, as last resort 157 157 * block for all the fences to complete. 158 158 */ 159 - reservation_object_wait_timeout_rcu(resv, true, false, 159 + dma_resv_wait_timeout_rcu(resv, true, false, 160 160 MAX_SCHEDULE_TIMEOUT); 161 161 amdgpu_pasid_free(pasid); 162 162 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
··· 72 72 73 73 int amdgpu_pasid_alloc(unsigned int bits); 74 74 void amdgpu_pasid_free(unsigned int pasid); 75 - void amdgpu_pasid_free_delayed(struct reservation_object *resv, 75 + void amdgpu_pasid_free_delayed(struct dma_resv *resv, 76 76 unsigned int pasid); 77 77 78 78 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
··· 179 179 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) 180 180 continue; 181 181 182 - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 182 + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, 183 183 true, false, MAX_SCHEDULE_TIMEOUT); 184 184 if (r <= 0) 185 185 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+11 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 550 550 551 551 fail_unreserve: 552 552 if (!bp->resv) 553 - reservation_object_unlock(bo->tbo.base.resv); 553 + dma_resv_unlock(bo->tbo.base.resv); 554 554 amdgpu_bo_unref(&bo); 555 555 return r; 556 556 } ··· 612 612 613 613 if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { 614 614 if (!bp->resv) 615 - WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv, 615 + WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv, 616 616 NULL)); 617 617 618 618 r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); 619 619 620 620 if (!bp->resv) 621 - reservation_object_unlock((*bo_ptr)->tbo.base.resv); 621 + dma_resv_unlock((*bo_ptr)->tbo.base.resv); 622 622 623 623 if (r) 624 624 amdgpu_bo_unref(bo_ptr); ··· 715 715 return 0; 716 716 } 717 717 718 - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false, 718 + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, 719 719 MAX_SCHEDULE_TIMEOUT); 720 720 if (r < 0) 721 721 return r; ··· 1093 1093 */ 1094 1094 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) 1095 1095 { 1096 - reservation_object_assert_held(bo->tbo.base.resv); 1096 + dma_resv_assert_held(bo->tbo.base.resv); 1097 1097 1098 1098 if (tiling_flags) 1099 1099 *tiling_flags = bo->tiling_flags; ··· 1242 1242 !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) 1243 1243 return; 1244 1244 1245 - reservation_object_lock(bo->base.resv, NULL); 1245 + dma_resv_lock(bo->base.resv, NULL); 1246 1246 1247 1247 r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); 1248 1248 if (!WARN_ON(r)) { ··· 1250 1250 dma_fence_put(fence); 1251 1251 } 1252 1252 1253 - reservation_object_unlock(bo->base.resv); 1253 + dma_resv_unlock(bo->base.resv); 1254 1254 } 1255 1255 1256 1256 /** ··· 1325 1325 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, 1326 1326 bool shared) 1327 1327 { 1328 - struct reservation_object *resv = bo->tbo.base.resv; 1328 + struct dma_resv *resv = 
bo->tbo.base.resv; 1329 1329 1330 1330 if (shared) 1331 - reservation_object_add_shared_fence(resv, fence); 1331 + dma_resv_add_shared_fence(resv, fence); 1332 1332 else 1333 - reservation_object_add_excl_fence(resv, fence); 1333 + dma_resv_add_excl_fence(resv, fence); 1334 1334 } 1335 1335 1336 1336 /** ··· 1370 1370 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) 1371 1371 { 1372 1372 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); 1373 - WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) && 1373 + WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) && 1374 1374 !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel); 1375 1375 WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); 1376 1376 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 41 41 u32 preferred_domain; 42 42 u64 flags; 43 43 enum ttm_bo_type type; 44 - struct reservation_object *resv; 44 + struct dma_resv *resv; 45 45 }; 46 46 47 47 /* bo virtual addresses in a vm */
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
··· 190 190 */ 191 191 int amdgpu_sync_resv(struct amdgpu_device *adev, 192 192 struct amdgpu_sync *sync, 193 - struct reservation_object *resv, 193 + struct dma_resv *resv, 194 194 void *owner, bool explicit_sync) 195 195 { 196 - struct reservation_object_list *flist; 196 + struct dma_resv_list *flist; 197 197 struct dma_fence *f; 198 198 void *fence_owner; 199 199 unsigned i; ··· 203 203 return -EINVAL; 204 204 205 205 /* always sync to the exclusive fence */ 206 - f = reservation_object_get_excl(resv); 206 + f = dma_resv_get_excl(resv); 207 207 r = amdgpu_sync_fence(adev, sync, f, false); 208 208 209 - flist = reservation_object_get_list(resv); 209 + flist = dma_resv_get_list(resv); 210 210 if (!flist || r) 211 211 return r; 212 212 213 213 for (i = 0; i < flist->shared_count; ++i) { 214 214 f = rcu_dereference_protected(flist->shared[i], 215 - reservation_object_held(resv)); 215 + dma_resv_held(resv)); 216 216 /* We only want to trigger KFD eviction fences on 217 217 * evict or move jobs. Skip KFD fences otherwise. 218 218 */
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
··· 27 27 #include <linux/hashtable.h> 28 28 29 29 struct dma_fence; 30 - struct reservation_object; 30 + struct dma_resv; 31 31 struct amdgpu_device; 32 32 struct amdgpu_ring; 33 33 ··· 44 44 struct dma_fence *f, bool explicit); 45 45 int amdgpu_sync_resv(struct amdgpu_device *adev, 46 46 struct amdgpu_sync *sync, 47 - struct reservation_object *resv, 47 + struct dma_resv *resv, 48 48 void *owner, 49 49 bool explicit_sync); 50 50 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+7 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 303 303 struct amdgpu_copy_mem *src, 304 304 struct amdgpu_copy_mem *dst, 305 305 uint64_t size, 306 - struct reservation_object *resv, 306 + struct dma_resv *resv, 307 307 struct dma_fence **f) 308 308 { 309 309 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; ··· 1486 1486 { 1487 1487 unsigned long num_pages = bo->mem.num_pages; 1488 1488 struct drm_mm_node *node = bo->mem.mm_node; 1489 - struct reservation_object_list *flist; 1489 + struct dma_resv_list *flist; 1490 1490 struct dma_fence *f; 1491 1491 int i; 1492 1492 ··· 1494 1494 * cleanly handle page faults. 1495 1495 */ 1496 1496 if (bo->type == ttm_bo_type_kernel && 1497 - !reservation_object_test_signaled_rcu(bo->base.resv, true)) 1497 + !dma_resv_test_signaled_rcu(bo->base.resv, true)) 1498 1498 return false; 1499 1499 1500 1500 /* If bo is a KFD BO, check if the bo belongs to the current process. 1501 1501 * If true, then return false as any KFD process needs all its BOs to 1502 1502 * be resident to run successfully 1503 1503 */ 1504 - flist = reservation_object_get_list(bo->base.resv); 1504 + flist = dma_resv_get_list(bo->base.resv); 1505 1505 if (flist) { 1506 1506 for (i = 0; i < flist->shared_count; ++i) { 1507 1507 f = rcu_dereference_protected(flist->shared[i], 1508 - reservation_object_held(bo->base.resv)); 1508 + dma_resv_held(bo->base.resv)); 1509 1509 if (amdkfd_fence_check_mm(f, current->mm)) 1510 1510 return false; 1511 1511 } ··· 2009 2009 2010 2010 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, 2011 2011 uint64_t dst_offset, uint32_t byte_count, 2012 - struct reservation_object *resv, 2012 + struct dma_resv *resv, 2013 2013 struct dma_fence **fence, bool direct_submit, 2014 2014 bool vm_needs_flush) 2015 2015 { ··· 2083 2083 2084 2084 int amdgpu_fill_buffer(struct amdgpu_bo *bo, 2085 2085 uint32_t src_data, 2086 - struct reservation_object *resv, 2086 + struct dma_resv *resv, 2087 2087 struct dma_fence **fence) 2088 2088 { 2089 2089 struct amdgpu_device 
*adev = amdgpu_ttm_adev(bo->tbo.bdev);
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 85 85 86 86 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, 87 87 uint64_t dst_offset, uint32_t byte_count, 88 - struct reservation_object *resv, 88 + struct dma_resv *resv, 89 89 struct dma_fence **fence, bool direct_submit, 90 90 bool vm_needs_flush); 91 91 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, 92 92 struct amdgpu_copy_mem *src, 93 93 struct amdgpu_copy_mem *dst, 94 94 uint64_t size, 95 - struct reservation_object *resv, 95 + struct dma_resv *resv, 96 96 struct dma_fence **f); 97 97 int amdgpu_fill_buffer(struct amdgpu_bo *bo, 98 98 uint32_t src_data, 99 - struct reservation_object *resv, 99 + struct dma_resv *resv, 100 100 struct dma_fence **fence); 101 101 102 102 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 1073 1073 ib->length_dw = 16; 1074 1074 1075 1075 if (direct) { 1076 - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 1076 + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, 1077 1077 true, false, 1078 1078 msecs_to_jiffies(10)); 1079 1079 if (r == 0)
+10 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1702 1702 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); 1703 1703 pages_addr = ttm->dma_address; 1704 1704 } 1705 - exclusive = reservation_object_get_excl(bo->tbo.base.resv); 1705 + exclusive = dma_resv_get_excl(bo->tbo.base.resv); 1706 1706 } 1707 1707 1708 1708 if (bo) { ··· 1879 1879 */ 1880 1880 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 1881 1881 { 1882 - struct reservation_object *resv = vm->root.base.bo->tbo.base.resv; 1882 + struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 1883 1883 struct dma_fence *excl, **shared; 1884 1884 unsigned i, shared_count; 1885 1885 int r; 1886 1886 1887 - r = reservation_object_get_fences_rcu(resv, &excl, 1887 + r = dma_resv_get_fences_rcu(resv, &excl, 1888 1888 &shared_count, &shared); 1889 1889 if (r) { 1890 1890 /* Not enough memory to grab the fence list, as last resort 1891 1891 * block for all the fences to complete. 1892 1892 */ 1893 - reservation_object_wait_timeout_rcu(resv, true, false, 1893 + dma_resv_wait_timeout_rcu(resv, true, false, 1894 1894 MAX_SCHEDULE_TIMEOUT); 1895 1895 return; 1896 1896 } ··· 1978 1978 struct amdgpu_vm *vm) 1979 1979 { 1980 1980 struct amdgpu_bo_va *bo_va, *tmp; 1981 - struct reservation_object *resv; 1981 + struct dma_resv *resv; 1982 1982 bool clear; 1983 1983 int r; 1984 1984 ··· 1997 1997 spin_unlock(&vm->invalidated_lock); 1998 1998 1999 1999 /* Try to reserve the BO to avoid clearing its ptes */ 2000 - if (!amdgpu_vm_debug && reservation_object_trylock(resv)) 2000 + if (!amdgpu_vm_debug && dma_resv_trylock(resv)) 2001 2001 clear = false; 2002 2002 /* Somebody else is using the BO right now */ 2003 2003 else ··· 2008 2008 return r; 2009 2009 2010 2010 if (!clear) 2011 - reservation_object_unlock(resv); 2011 + dma_resv_unlock(resv); 2012 2012 spin_lock(&vm->invalidated_lock); 2013 2013 } 2014 2014 spin_unlock(&vm->invalidated_lock); ··· 2416 2416 struct amdgpu_bo *bo; 2417 2417 2418 2418 bo = mapping->bo_va->base.bo; 2419 - 
if (reservation_object_locking_ctx(bo->tbo.base.resv) != 2419 + if (dma_resv_locking_ctx(bo->tbo.base.resv) != 2420 2420 ticket) 2421 2421 continue; 2422 2422 } ··· 2649 2649 */ 2650 2650 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2651 2651 { 2652 - return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, 2652 + return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, 2653 2653 true, true, timeout); 2654 2654 } 2655 2655 ··· 2724 2724 if (r) 2725 2725 goto error_free_root; 2726 2726 2727 - r = reservation_object_reserve_shared(root->tbo.base.resv, 1); 2727 + r = dma_resv_reserve_shared(root->tbo.base.resv, 1); 2728 2728 if (r) 2729 2729 goto error_unreserve; 2730 2730
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 5695 5695 * deadlock during GPU reset when this fence will not signal 5696 5696 * but we hold reservation lock for the BO. 5697 5697 */ 5698 - r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true, 5698 + r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, 5699 5699 false, 5700 5700 msecs_to_jiffies(5000)); 5701 5701 if (unlikely(r <= 0))
+1 -1
drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
··· 27 27 return; 28 28 } 29 29 30 - pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000; 30 + pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000ULL; 31 31 aclk = komeda_crtc_get_aclk(kcrtc_st); 32 32 33 33 kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk);
+7 -5
drivers/gpu/drm/arm/hdlcd_crtc.c
··· 9 9 * Implementation of a CRTC class for the HDLCD driver. 10 10 */ 11 11 12 - #include <drm/drmP.h> 12 + #include <linux/clk.h> 13 + #include <linux/of_graph.h> 14 + #include <linux/platform_data/simplefb.h> 15 + 16 + #include <video/videomode.h> 17 + 13 18 #include <drm/drm_atomic.h> 14 19 #include <drm/drm_atomic_helper.h> 15 20 #include <drm/drm_crtc.h> ··· 24 19 #include <drm/drm_of.h> 25 20 #include <drm/drm_plane_helper.h> 26 21 #include <drm/drm_probe_helper.h> 27 - #include <linux/clk.h> 28 - #include <linux/of_graph.h> 29 - #include <linux/platform_data/simplefb.h> 30 - #include <video/videomode.h> 22 + #include <drm/drm_vblank.h> 31 23 32 24 #include "hdlcd_drv.h" 33 25 #include "hdlcd_regs.h"
+6 -1
drivers/gpu/drm/arm/hdlcd_drv.c
··· 14 14 #include <linux/clk.h> 15 15 #include <linux/component.h> 16 16 #include <linux/console.h> 17 + #include <linux/dma-mapping.h> 17 18 #include <linux/list.h> 18 19 #include <linux/of_graph.h> 19 20 #include <linux/of_reserved_mem.h> 21 + #include <linux/platform_device.h> 20 22 #include <linux/pm_runtime.h> 21 23 22 - #include <drm/drmP.h> 23 24 #include <drm/drm_atomic_helper.h> 24 25 #include <drm/drm_crtc.h> 26 + #include <drm/drm_debugfs.h> 27 + #include <drm/drm_drv.h> 25 28 #include <drm/drm_fb_cma_helper.h> 26 29 #include <drm/drm_fb_helper.h> 27 30 #include <drm/drm_gem_cma_helper.h> 28 31 #include <drm/drm_gem_framebuffer_helper.h> 32 + #include <drm/drm_irq.h> 29 33 #include <drm/drm_modeset_helper.h> 30 34 #include <drm/drm_of.h> 31 35 #include <drm/drm_probe_helper.h> 36 + #include <drm/drm_vblank.h> 32 37 33 38 #include "hdlcd_drv.h" 34 39 #include "hdlcd_regs.h"
+7 -4
drivers/gpu/drm/arm/malidp_crtc.c
··· 6 6 * ARM Mali DP500/DP550/DP650 driver (crtc operations) 7 7 */ 8 8 9 - #include <drm/drmP.h> 9 + #include <linux/clk.h> 10 + #include <linux/pm_runtime.h> 11 + 12 + #include <video/videomode.h> 13 + 10 14 #include <drm/drm_atomic.h> 11 15 #include <drm/drm_atomic_helper.h> 12 16 #include <drm/drm_crtc.h> 17 + #include <drm/drm_print.h> 13 18 #include <drm/drm_probe_helper.h> 14 - #include <linux/clk.h> 15 - #include <linux/pm_runtime.h> 16 - #include <video/videomode.h> 19 + #include <drm/drm_vblank.h> 17 20 18 21 #include "malidp_drv.h" 19 22 #include "malidp_hw.h"
+5 -3
drivers/gpu/drm/arm/malidp_drv.c
··· 15 15 #include <linux/pm_runtime.h> 16 16 #include <linux/debugfs.h> 17 17 18 - #include <drm/drmP.h> 19 18 #include <drm/drm_atomic.h> 20 19 #include <drm/drm_atomic_helper.h> 21 20 #include <drm/drm_crtc.h> 22 - #include <drm/drm_probe_helper.h> 23 - #include <drm/drm_fb_helper.h> 21 + #include <drm/drm_drv.h> 24 22 #include <drm/drm_fb_cma_helper.h> 23 + #include <drm/drm_fb_helper.h> 24 + #include <drm/drm_fourcc.h> 25 25 #include <drm/drm_gem_cma_helper.h> 26 26 #include <drm/drm_gem_framebuffer_helper.h> 27 27 #include <drm/drm_modeset_helper.h> 28 28 #include <drm/drm_of.h> 29 + #include <drm/drm_probe_helper.h> 30 + #include <drm/drm_vblank.h> 29 31 30 32 #include "malidp_drv.h" 31 33 #include "malidp_mw.h"
+4 -3
drivers/gpu/drm/arm/malidp_drv.h
··· 9 9 #ifndef __MALIDP_DRV_H__ 10 10 #define __MALIDP_DRV_H__ 11 11 12 - #include <drm/drm_writeback.h> 13 - #include <drm/drm_encoder.h> 14 12 #include <linux/mutex.h> 15 13 #include <linux/wait.h> 16 14 #include <linux/spinlock.h> 17 - #include <drm/drmP.h> 15 + 16 + #include <drm/drm_writeback.h> 17 + #include <drm/drm_encoder.h> 18 + 18 19 #include "malidp_hw.h" 19 20 20 21 #define MALIDP_CONFIG_VALID_INIT 0
+6 -1
drivers/gpu/drm/arm/malidp_hw.c
··· 9 9 */ 10 10 11 11 #include <linux/clk.h> 12 + #include <linux/delay.h> 12 13 #include <linux/types.h> 13 14 #include <linux/io.h> 14 - #include <drm/drmP.h> 15 + 15 16 #include <video/videomode.h> 16 17 #include <video/display_timing.h> 18 + 19 + #include <drm/drm_fourcc.h> 20 + #include <drm/drm_vblank.h> 21 + #include <drm/drm_print.h> 17 22 18 23 #include "malidp_drv.h" 19 24 #include "malidp_hw.h"
+3 -2
drivers/gpu/drm/arm/malidp_mw.c
··· 5 5 * 6 6 * ARM Mali DP Writeback connector implementation 7 7 */ 8 + 8 9 #include <drm/drm_atomic.h> 9 10 #include <drm/drm_atomic_helper.h> 10 11 #include <drm/drm_crtc.h> 11 - #include <drm/drm_probe_helper.h> 12 12 #include <drm/drm_fb_cma_helper.h> 13 + #include <drm/drm_fourcc.h> 13 14 #include <drm/drm_gem_cma_helper.h> 14 - #include <drm/drmP.h> 15 + #include <drm/drm_probe_helper.h> 15 16 #include <drm/drm_writeback.h> 16 17 17 18 #include "malidp_drv.h"
+3 -1
drivers/gpu/drm/arm/malidp_planes.c
··· 7 7 */ 8 8 9 9 #include <linux/iommu.h> 10 + #include <linux/platform_device.h> 10 11 11 - #include <drm/drmP.h> 12 12 #include <drm/drm_atomic.h> 13 13 #include <drm/drm_atomic_helper.h> 14 + #include <drm/drm_drv.h> 14 15 #include <drm/drm_fb_cma_helper.h> 16 + #include <drm/drm_fourcc.h> 15 17 #include <drm/drm_gem_cma_helper.h> 16 18 #include <drm/drm_gem_framebuffer_helper.h> 17 19 #include <drm/drm_plane_helper.h>
+7 -3
drivers/gpu/drm/armada/armada_crtc.c
··· 3 3 * Copyright (C) 2012 Russell King 4 4 * Rewritten from the dovefb driver, and Armada510 manuals. 5 5 */ 6 + 6 7 #include <linux/clk.h> 7 8 #include <linux/component.h> 9 + #include <linux/module.h> 8 10 #include <linux/of_device.h> 9 11 #include <linux/platform_device.h> 10 - #include <drm/drmP.h> 12 + 11 13 #include <drm/drm_atomic.h> 12 - #include <drm/drm_probe_helper.h> 13 - #include <drm/drm_plane_helper.h> 14 14 #include <drm/drm_atomic_helper.h> 15 + #include <drm/drm_plane_helper.h> 16 + #include <drm/drm_probe_helper.h> 17 + #include <drm/drm_vblank.h> 18 + 15 19 #include "armada_crtc.h" 16 20 #include "armada_drm.h" 17 21 #include "armada_fb.h"
+6 -2
drivers/gpu/drm/armada/armada_debugfs.c
··· 3 3 * Copyright (C) 2012 Russell King 4 4 * Rewritten from the dovefb driver, and Armada510 manuals. 5 5 */ 6 + 6 7 #include <linux/ctype.h> 7 - #include <linux/debugfs.h> 8 8 #include <linux/module.h> 9 9 #include <linux/seq_file.h> 10 - #include <drm/drmP.h> 10 + #include <linux/uaccess.h> 11 + 12 + #include <drm/drm_debugfs.h> 13 + #include <drm/drm_file.h> 14 + 11 15 #include "armada_crtc.h" 12 16 #include "armada_drm.h" 13 17
+4 -1
drivers/gpu/drm/armada/armada_drm.h
··· 8 8 #include <linux/kfifo.h> 9 9 #include <linux/io.h> 10 10 #include <linux/workqueue.h> 11 - #include <drm/drmP.h> 11 + 12 + #include <drm/drm_device.h> 13 + #include <drm/drm_mm.h> 12 14 13 15 struct armada_crtc; 14 16 struct armada_gem_object; 15 17 struct clk; 18 + struct drm_display_mode; 16 19 struct drm_fb_helper; 17 20 18 21 static inline void
+8
drivers/gpu/drm/armada/armada_drv.c
··· 2 2 /* 3 3 * Copyright (C) 2012 Russell King 4 4 */ 5 + 5 6 #include <linux/clk.h> 6 7 #include <linux/component.h> 7 8 #include <linux/module.h> 8 9 #include <linux/of_graph.h> 10 + #include <linux/platform_device.h> 11 + 9 12 #include <drm/drm_atomic_helper.h> 13 + #include <drm/drm_drv.h> 14 + #include <drm/drm_ioctl.h> 15 + #include <drm/drm_prime.h> 10 16 #include <drm/drm_probe_helper.h> 11 17 #include <drm/drm_fb_helper.h> 12 18 #include <drm/drm_of.h> 19 + #include <drm/drm_vblank.h> 20 + 13 21 #include "armada_crtc.h" 14 22 #include "armada_drm.h" 15 23 #include "armada_gem.h"
+3
drivers/gpu/drm/armada/armada_fb.c
··· 2 2 /* 3 3 * Copyright (C) 2012 Russell King 4 4 */ 5 + 5 6 #include <drm/drm_modeset_helper.h> 6 7 #include <drm/drm_fb_helper.h> 8 + #include <drm/drm_fourcc.h> 7 9 #include <drm/drm_gem_framebuffer_helper.h> 10 + 8 11 #include "armada_drm.h" 9 12 #include "armada_fb.h" 10 13 #include "armada_gem.h"
+3
drivers/gpu/drm/armada/armada_fbdev.c
··· 3 3 * Copyright (C) 2012 Russell King 4 4 * Written from the i915 driver. 5 5 */ 6 + 6 7 #include <linux/errno.h> 7 8 #include <linux/kernel.h> 8 9 #include <linux/module.h> 9 10 10 11 #include <drm/drm_fb_helper.h> 12 + #include <drm/drm_fourcc.h> 13 + 11 14 #include "armada_crtc.h" 12 15 #include "armada_drm.h" 13 16 #include "armada_fb.h"
+6 -1
drivers/gpu/drm/armada/armada_gem.c
··· 2 2 /* 3 3 * Copyright (C) 2012 Russell King 4 4 */ 5 + 5 6 #include <linux/dma-buf.h> 6 7 #include <linux/dma-mapping.h> 8 + #include <linux/mman.h> 7 9 #include <linux/shmem_fs.h> 10 + 11 + #include <drm/armada_drm.h> 12 + #include <drm/drm_prime.h> 13 + 8 14 #include "armada_drm.h" 9 15 #include "armada_gem.h" 10 - #include <drm/armada_drm.h> 11 16 #include "armada_ioctlP.h" 12 17 13 18 static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
+7 -5
drivers/gpu/drm/armada/armada_overlay.c
··· 3 3 * Copyright (C) 2012 Russell King 4 4 * Rewritten from the dovefb driver, and Armada510 manuals. 5 5 */ 6 - #include <drm/drmP.h> 7 - #include <drm/drm_atomic.h> 8 - #include <drm/drm_atomic_uapi.h> 9 - #include <drm/drm_atomic_helper.h> 10 - #include <drm/drm_plane_helper.h> 6 + 11 7 #include <drm/armada_drm.h> 8 + #include <drm/drm_atomic.h> 9 + #include <drm/drm_atomic_helper.h> 10 + #include <drm/drm_atomic_uapi.h> 11 + #include <drm/drm_fourcc.h> 12 + #include <drm/drm_plane_helper.h> 13 + 12 14 #include "armada_crtc.h" 13 15 #include "armada_drm.h" 14 16 #include "armada_fb.h"
+3 -1
drivers/gpu/drm/armada/armada_plane.c
··· 3 3 * Copyright (C) 2012 Russell King 4 4 * Rewritten from the dovefb driver, and Armada510 manuals. 5 5 */ 6 - #include <drm/drmP.h> 6 + 7 7 #include <drm/drm_atomic.h> 8 8 #include <drm/drm_atomic_helper.h> 9 + #include <drm/drm_fourcc.h> 9 10 #include <drm/drm_plane_helper.h> 11 + 10 12 #include "armada_crtc.h" 11 13 #include "armada_drm.h" 12 14 #include "armada_fb.h"
+4 -1
drivers/gpu/drm/armada/armada_trace.h
··· 3 3 #define ARMADA_TRACE_H 4 4 5 5 #include <linux/tracepoint.h> 6 - #include <drm/drmP.h> 6 + 7 + struct drm_crtc; 8 + struct drm_framebuffer; 9 + struct drm_plane; 7 10 8 11 #undef TRACE_SYSTEM 9 12 #define TRACE_SYSTEM armada
+1 -1
drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
··· 215 215 writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1); 216 216 } 217 217 218 - static struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = { 218 + static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = { 219 219 .enable = aspeed_gfx_pipe_enable, 220 220 .disable = aspeed_gfx_pipe_disable, 221 221 .update = aspeed_gfx_pipe_update,
+1 -2
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
··· 1780 1780 if (dp->plat_data->panel) { 1781 1781 if (drm_panel_unprepare(dp->plat_data->panel)) 1782 1782 DRM_ERROR("failed to turnoff the panel\n"); 1783 - if (drm_panel_detach(dp->plat_data->panel)) 1784 - DRM_ERROR("failed to detach the panel\n"); 1783 + drm_panel_detach(dp->plat_data->panel); 1785 1784 } 1786 1785 1787 1786 drm_dp_aux_unregister(&dp->aux);
+4 -3
drivers/gpu/drm/bridge/dumb-vga-dac.c
··· 42 42 struct edid *edid; 43 43 int ret; 44 44 45 - if (IS_ERR(vga->ddc)) 45 + if (!vga->ddc) 46 46 goto fallback; 47 47 48 48 edid = drm_get_edid(connector, vga->ddc); ··· 84 84 * wire the DDC pins, or the I2C bus might not be working at 85 85 * all. 86 86 */ 87 - if (!IS_ERR(vga->ddc) && drm_probe_ddc(vga->ddc)) 87 + if (vga->ddc && drm_probe_ddc(vga->ddc)) 88 88 return connector_status_connected; 89 89 90 90 return connector_status_unknown; ··· 197 197 if (PTR_ERR(vga->ddc) == -ENODEV) { 198 198 dev_dbg(&pdev->dev, 199 199 "No i2c bus specified. Disabling EDID readout\n"); 200 + vga->ddc = NULL; 200 201 } else { 201 202 dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n"); 202 203 return PTR_ERR(vga->ddc); ··· 219 218 220 219 drm_bridge_remove(&vga->bridge); 221 220 222 - if (!IS_ERR(vga->ddc)) 221 + if (vga->ddc) 223 222 i2c_put_adapter(vga->ddc); 224 223 225 224 return 0;
+4 -16
drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
··· 63 63 HDMI_REVISION_ID = 0x0001, 64 64 HDMI_IH_AHBDMAAUD_STAT0 = 0x0109, 65 65 HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189, 66 - HDMI_FC_AUDICONF2 = 0x1027, 67 - HDMI_FC_AUDSCONF = 0x1063, 68 - HDMI_FC_AUDSCONF_LAYOUT1 = 1 << 0, 69 - HDMI_FC_AUDSCONF_LAYOUT0 = 0 << 0, 70 66 HDMI_AHB_DMA_CONF0 = 0x3600, 71 67 HDMI_AHB_DMA_START = 0x3601, 72 68 HDMI_AHB_DMA_STOP = 0x3602, ··· 399 403 { 400 404 struct snd_pcm_runtime *runtime = substream->runtime; 401 405 struct snd_dw_hdmi *dw = substream->private_data; 402 - u8 threshold, conf0, conf1, layout, ca; 406 + u8 threshold, conf0, conf1, ca; 403 407 404 408 /* Setup as per 3.0.5 FSL 4.1.0 BSP */ 405 409 switch (dw->revision) { ··· 430 434 conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1; 431 435 ca = default_hdmi_channel_config[runtime->channels - 2].ca; 432 436 433 - /* 434 - * For >2 channel PCM audio, we need to select layout 1 435 - * and set an appropriate channel map. 436 - */ 437 - if (runtime->channels > 2) 438 - layout = HDMI_FC_AUDSCONF_LAYOUT1; 439 - else 440 - layout = HDMI_FC_AUDSCONF_LAYOUT0; 441 - 442 437 writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD); 443 438 writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0); 444 439 writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1); 445 - writeb_relaxed(layout, dw->data.base + HDMI_FC_AUDSCONF); 446 - writeb_relaxed(ca, dw->data.base + HDMI_FC_AUDICONF2); 440 + 441 + dw_hdmi_set_channel_count(dw->data.hdmi, runtime->channels); 442 + dw_hdmi_set_channel_allocation(dw->data.hdmi, ca); 447 443 448 444 switch (runtime->format) { 449 445 case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
+1
drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
··· 14 14 15 15 struct dw_hdmi_i2s_audio_data { 16 16 struct dw_hdmi *hdmi; 17 + u8 *eld; 17 18 18 19 void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); 19 20 u8 (*read)(struct dw_hdmi *hdmi, int offset);
+54 -6
drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
··· 10 10 #include <linux/module.h> 11 11 12 12 #include <drm/bridge/dw_hdmi.h> 13 + #include <drm/drm_crtc.h> 13 14 14 15 #include <sound/hdmi-codec.h> 15 16 ··· 45 44 u8 inputclkfs = 0; 46 45 47 46 /* it cares I2S only */ 48 - if ((fmt->fmt != HDMI_I2S) || 49 - (fmt->bit_clk_master | fmt->frame_clk_master)) { 50 - dev_err(dev, "unsupported format/settings\n"); 47 + if (fmt->bit_clk_master | fmt->frame_clk_master) { 48 + dev_err(dev, "unsupported clock settings\n"); 51 49 return -EINVAL; 52 50 } 53 51 52 + /* Reset the FIFOs before applying new params */ 53 + hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0); 54 + hdmi_write(audio, (u8)~HDMI_MC_SWRSTZ_I2SSWRST_REQ, HDMI_MC_SWRSTZ); 55 + 54 56 inputclkfs = HDMI_AUD_INPUTCLKFS_64FS; 55 - conf0 = HDMI_AUD_CONF0_I2S_ALL_ENABLE; 57 + conf0 = (HDMI_AUD_CONF0_I2S_SELECT | HDMI_AUD_CONF0_I2S_EN0); 58 + 59 + /* Enable the required i2s lanes */ 60 + switch (hparms->channels) { 61 + case 7 ... 8: 62 + conf0 |= HDMI_AUD_CONF0_I2S_EN3; 63 + /* Fall-thru */ 64 + case 5 ... 6: 65 + conf0 |= HDMI_AUD_CONF0_I2S_EN2; 66 + /* Fall-thru */ 67 + case 3 ... 
4: 68 + conf0 |= HDMI_AUD_CONF0_I2S_EN1; 69 + /* Fall-thru */ 70 + } 56 71 57 72 switch (hparms->sample_width) { 58 73 case 16: ··· 80 63 break; 81 64 } 82 65 66 + switch (fmt->fmt) { 67 + case HDMI_I2S: 68 + conf1 |= HDMI_AUD_CONF1_MODE_I2S; 69 + break; 70 + case HDMI_RIGHT_J: 71 + conf1 |= HDMI_AUD_CONF1_MODE_RIGHT_J; 72 + break; 73 + case HDMI_LEFT_J: 74 + conf1 |= HDMI_AUD_CONF1_MODE_LEFT_J; 75 + break; 76 + case HDMI_DSP_A: 77 + conf1 |= HDMI_AUD_CONF1_MODE_BURST_1; 78 + break; 79 + case HDMI_DSP_B: 80 + conf1 |= HDMI_AUD_CONF1_MODE_BURST_2; 81 + break; 82 + default: 83 + dev_err(dev, "unsupported format\n"); 84 + return -EINVAL; 85 + } 86 + 83 87 dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate); 88 + dw_hdmi_set_channel_count(hdmi, hparms->channels); 89 + dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation); 84 90 85 91 hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS); 86 92 hdmi_write(audio, conf0, HDMI_AUD_CONF0); ··· 120 80 struct dw_hdmi *hdmi = audio->hdmi; 121 81 122 82 dw_hdmi_audio_disable(hdmi); 83 + } 123 84 124 - hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0); 85 + static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf, 86 + size_t len) 87 + { 88 + struct dw_hdmi_i2s_audio_data *audio = data; 89 + 90 + memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len)); 91 + return 0; 125 92 } 126 93 127 94 static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component, ··· 154 107 static struct hdmi_codec_ops dw_hdmi_i2s_ops = { 155 108 .hw_params = dw_hdmi_i2s_hw_params, 156 109 .audio_shutdown = dw_hdmi_i2s_audio_shutdown, 110 + .get_eld = dw_hdmi_i2s_get_eld, 157 111 .get_dai_id = dw_hdmi_i2s_get_dai_id, 158 112 }; 159 113 ··· 167 119 168 120 pdata.ops = &dw_hdmi_i2s_ops; 169 121 pdata.i2s = 1; 170 - pdata.max_i2s_channels = 6; 122 + pdata.max_i2s_channels = 8; 171 123 pdata.data = audio; 172 124 173 125 memset(&pdevinfo, 0, sizeof(pdevinfo));
+37
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
··· 645 645 } 646 646 EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate); 647 647 648 + void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt) 649 + { 650 + u8 layout; 651 + 652 + mutex_lock(&hdmi->audio_mutex); 653 + 654 + /* 655 + * For >2 channel PCM audio, we need to select layout 1 656 + * and set an appropriate channel map. 657 + */ 658 + if (cnt > 2) 659 + layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1; 660 + else 661 + layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0; 662 + 663 + hdmi_modb(hdmi, layout, HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK, 664 + HDMI_FC_AUDSCONF); 665 + 666 + /* Set the audio infoframes channel count */ 667 + hdmi_modb(hdmi, (cnt - 1) << HDMI_FC_AUDICONF0_CC_OFFSET, 668 + HDMI_FC_AUDICONF0_CC_MASK, HDMI_FC_AUDICONF0); 669 + 670 + mutex_unlock(&hdmi->audio_mutex); 671 + } 672 + EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_count); 673 + 674 + void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca) 675 + { 676 + mutex_lock(&hdmi->audio_mutex); 677 + 678 + hdmi_writeb(hdmi, ca, HDMI_FC_AUDICONF2); 679 + 680 + mutex_unlock(&hdmi->audio_mutex); 681 + } 682 + EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_allocation); 683 + 648 684 static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable) 649 685 { 650 686 if (enable) ··· 2799 2763 struct dw_hdmi_i2s_audio_data audio; 2800 2764 2801 2765 audio.hdmi = hdmi; 2766 + audio.eld = hdmi->connector.eld; 2802 2767 audio.write = hdmi_writeb; 2803 2768 audio.read = hdmi_readb; 2804 2769 hdmi->enable_audio = dw_hdmi_i2s_audio_enable;
+10 -3
drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
··· 865 865 866 866 /* AUD_CONF0 field values */ 867 867 HDMI_AUD_CONF0_SW_RESET = 0x80, 868 - HDMI_AUD_CONF0_I2S_ALL_ENABLE = 0x2F, 868 + HDMI_AUD_CONF0_I2S_SELECT = 0x20, 869 + HDMI_AUD_CONF0_I2S_EN3 = 0x08, 870 + HDMI_AUD_CONF0_I2S_EN2 = 0x04, 871 + HDMI_AUD_CONF0_I2S_EN1 = 0x02, 872 + HDMI_AUD_CONF0_I2S_EN0 = 0x01, 869 873 870 874 /* AUD_CONF1 field values */ 871 875 HDMI_AUD_CONF1_MODE_I2S = 0x00, 872 - HDMI_AUD_CONF1_MODE_RIGHT_J = 0x02, 873 - HDMI_AUD_CONF1_MODE_LEFT_J = 0x04, 876 + HDMI_AUD_CONF1_MODE_RIGHT_J = 0x20, 877 + HDMI_AUD_CONF1_MODE_LEFT_J = 0x40, 878 + HDMI_AUD_CONF1_MODE_BURST_1 = 0x60, 879 + HDMI_AUD_CONF1_MODE_BURST_2 = 0x80, 874 880 HDMI_AUD_CONF1_WIDTH_16 = 0x10, 875 881 HDMI_AUD_CONF1_WIDTH_24 = 0x18, 876 882 ··· 944 938 HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1, 945 939 946 940 /* MC_SWRSTZ field values */ 941 + HDMI_MC_SWRSTZ_I2SSWRST_REQ = 0x08, 947 942 HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02, 948 943 949 944 /* MC_FLOWCTRL field values */
+4 -6
drivers/gpu/drm/bridge/tc358767.c
··· 1312 1312 { 1313 1313 struct tc_data *tc = connector_to_tc(connector); 1314 1314 struct edid *edid; 1315 - unsigned int count; 1315 + int count; 1316 1316 int ret; 1317 1317 1318 1318 ret = tc_get_display_props(tc); ··· 1321 1321 return 0; 1322 1322 } 1323 1323 1324 - if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) { 1325 - count = tc->panel->funcs->get_modes(tc->panel); 1326 - if (count > 0) 1327 - return count; 1328 - } 1324 + count = drm_panel_get_modes(tc->panel); 1325 + if (count > 0) 1326 + return count; 1329 1327 1330 1328 edid = drm_get_edid(connector, &tc->aux.ddc); 1331 1329
+1 -1
drivers/gpu/drm/drm_atomic_uapi.c
··· 1037 1037 * As a contrast, with implicit fencing the kernel keeps track of any 1038 1038 * ongoing rendering, and automatically ensures that the atomic update waits 1039 1039 * for any pending rendering to complete. For shared buffers represented with 1040 - * a &struct dma_buf this is tracked in &struct reservation_object. 1040 + * a &struct dma_buf this is tracked in &struct dma_resv. 1041 1041 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), 1042 1042 * whereas explicit fencing is what Android wants. 1043 1043 *
+6 -4
drivers/gpu/drm/drm_connector.c
··· 986 986 * - Kernel sends uevent with the connector id and property id through 987 987 * @drm_hdcp_update_content_protection, upon below kernel triggered 988 988 * scenarios: 989 - * DESIRED -> ENABLED (authentication success) 990 - * ENABLED -> DESIRED (termination of authentication) 989 + * 990 + * - DESIRED -> ENABLED (authentication success) 991 + * - ENABLED -> DESIRED (termination of authentication) 991 992 * - Please note no uevents for userspace triggered property state changes, 992 993 * which can't fail such as 993 - * DESIRED/ENABLED -> UNDESIRED 994 - * UNDESIRED -> DESIRED 994 + * 995 + * - DESIRED/ENABLED -> UNDESIRED 996 + * - UNDESIRED -> DESIRED 995 997 * - Userspace is responsible for polling the property or listen to uevents 996 998 * to determine when the value transitions from ENABLED to DESIRED. 997 999 * This signifies the link is no longer protected and userspace should
+16 -13
drivers/gpu/drm/drm_gem.c
··· 159 159 kref_init(&obj->refcount); 160 160 obj->handle_count = 0; 161 161 obj->size = size; 162 - reservation_object_init(&obj->_resv); 162 + dma_resv_init(&obj->_resv); 163 163 if (!obj->resv) 164 164 obj->resv = &obj->_resv; 165 165 ··· 633 633 634 634 pagevec_init(&pvec); 635 635 for (i = 0; i < npages; i++) { 636 + if (!pages[i]) 637 + continue; 638 + 636 639 if (dirty) 637 640 set_page_dirty(pages[i]); 638 641 ··· 755 752 EXPORT_SYMBOL(drm_gem_object_lookup); 756 753 757 754 /** 758 - * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects 755 + * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects 759 756 * shared and/or exclusive fences. 760 757 * @filep: DRM file private date 761 758 * @handle: userspace handle ··· 767 764 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 768 765 * greater than 0 on success. 769 766 */ 770 - long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, 767 + long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, 771 768 bool wait_all, unsigned long timeout) 772 769 { 773 770 long ret; ··· 779 776 return -EINVAL; 780 777 } 781 778 782 - ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all, 779 + ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, 783 780 true, timeout); 784 781 if (ret == 0) 785 782 ret = -ETIME; ··· 790 787 791 788 return ret; 792 789 } 793 - EXPORT_SYMBOL(drm_gem_reservation_object_wait); 790 + EXPORT_SYMBOL(drm_gem_dma_resv_wait); 794 791 795 792 /** 796 793 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl ··· 956 953 if (obj->filp) 957 954 fput(obj->filp); 958 955 959 - reservation_object_fini(&obj->_resv); 956 + dma_resv_fini(&obj->_resv); 960 957 drm_gem_free_mmap_offset(obj); 961 958 } 962 959 EXPORT_SYMBOL(drm_gem_object_release); ··· 1291 1288 if (contended != -1) { 1292 1289 struct drm_gem_object *obj = objs[contended]; 1293 1290 1294 - ret = reservation_object_lock_slow_interruptible(obj->resv, 
1291 + ret = dma_resv_lock_slow_interruptible(obj->resv, 1295 1292 acquire_ctx); 1296 1293 if (ret) { 1297 1294 ww_acquire_done(acquire_ctx); ··· 1303 1300 if (i == contended) 1304 1301 continue; 1305 1302 1306 - ret = reservation_object_lock_interruptible(objs[i]->resv, 1303 + ret = dma_resv_lock_interruptible(objs[i]->resv, 1307 1304 acquire_ctx); 1308 1305 if (ret) { 1309 1306 int j; 1310 1307 1311 1308 for (j = 0; j < i; j++) 1312 - reservation_object_unlock(objs[j]->resv); 1309 + dma_resv_unlock(objs[j]->resv); 1313 1310 1314 1311 if (contended != -1 && contended >= i) 1315 - reservation_object_unlock(objs[contended]->resv); 1312 + dma_resv_unlock(objs[contended]->resv); 1316 1313 1317 1314 if (ret == -EDEADLK) { 1318 1315 contended = i; ··· 1337 1334 int i; 1338 1335 1339 1336 for (i = 0; i < count; i++) 1340 - reservation_object_unlock(objs[i]->resv); 1337 + dma_resv_unlock(objs[i]->resv); 1341 1338 1342 1339 ww_acquire_fini(acquire_ctx); 1343 1340 } ··· 1413 1410 1414 1411 if (!write) { 1415 1412 struct dma_fence *fence = 1416 - reservation_object_get_excl_rcu(obj->resv); 1413 + dma_resv_get_excl_rcu(obj->resv); 1417 1414 1418 1415 return drm_gem_fence_array_add(fence_array, fence); 1419 1416 } 1420 1417 1421 - ret = reservation_object_get_fences_rcu(obj->resv, NULL, 1418 + ret = dma_resv_get_fences_rcu(obj->resv, NULL, 1422 1419 &fence_count, &fences); 1423 1420 if (ret || !fence_count) 1424 1421 return ret;
+2 -2
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 7 7 8 8 #include <linux/dma-buf.h> 9 9 #include <linux/dma-fence.h> 10 - #include <linux/reservation.h> 10 + #include <linux/dma-resv.h> 11 11 #include <linux/slab.h> 12 12 13 13 #include <drm/drm_atomic.h> ··· 294 294 return 0; 295 295 296 296 obj = drm_gem_fb_get_obj(state->fb, 0); 297 - fence = reservation_object_get_excl_rcu(obj->resv); 297 + fence = dma_resv_get_excl_rcu(obj->resv); 298 298 drm_atomic_set_fence_for_plane(state, fence); 299 299 300 300 return 0;
+59 -2
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 75 75 shmem = to_drm_gem_shmem_obj(obj); 76 76 mutex_init(&shmem->pages_lock); 77 77 mutex_init(&shmem->vmap_lock); 78 + INIT_LIST_HEAD(&shmem->madv_list); 78 79 79 80 /* 80 81 * Our buffers are kept pinned, so allocating them ··· 119 118 if (shmem->sgt) { 120 119 dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, 121 120 shmem->sgt->nents, DMA_BIDIRECTIONAL); 122 - 123 - drm_gem_shmem_put_pages(shmem); 124 121 sg_free_table(shmem->sgt); 125 122 kfree(shmem->sgt); 126 123 } 124 + if (shmem->pages) 125 + drm_gem_shmem_put_pages(shmem); 127 126 } 128 127 129 128 WARN_ON(shmem->pages_use_count); ··· 362 361 return shmem; 363 362 } 364 363 EXPORT_SYMBOL(drm_gem_shmem_create_with_handle); 364 + 365 + /* Update madvise status, returns true if not purged, else 366 + * false or -errno. 367 + */ 368 + int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv) 369 + { 370 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 371 + 372 + mutex_lock(&shmem->pages_lock); 373 + 374 + if (shmem->madv >= 0) 375 + shmem->madv = madv; 376 + 377 + madv = shmem->madv; 378 + 379 + mutex_unlock(&shmem->pages_lock); 380 + 381 + return (madv >= 0); 382 + } 383 + EXPORT_SYMBOL(drm_gem_shmem_madvise); 384 + 385 + void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) 386 + { 387 + struct drm_device *dev = obj->dev; 388 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 389 + 390 + WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); 391 + 392 + drm_gem_shmem_put_pages_locked(shmem); 393 + 394 + shmem->madv = -1; 395 + 396 + drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 397 + drm_gem_free_mmap_offset(obj); 398 + 399 + /* Our goal here is to return as much of the memory as 400 + * is possible back to the system as we are called from OOM. 401 + * To do this we must instruct the shmfs to drop all of its 402 + * backing pages, *now*. 
403 + */ 404 + shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); 405 + 406 + invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 407 + 0, (loff_t)-1); 408 + } 409 + EXPORT_SYMBOL(drm_gem_shmem_purge_locked); 410 + 411 + void drm_gem_shmem_purge(struct drm_gem_object *obj) 412 + { 413 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 414 + 415 + mutex_lock(&shmem->pages_lock); 416 + drm_gem_shmem_purge_locked(obj); 417 + mutex_unlock(&shmem->pages_lock); 418 + } 419 + EXPORT_SYMBOL(drm_gem_shmem_purge); 365 420 366 421 /** 367 422 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
+97 -5
drivers/gpu/drm/drm_panel.c
··· 123 123 * 124 124 * This function should not be called by the panel device itself. It 125 125 * is only for the drm device that called drm_panel_attach(). 126 - * 127 - * Return: 0 on success or a negative error code on failure. 128 126 */ 129 - int drm_panel_detach(struct drm_panel *panel) 127 + void drm_panel_detach(struct drm_panel *panel) 130 128 { 131 129 panel->connector = NULL; 132 130 panel->drm = NULL; 133 - 134 - return 0; 135 131 } 136 132 EXPORT_SYMBOL(drm_panel_detach); 133 + 134 + /** 135 + * drm_panel_prepare - power on a panel 136 + * @panel: DRM panel 137 + * 138 + * Calling this function will enable power and deassert any reset signals to 139 + * the panel. After this has completed it is possible to communicate with any 140 + * integrated circuitry via a command bus. 141 + * 142 + * Return: 0 on success or a negative error code on failure. 143 + */ 144 + int drm_panel_prepare(struct drm_panel *panel) 145 + { 146 + if (panel && panel->funcs && panel->funcs->prepare) 147 + return panel->funcs->prepare(panel); 148 + 149 + return panel ? -ENOSYS : -EINVAL; 150 + } 151 + EXPORT_SYMBOL(drm_panel_prepare); 152 + 153 + /** 154 + * drm_panel_unprepare - power off a panel 155 + * @panel: DRM panel 156 + * 157 + * Calling this function will completely power off a panel (assert the panel's 158 + * reset, turn off power supplies, ...). After this function has completed, it 159 + * is usually no longer possible to communicate with the panel until another 160 + * call to drm_panel_prepare(). 161 + * 162 + * Return: 0 on success or a negative error code on failure. 163 + */ 164 + int drm_panel_unprepare(struct drm_panel *panel) 165 + { 166 + if (panel && panel->funcs && panel->funcs->unprepare) 167 + return panel->funcs->unprepare(panel); 168 + 169 + return panel ? 
-ENOSYS : -EINVAL; 170 + } 171 + EXPORT_SYMBOL(drm_panel_unprepare); 172 + 173 + /** 174 + * drm_panel_enable - enable a panel 175 + * @panel: DRM panel 176 + * 177 + * Calling this function will cause the panel display drivers to be turned on 178 + * and the backlight to be enabled. Content will be visible on screen after 179 + * this call completes. 180 + * 181 + * Return: 0 on success or a negative error code on failure. 182 + */ 183 + int drm_panel_enable(struct drm_panel *panel) 184 + { 185 + if (panel && panel->funcs && panel->funcs->enable) 186 + return panel->funcs->enable(panel); 187 + 188 + return panel ? -ENOSYS : -EINVAL; 189 + } 190 + EXPORT_SYMBOL(drm_panel_enable); 191 + 192 + /** 193 + * drm_panel_disable - disable a panel 194 + * @panel: DRM panel 195 + * 196 + * This will typically turn off the panel's backlight or disable the display 197 + * drivers. For smart panels it should still be possible to communicate with 198 + * the integrated circuitry via any command bus after this call. 199 + * 200 + * Return: 0 on success or a negative error code on failure. 201 + */ 202 + int drm_panel_disable(struct drm_panel *panel) 203 + { 204 + if (panel && panel->funcs && panel->funcs->disable) 205 + return panel->funcs->disable(panel); 206 + 207 + return panel ? -ENOSYS : -EINVAL; 208 + } 209 + EXPORT_SYMBOL(drm_panel_disable); 210 + 211 + /** 212 + * drm_panel_get_modes - probe the available display modes of a panel 213 + * @panel: DRM panel 214 + * 215 + * The modes probed from the panel are automatically added to the connector 216 + * that the panel is attached to. 217 + * 218 + * Return: The number of modes available from the panel on success or a 219 + * negative error code on failure. 220 + */ 221 + int drm_panel_get_modes(struct drm_panel *panel) 222 + { 223 + if (panel && panel->funcs && panel->funcs->get_modes) 224 + return panel->funcs->get_modes(panel); 225 + 226 + return panel ? 
-ENOSYS : -EINVAL; 227 + } 228 + EXPORT_SYMBOL(drm_panel_get_modes); 137 229 138 230 #ifdef CONFIG_OF 139 231 /**
+89 -13
drivers/gpu/drm/drm_syncobj.c
··· 29 29 /** 30 30 * DOC: Overview 31 31 * 32 - * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are 33 - * persistent objects that contain an optional fence. The fence can be updated 34 - * with a new fence, or be NULL. 35 - * 36 - * syncobj's can be waited upon, where it will wait for the underlying 37 - * fence. 38 - * 39 - * syncobj's can be export to fd's and back, these fd's are opaque and 40 - * have no other use case, except passing the syncobj between processes. 41 - * 32 + * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a 33 + * container for a synchronization primitive which can be used by userspace 34 + * to explicitly synchronize GPU commands, can be shared between userspace 35 + * processes, and can be shared between different DRM drivers. 42 36 * Their primary use-case is to implement Vulkan fences and semaphores. 37 + * The syncobj userspace API provides ioctls for several operations: 43 38 * 44 - * syncobj have a kref reference count, but also have an optional file. 45 - * The file is only created once the syncobj is exported. 46 - * The file takes a reference on the kref. 39 + * - Creation and destruction of syncobjs 40 + * - Import and export of syncobjs to/from a syncobj file descriptor 41 + * - Import and export a syncobj's underlying fence to/from a sync file 42 + * - Reset a syncobj (set its fence to NULL) 43 + * - Signal a syncobj (set a trivially signaled fence) 44 + * - Wait for a syncobj's fence to appear and be signaled 45 + * 46 + * At it's core, a syncobj is simply a wrapper around a pointer to a struct 47 + * &dma_fence which may be NULL. 48 + * When a syncobj is first created, its pointer is either NULL or a pointer 49 + * to an already signaled fence depending on whether the 50 + * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to 51 + * &DRM_IOCTL_SYNCOBJ_CREATE. 
52 + * When GPU work which signals a syncobj is enqueued in a DRM driver, 53 + * the syncobj fence is replaced with a fence which will be signaled by the 54 + * completion of that work. 55 + * When GPU work which waits on a syncobj is enqueued in a DRM driver, the 56 + * driver retrieves syncobj's current fence at the time the work is enqueued 57 + * waits on that fence before submitting the work to hardware. 58 + * If the syncobj's fence is NULL, the enqueue operation is expected to fail. 59 + * All manipulation of the syncobjs's fence happens in terms of the current 60 + * fence at the time the ioctl is called by userspace regardless of whether 61 + * that operation is an immediate host-side operation (signal or reset) or 62 + * or an operation which is enqueued in some driver queue. 63 + * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to 64 + * manipulate a syncobj from the host by resetting its pointer to NULL or 65 + * setting its pointer to a fence which is already signaled. 66 + * 67 + * 68 + * Host-side wait on syncobjs 69 + * -------------------------- 70 + * 71 + * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a 72 + * host-side wait on all of the syncobj fences simultaneously. 73 + * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on 74 + * all of the syncobj fences to be signaled before it returns. 75 + * Otherwise, it returns once at least one syncobj fence has been signaled 76 + * and the index of a signaled fence is written back to the client. 77 + * 78 + * Unlike the enqueued GPU work dependencies which fail if they see a NULL 79 + * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set, 80 + * the host-side wait will first wait for the syncobj to receive a non-NULL 81 + * fence and then wait on that fence. 82 + * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the 83 + * syncobjs in the array has a NULL fence, -EINVAL will be returned. 
84 + * Assuming the syncobj starts off with a NULL fence, this allows a client 85 + * to do a host wait in one thread (or process) which waits on GPU work 86 + * submitted in another thread (or process) without having to manually 87 + * synchronize between the two. 88 + * This requirement is inherited from the Vulkan fence API. 89 + * 90 + * 91 + * Import/export of syncobjs 92 + * ------------------------- 93 + * 94 + * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD 95 + * provide two mechanisms for import/export of syncobjs. 96 + * 97 + * The first lets the client import or export an entire syncobj to a file 98 + * descriptor. 99 + * These fd's are opaque and have no other use case, except passing the 100 + * syncobj between processes. 101 + * All exported file descriptors and any syncobj handles created as a 102 + * result of importing those file descriptors own a reference to the 103 + * same underlying struct &drm_syncobj and the syncobj can be used 104 + * persistently across all the processes with which it is shared. 105 + * The syncobj is freed only once the last reference is dropped. 106 + * Unlike dma-buf, importing a syncobj creates a new handle (with its own 107 + * reference) for every import instead of de-duplicating. 108 + * The primary use-case of this persistent import/export is for shared 109 + * Vulkan fences and semaphores. 110 + * 111 + * The second import/export mechanism, which is indicated by 112 + * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or 113 + * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client 114 + * import/export the syncobj's current fence from/to a &sync_file. 115 + * When a syncobj is exported to a sync file, that sync file wraps the 116 + * sycnobj's fence at the time of export and any later signal or reset 117 + * operations on the syncobj will not affect the exported sync file. 
118 + * When a sync file is imported into a syncobj, the syncobj's fence is set 119 + * to the fence wrapped by that sync file. 120 + * Because sync files are immutable, resetting or signaling the syncobj 121 + * will not affect any sync files whose fences have been imported into the 122 + * syncobj. 47 123 */ 48 124 49 125 #include <linux/anon_inodes.h>
+4 -4
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 397 397 } 398 398 399 399 if (op & ETNA_PREP_NOSYNC) { 400 - if (!reservation_object_test_signaled_rcu(obj->resv, 400 + if (!dma_resv_test_signaled_rcu(obj->resv, 401 401 write)) 402 402 return -EBUSY; 403 403 } else { 404 404 unsigned long remain = etnaviv_timeout_to_jiffies(timeout); 405 405 406 - ret = reservation_object_wait_timeout_rcu(obj->resv, 406 + ret = dma_resv_wait_timeout_rcu(obj->resv, 407 407 write, true, remain); 408 408 if (ret <= 0) 409 409 return ret == 0 ? -ETIMEDOUT : ret; ··· 459 459 static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 460 460 { 461 461 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 462 - struct reservation_object *robj = obj->resv; 463 - struct reservation_object_list *fobj; 462 + struct dma_resv *robj = obj->resv; 463 + struct dma_resv_list *fobj; 464 464 struct dma_fence *fence; 465 465 unsigned long off = drm_vma_node_start(&obj->vma_node); 466 466
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem.h
··· 6 6 #ifndef __ETNAVIV_GEM_H__ 7 7 #define __ETNAVIV_GEM_H__ 8 8 9 - #include <linux/reservation.h> 9 + #include <linux/dma-resv.h> 10 10 #include "etnaviv_cmdbuf.h" 11 11 #include "etnaviv_drv.h" 12 12
+7 -7
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 4 4 */ 5 5 6 6 #include <linux/dma-fence-array.h> 7 - #include <linux/reservation.h> 7 + #include <linux/dma-resv.h> 8 8 #include <linux/sync_file.h> 9 9 #include "etnaviv_cmdbuf.h" 10 10 #include "etnaviv_drv.h" ··· 165 165 166 166 for (i = 0; i < submit->nr_bos; i++) { 167 167 struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; 168 - struct reservation_object *robj = bo->obj->base.resv; 168 + struct dma_resv *robj = bo->obj->base.resv; 169 169 170 170 if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { 171 - ret = reservation_object_reserve_shared(robj, 1); 171 + ret = dma_resv_reserve_shared(robj, 1); 172 172 if (ret) 173 173 return ret; 174 174 } ··· 177 177 continue; 178 178 179 179 if (bo->flags & ETNA_SUBMIT_BO_WRITE) { 180 - ret = reservation_object_get_fences_rcu(robj, &bo->excl, 180 + ret = dma_resv_get_fences_rcu(robj, &bo->excl, 181 181 &bo->nr_shared, 182 182 &bo->shared); 183 183 if (ret) 184 184 return ret; 185 185 } else { 186 - bo->excl = reservation_object_get_excl_rcu(robj); 186 + bo->excl = dma_resv_get_excl_rcu(robj); 187 187 } 188 188 189 189 } ··· 199 199 struct drm_gem_object *obj = &submit->bos[i].obj->base; 200 200 201 201 if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) 202 - reservation_object_add_excl_fence(obj->resv, 202 + dma_resv_add_excl_fence(obj->resv, 203 203 submit->out_fence); 204 204 else 205 - reservation_object_add_shared_fence(obj->resv, 205 + dma_resv_add_shared_fence(obj->resv, 206 206 submit->out_fence); 207 207 208 208 submit_unlock_object(submit, i);
+1 -9
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
··· 65 65 static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) 66 66 { 67 67 struct fsl_dcu_drm_connector *fsl_connector; 68 - int (*get_modes)(struct drm_panel *panel); 69 - int num_modes = 0; 70 68 71 69 fsl_connector = to_fsl_dcu_connector(connector); 72 - if (fsl_connector->panel && fsl_connector->panel->funcs && 73 - fsl_connector->panel->funcs->get_modes) { 74 - get_modes = fsl_connector->panel->funcs->get_modes; 75 - num_modes = get_modes(fsl_connector->panel); 76 - } 77 - 78 - return num_modes; 70 + return drm_panel_get_modes(fsl_connector->panel); 79 71 } 80 72 81 73 static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+1 -1
drivers/gpu/drm/i2c/tda998x_drv.c
··· 13 13 #include <sound/asoundef.h> 14 14 #include <sound/hdmi-codec.h> 15 15 16 - #include <drm/drmP.h> 17 16 #include <drm/drm_atomic_helper.h> 18 17 #include <drm/drm_edid.h> 19 18 #include <drm/drm_of.h> 19 + #include <drm/drm_print.h> 20 20 #include <drm/drm_probe_helper.h> 21 21 #include <drm/i2c/tda998x.h> 22 22
+2 -2
drivers/gpu/drm/i915/display/intel_display.c
··· 29 29 #include <linux/intel-iommu.h> 30 30 #include <linux/kernel.h> 31 31 #include <linux/module.h> 32 - #include <linux/reservation.h> 32 + #include <linux/dma-resv.h> 33 33 #include <linux/slab.h> 34 34 #include <linux/vgaarb.h> 35 35 ··· 14431 14431 if (ret < 0) 14432 14432 return ret; 14433 14433 14434 - fence = reservation_object_get_excl_rcu(obj->base.resv); 14434 + fence = dma_resv_get_excl_rcu(obj->base.resv); 14435 14435 if (fence) { 14436 14436 add_rps_boost_after_vblank(new_state->crtc, fence); 14437 14437 dma_fence_put(fence);
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_busy.c
··· 82 82 { 83 83 struct drm_i915_gem_busy *args = data; 84 84 struct drm_i915_gem_object *obj; 85 - struct reservation_object_list *list; 85 + struct dma_resv_list *list; 86 86 unsigned int seq; 87 87 int err; 88 88 ··· 105 105 * Alternatively, we can trade that extra information on read/write 106 106 * activity with 107 107 * args->busy = 108 - * !reservation_object_test_signaled_rcu(obj->resv, true); 108 + * !dma_resv_test_signaled_rcu(obj->resv, true); 109 109 * to report the overall busyness. This is what the wait-ioctl does. 110 110 * 111 111 */
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
··· 147 147 true, I915_FENCE_TIMEOUT, 148 148 I915_FENCE_GFP); 149 149 150 - reservation_object_add_excl_fence(obj->base.resv, 150 + dma_resv_add_excl_fence(obj->base.resv, 151 151 &clflush->dma); 152 152 153 153 i915_sw_fence_commit(&clflush->wait);
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
··· 287 287 if (err < 0) { 288 288 dma_fence_set_error(&work->dma, err); 289 289 } else { 290 - reservation_object_add_excl_fence(obj->base.resv, &work->dma); 290 + dma_resv_add_excl_fence(obj->base.resv, &work->dma); 291 291 err = 0; 292 292 } 293 293 i915_gem_object_unlock(obj);
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
··· 6 6 7 7 #include <linux/dma-buf.h> 8 8 #include <linux/highmem.h> 9 - #include <linux/reservation.h> 9 + #include <linux/dma-resv.h> 10 10 11 11 #include "i915_drv.h" 12 12 #include "i915_gem_object.h"
+3 -3
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 5 5 */ 6 6 7 7 #include <linux/intel-iommu.h> 8 - #include <linux/reservation.h> 8 + #include <linux/dma-resv.h> 9 9 #include <linux/sync_file.h> 10 10 #include <linux/uaccess.h> 11 11 ··· 1242 1242 goto skip_request; 1243 1243 1244 1244 i915_vma_lock(batch); 1245 - GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); 1245 + GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true)); 1246 1246 err = i915_vma_move_to_active(batch, rq, 0); 1247 1247 i915_vma_unlock(batch); 1248 1248 if (err) ··· 1313 1313 1314 1314 if (!eb->reloc_cache.vaddr && 1315 1315 (DBG_FORCE_RELOC == FORCE_GPU_RELOC || 1316 - !reservation_object_test_signaled_rcu(vma->resv, true))) { 1316 + !dma_resv_test_signaled_rcu(vma->resv, true))) { 1317 1317 const unsigned int gen = eb->reloc_cache.gen; 1318 1318 unsigned int len; 1319 1319 u32 *batch;
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_fence.c
··· 78 78 I915_FENCE_GFP) < 0) 79 79 goto err; 80 80 81 - reservation_object_add_excl_fence(obj->base.resv, &stub->dma); 81 + dma_resv_add_excl_fence(obj->base.resv, &stub->dma); 82 82 83 83 return &stub->dma; 84 84
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_object.c
··· 152 152 container_of(head, typeof(*obj), rcu); 153 153 struct drm_i915_private *i915 = to_i915(obj->base.dev); 154 154 155 - reservation_object_fini(&obj->base._resv); 155 + dma_resv_fini(&obj->base._resv); 156 156 i915_gem_object_free(obj); 157 157 158 158 GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+5 -5
drivers/gpu/drm/i915/gem/i915_gem_object.h
··· 99 99 __drm_gem_object_put(&obj->base); 100 100 } 101 101 102 - #define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv) 102 + #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv) 103 103 104 104 static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) 105 105 { 106 - reservation_object_lock(obj->base.resv, NULL); 106 + dma_resv_lock(obj->base.resv, NULL); 107 107 } 108 108 109 109 static inline int 110 110 i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj) 111 111 { 112 - return reservation_object_lock_interruptible(obj->base.resv, NULL); 112 + return dma_resv_lock_interruptible(obj->base.resv, NULL); 113 113 } 114 114 115 115 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) 116 116 { 117 - reservation_object_unlock(obj->base.resv); 117 + dma_resv_unlock(obj->base.resv); 118 118 } 119 119 120 120 struct dma_fence * ··· 367 367 struct dma_fence *fence; 368 368 369 369 rcu_read_lock(); 370 - fence = reservation_object_get_excl_rcu(obj->base.resv); 370 + fence = dma_resv_get_excl_rcu(obj->base.resv); 371 371 rcu_read_unlock(); 372 372 373 373 if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+9 -9
drivers/gpu/drm/i915/gem/i915_gem_wait.c
··· 31 31 } 32 32 33 33 static long 34 - i915_gem_object_wait_reservation(struct reservation_object *resv, 34 + i915_gem_object_wait_reservation(struct dma_resv *resv, 35 35 unsigned int flags, 36 36 long timeout) 37 37 { ··· 43 43 unsigned int count, i; 44 44 int ret; 45 45 46 - ret = reservation_object_get_fences_rcu(resv, 46 + ret = dma_resv_get_fences_rcu(resv, 47 47 &excl, &count, &shared); 48 48 if (ret) 49 49 return ret; ··· 72 72 */ 73 73 prune_fences = count && timeout >= 0; 74 74 } else { 75 - excl = reservation_object_get_excl_rcu(resv); 75 + excl = dma_resv_get_excl_rcu(resv); 76 76 } 77 77 78 78 if (excl && timeout >= 0) ··· 84 84 * Opportunistically prune the fences iff we know they have *all* been 85 85 * signaled. 86 86 */ 87 - if (prune_fences && reservation_object_trylock(resv)) { 88 - if (reservation_object_test_signaled_rcu(resv, true)) 89 - reservation_object_add_excl_fence(resv, NULL); 90 - reservation_object_unlock(resv); 87 + if (prune_fences && dma_resv_trylock(resv)) { 88 + if (dma_resv_test_signaled_rcu(resv, true)) 89 + dma_resv_add_excl_fence(resv, NULL); 90 + dma_resv_unlock(resv); 91 91 } 92 92 93 93 return timeout; ··· 140 140 unsigned int count, i; 141 141 int ret; 142 142 143 - ret = reservation_object_get_fences_rcu(obj->base.resv, 143 + ret = dma_resv_get_fences_rcu(obj->base.resv, 144 144 &excl, &count, &shared); 145 145 if (ret) 146 146 return ret; ··· 152 152 153 153 kfree(shared); 154 154 } else { 155 - excl = reservation_object_get_excl_rcu(obj->base.resv); 155 + excl = dma_resv_get_excl_rcu(obj->base.resv); 156 156 } 157 157 158 158 if (excl) {
+7 -6
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
··· 112 112 } 113 113 114 114 static void 115 - __dma_fence_signal__notify(struct dma_fence *fence) 115 + __dma_fence_signal__notify(struct dma_fence *fence, 116 + const struct list_head *list) 116 117 { 117 118 struct dma_fence_cb *cur, *tmp; 118 119 119 120 lockdep_assert_held(fence->lock); 120 121 lockdep_assert_irqs_disabled(); 121 122 122 - list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { 123 + list_for_each_entry_safe(cur, tmp, list, node) { 123 124 INIT_LIST_HEAD(&cur->node); 124 125 cur->func(fence, cur); 125 126 } 126 - INIT_LIST_HEAD(&fence->cb_list); 127 127 } 128 128 129 129 void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) ··· 185 185 list_for_each_safe(pos, next, &signal) { 186 186 struct i915_request *rq = 187 187 list_entry(pos, typeof(*rq), signal_link); 188 - 189 - __dma_fence_signal__timestamp(&rq->fence, timestamp); 188 + struct list_head cb_list; 190 189 191 190 spin_lock(&rq->lock); 192 - __dma_fence_signal__notify(&rq->fence); 191 + list_replace(&rq->fence.cb_list, &cb_list); 192 + __dma_fence_signal__timestamp(&rq->fence, timestamp); 193 + __dma_fence_signal__notify(&rq->fence, &cb_list); 193 194 spin_unlock(&rq->lock); 194 195 195 196 i915_request_put(rq);
+1 -1
drivers/gpu/drm/i915/i915_drv.h
··· 43 43 #include <linux/mm_types.h> 44 44 #include <linux/perf_event.h> 45 45 #include <linux/pm_qos.h> 46 - #include <linux/reservation.h> 46 + #include <linux/dma-resv.h> 47 47 #include <linux/shmem_fs.h> 48 48 #include <linux/stackdepot.h> 49 49
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 29 29 #include <drm/i915_drm.h> 30 30 #include <linux/dma-fence-array.h> 31 31 #include <linux/kthread.h> 32 - #include <linux/reservation.h> 32 + #include <linux/dma-resv.h> 33 33 #include <linux/shmem_fs.h> 34 34 #include <linux/slab.h> 35 35 #include <linux/stop_machine.h>
+5 -5
drivers/gpu/drm/i915/i915_gem_batch_pool.c
··· 94 94 list = &pool->cache_list[n]; 95 95 96 96 list_for_each_entry(obj, list, batch_pool_link) { 97 - struct reservation_object *resv = obj->base.resv; 97 + struct dma_resv *resv = obj->base.resv; 98 98 99 99 /* The batches are strictly LRU ordered */ 100 - if (!reservation_object_test_signaled_rcu(resv, true)) 100 + if (!dma_resv_test_signaled_rcu(resv, true)) 101 101 break; 102 102 103 103 /* ··· 109 109 * than replace the existing fence. 110 110 */ 111 111 if (rcu_access_pointer(resv->fence)) { 112 - reservation_object_lock(resv, NULL); 113 - reservation_object_add_excl_fence(resv, NULL); 114 - reservation_object_unlock(resv); 112 + dma_resv_lock(resv, NULL); 113 + dma_resv_add_excl_fence(resv, NULL); 114 + dma_resv_unlock(resv); 115 115 } 116 116 117 117 if (obj->base.size >= size)
+2 -2
drivers/gpu/drm/i915/i915_request.c
··· 1038 1038 struct dma_fence **shared; 1039 1039 unsigned int count, i; 1040 1040 1041 - ret = reservation_object_get_fences_rcu(obj->base.resv, 1041 + ret = dma_resv_get_fences_rcu(obj->base.resv, 1042 1042 &excl, &count, &shared); 1043 1043 if (ret) 1044 1044 return ret; ··· 1055 1055 dma_fence_put(shared[i]); 1056 1056 kfree(shared); 1057 1057 } else { 1058 - excl = reservation_object_get_excl_rcu(obj->base.resv); 1058 + excl = dma_resv_get_excl_rcu(obj->base.resv); 1059 1059 } 1060 1060 1061 1061 if (excl) {
+4 -4
drivers/gpu/drm/i915/i915_sw_fence.c
··· 7 7 #include <linux/slab.h> 8 8 #include <linux/dma-fence.h> 9 9 #include <linux/irq_work.h> 10 - #include <linux/reservation.h> 10 + #include <linux/dma-resv.h> 11 11 12 12 #include "i915_sw_fence.h" 13 13 #include "i915_selftest.h" ··· 510 510 } 511 511 512 512 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, 513 - struct reservation_object *resv, 513 + struct dma_resv *resv, 514 514 const struct dma_fence_ops *exclude, 515 515 bool write, 516 516 unsigned long timeout, ··· 526 526 struct dma_fence **shared; 527 527 unsigned int count, i; 528 528 529 - ret = reservation_object_get_fences_rcu(resv, 529 + ret = dma_resv_get_fences_rcu(resv, 530 530 &excl, &count, &shared); 531 531 if (ret) 532 532 return ret; ··· 551 551 dma_fence_put(shared[i]); 552 552 kfree(shared); 553 553 } else { 554 - excl = reservation_object_get_excl_rcu(resv); 554 + excl = dma_resv_get_excl_rcu(resv); 555 555 } 556 556 557 557 if (ret >= 0 && excl && excl->ops != exclude) {
+2 -2
drivers/gpu/drm/i915/i915_sw_fence.h
··· 16 16 #include <linux/wait.h> 17 17 18 18 struct completion; 19 - struct reservation_object; 19 + struct dma_resv; 20 20 21 21 struct i915_sw_fence { 22 22 wait_queue_head_t wait; ··· 82 82 gfp_t gfp); 83 83 84 84 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, 85 - struct reservation_object *resv, 85 + struct dma_resv *resv, 86 86 const struct dma_fence_ops *exclude, 87 87 bool write, 88 88 unsigned long timeout,
+4 -4
drivers/gpu/drm/i915/i915_vma.c
··· 890 890 struct i915_request *rq, 891 891 unsigned int flags) 892 892 { 893 - struct reservation_object *resv = vma->resv; 893 + struct dma_resv *resv = vma->resv; 894 894 895 895 /* 896 896 * Ignore errors from failing to allocate the new fence, we can't ··· 898 898 * synchronisation leading to rendering corruption. 899 899 */ 900 900 if (flags & EXEC_OBJECT_WRITE) 901 - reservation_object_add_excl_fence(resv, &rq->fence); 902 - else if (reservation_object_reserve_shared(resv, 1) == 0) 903 - reservation_object_add_shared_fence(resv, &rq->fence); 901 + dma_resv_add_excl_fence(resv, &rq->fence); 902 + else if (dma_resv_reserve_shared(resv, 1) == 0) 903 + dma_resv_add_shared_fence(resv, &rq->fence); 904 904 } 905 905 906 906 int i915_vma_move_to_active(struct i915_vma *vma,
+4 -4
drivers/gpu/drm/i915/i915_vma.h
··· 55 55 struct i915_address_space *vm; 56 56 const struct i915_vma_ops *ops; 57 57 struct i915_fence_reg *fence; 58 - struct reservation_object *resv; /** Alias of obj->resv */ 58 + struct dma_resv *resv; /** Alias of obj->resv */ 59 59 struct sg_table *pages; 60 60 void __iomem *iomap; 61 61 void *private; /* owned by creator */ ··· 299 299 void i915_vma_reopen(struct i915_vma *vma); 300 300 void i915_vma_destroy(struct i915_vma *vma); 301 301 302 - #define assert_vma_held(vma) reservation_object_assert_held((vma)->resv) 302 + #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) 303 303 304 304 static inline void i915_vma_lock(struct i915_vma *vma) 305 305 { 306 - reservation_object_lock(vma->resv, NULL); 306 + dma_resv_lock(vma->resv, NULL); 307 307 } 308 308 309 309 static inline void i915_vma_unlock(struct i915_vma *vma) 310 310 { 311 - reservation_object_unlock(vma->resv); 311 + dma_resv_unlock(vma->resv); 312 312 } 313 313 314 314 int __i915_vma_do_pin(struct i915_vma *vma,
+4 -7
drivers/gpu/drm/imx/imx-ldb.c
··· 124 124 static int imx_ldb_connector_get_modes(struct drm_connector *connector) 125 125 { 126 126 struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector); 127 - int num_modes = 0; 127 + int num_modes; 128 128 129 - if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs && 130 - imx_ldb_ch->panel->funcs->get_modes) { 131 - num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel); 132 - if (num_modes > 0) 133 - return num_modes; 134 - } 129 + num_modes = drm_panel_get_modes(imx_ldb_ch->panel); 130 + if (num_modes > 0) 131 + return num_modes; 135 132 136 133 if (!imx_ldb_ch->edid && imx_ldb_ch->ddc) 137 134 imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
+4 -7
drivers/gpu/drm/imx/parallel-display.c
··· 47 47 { 48 48 struct imx_parallel_display *imxpd = con_to_imxpd(connector); 49 49 struct device_node *np = imxpd->dev->of_node; 50 - int num_modes = 0; 50 + int num_modes; 51 51 52 - if (imxpd->panel && imxpd->panel->funcs && 53 - imxpd->panel->funcs->get_modes) { 54 - num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); 55 - if (num_modes > 0) 56 - return num_modes; 57 - } 52 + num_modes = drm_panel_get_modes(imxpd->panel); 53 + if (num_modes > 0) 54 + return num_modes; 58 55 59 56 if (imxpd->edid) { 60 57 drm_connector_update_edid_property(connector, imxpd->edid);
+4 -4
drivers/gpu/drm/lima/lima_gem.c
··· 136 136 int err = 0; 137 137 138 138 if (!write) { 139 - err = reservation_object_reserve_shared(bo->gem.resv, 1); 139 + err = dma_resv_reserve_shared(bo->gem.resv, 1); 140 140 if (err) 141 141 return err; 142 142 } ··· 296 296 297 297 for (i = 0; i < submit->nr_bos; i++) { 298 298 if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) 299 - reservation_object_add_excl_fence(bos[i]->gem.resv, fence); 299 + dma_resv_add_excl_fence(bos[i]->gem.resv, fence); 300 300 else 301 - reservation_object_add_shared_fence(bos[i]->gem.resv, fence); 301 + dma_resv_add_shared_fence(bos[i]->gem.resv, fence); 302 302 } 303 303 304 304 lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); ··· 341 341 342 342 timeout = drm_timeout_abs_to_jiffies(timeout_ns); 343 343 344 - ret = drm_gem_reservation_object_wait(file, handle, write, timeout); 344 + ret = drm_gem_dma_resv_wait(file, handle, write, timeout); 345 345 if (ret == 0) 346 346 ret = timeout ? -ETIMEDOUT : -EBUSY; 347 347
+1 -1
drivers/gpu/drm/mediatek/mtk_drm_fb.c
··· 4 4 */ 5 5 6 6 #include <linux/dma-buf.h> 7 - #include <linux/reservation.h> 7 + #include <linux/dma-resv.h> 8 8 9 9 #include <drm/drm_modeset_helper.h> 10 10 #include <drm/drm_fb_helper.h>
+11 -6
drivers/gpu/drm/meson/meson_crtc.c
··· 265 265 266 266 static void meson_g12a_crtc_enable_vd1(struct meson_drm *priv) 267 267 { 268 - writel_relaxed(((1 << 16) | /* post bld premult*/ 269 - (1 << 8) | /* post src */ 270 - (1 << 4) | /* pre bld premult*/ 271 - (1 << 0)), 272 - priv->io_base + _REG(VD1_BLEND_SRC_CTRL)); 268 + writel_relaxed(VD_BLEND_PREBLD_SRC_VD1 | 269 + VD_BLEND_PREBLD_PREMULT_EN | 270 + VD_BLEND_POSTBLD_SRC_VD1 | 271 + VD_BLEND_POSTBLD_PREMULT_EN, 272 + priv->io_base + _REG(VD1_BLEND_SRC_CTRL)); 273 273 } 274 274 275 275 void meson_crtc_irq(struct meson_drm *priv) ··· 487 487 writel_relaxed(priv->viu.vd1_range_map_cr, 488 488 priv->io_base + meson_crtc->viu_offset + 489 489 _REG(VD1_IF0_RANGE_MAP_CR)); 490 - writel_relaxed(0x78404, 490 + writel_relaxed(VPP_VSC_BANK_LENGTH(4) | 491 + VPP_HSC_BANK_LENGTH(4) | 492 + VPP_SC_VD_EN_ENABLE | 493 + VPP_SC_TOP_EN_ENABLE | 494 + VPP_SC_HSC_EN_ENABLE | 495 + VPP_SC_VSC_EN_ENABLE, 491 496 priv->io_base + _REG(VPP_SC_MISC)); 492 497 writel_relaxed(priv->viu.vpp_pic_in_height, 493 498 priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
+22 -4
drivers/gpu/drm/meson/meson_drv.c
··· 140 140 141 141 static void meson_vpu_init(struct meson_drm *priv) 142 142 { 143 - writel_relaxed(0x210000, priv->io_base + _REG(VPU_RDARB_MODE_L1C1)); 144 - writel_relaxed(0x10000, priv->io_base + _REG(VPU_RDARB_MODE_L1C2)); 145 - writel_relaxed(0x900000, priv->io_base + _REG(VPU_RDARB_MODE_L2C1)); 146 - writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1)); 143 + u32 value; 144 + 145 + /* 146 + * Slave dc0 and dc5 connected to master port 1. 147 + * By default other slaves are connected to master port 0. 148 + */ 149 + value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) | 150 + VPU_RDARB_SLAVE_TO_MASTER_PORT(5, 1); 151 + writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C1)); 152 + 153 + /* Slave dc0 connected to master port 1 */ 154 + value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1); 155 + writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C2)); 156 + 157 + /* Slave dc4 and dc7 connected to master port 1 */ 158 + value = VPU_RDARB_SLAVE_TO_MASTER_PORT(4, 1) | 159 + VPU_RDARB_SLAVE_TO_MASTER_PORT(7, 1); 160 + writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L2C1)); 161 + 162 + /* Slave dc1 connected to master port 1 */ 163 + value = VPU_RDARB_SLAVE_TO_MASTER_PORT(1, 1); 164 + writel_relaxed(value, priv->io_base + _REG(VPU_WRARB_MODE_L2C1)); 147 165 } 148 166 149 167 static void meson_remove_framebuffers(void)
+2
drivers/gpu/drm/meson/meson_dw_hdmi.c
··· 429 429 /* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */ 430 430 dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL, 431 431 0x3, 0x3); 432 + 433 + /* Enable cec_clk and hdcp22_tmdsclk_en */ 432 434 dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL, 433 435 0x3 << 4, 0x3 << 4); 434 436
+8 -4
drivers/gpu/drm/meson/meson_dw_hdmi.h
··· 100 100 #define HDMITX_TOP_INTR_RXSENSE_RISE BIT(6) 101 101 #define HDMITX_TOP_INTR_RXSENSE_FALL BIT(7) 102 102 103 - /* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data; 103 + /* 104 + * Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data; 104 105 * 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0. 105 106 * Bit 11: 9 RW shift_pttn_repeat: 0=New pattern every clk cycle; 1=New pattern 106 107 * every 2 clk cycles; ...; 7=New pattern every 8 clk cycles. Default 0. ··· 136 135 /* Bit 9: 0 RW tmds_clk_pttn[29:20]. Default 0. */ 137 136 #define HDMITX_TOP_TMDS_CLK_PTTN_23 (0x00B) 138 137 139 - /* Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern, 138 + /* 139 + * Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern, 140 140 * used when TMDS CLK rate = TMDS character rate /4. Default 0. 141 141 * Bit 0 R Reserved. Default 0. 142 142 * [ 1] shift_tmds_clk_pttn ··· 145 143 */ 146 144 #define HDMITX_TOP_TMDS_CLK_PTTN_CNTL (0x00C) 147 145 148 - /* Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM 146 + /* 147 + * Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM 149 148 * failure, write 1 to clear the failure flag. Default 0. 150 149 */ 151 150 #define HDMITX_TOP_REVOCMEM_STAT (0x00D) 152 151 153 - /* Bit 1 R filtered RxSense status 152 + /* 153 + * Bit 1 R filtered RxSense status 154 154 * Bit 0 R filtered HPD status. 155 155 */ 156 156 #define HDMITX_TOP_STAT0 (0x00E)
+1 -1
drivers/gpu/drm/meson/meson_plane.c
··· 328 328 329 329 /* Disable OSD1 */ 330 330 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) 331 - writel_bits_relaxed(3 << 8, 0, 331 + writel_bits_relaxed(VIU_OSD1_POSTBLD_SRC_OSD1, 0, 332 332 priv->io_base + _REG(OSD1_BLEND_SRC_CTRL)); 333 333 else 334 334 writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
+118 -18
drivers/gpu/drm/meson/meson_registers.h
··· 12 12 #define _REG(reg) ((reg) << 2) 13 13 14 14 #define writel_bits_relaxed(mask, val, addr) \ 15 - writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr) 15 + writel_relaxed((readl_relaxed(addr) & ~(mask)) | ((val) & (mask)), addr) 16 16 17 17 /* vpp2 */ 18 18 #define VPP2_DUMMY_DATA 0x1900 ··· 138 138 #define VIU_ADDR_START 0x1a00 139 139 #define VIU_ADDR_END 0x1aff 140 140 #define VIU_SW_RESET 0x1a01 141 + #define VIU_SW_RESET_OSD1 BIT(0) 141 142 #define VIU_MISC_CTRL0 0x1a06 143 + #define VIU_CTRL0_VD1_AFBC_MASK 0x170000 142 144 #define VIU_MISC_CTRL1 0x1a07 143 145 #define D2D3_INTF_LENGTH 0x1a08 144 146 #define D2D3_INTF_CTRL0 0x1a09 145 147 #define VIU_OSD1_CTRL_STAT 0x1a10 148 + #define VIU_OSD1_OSD_BLK_ENABLE BIT(0) 149 + #define VIU_OSD1_POSTBLD_SRC_VD1 (1 << 8) 150 + #define VIU_OSD1_POSTBLD_SRC_VD2 (2 << 8) 151 + #define VIU_OSD1_POSTBLD_SRC_OSD1 (3 << 8) 152 + #define VIU_OSD1_POSTBLD_SRC_OSD2 (4 << 8) 153 + #define VIU_OSD1_OSD_ENABLE BIT(21) 146 154 #define VIU_OSD1_CTRL_STAT2 0x1a2d 147 155 #define VIU_OSD1_COLOR_ADDR 0x1a11 148 156 #define VIU_OSD1_COLOR 0x1a12 ··· 239 231 #define VIU_OSD3_PROT_CTRL 0x3d9e 240 232 #define VIU_OSD3_MALI_UNPACK_CTRL 0x3d9f 241 233 #define VIU_OSD3_DIMM_CTRL 0x3da0 234 + 235 + #define VIU_OSD_DDR_PRIORITY_URGENT BIT(0) 236 + #define VIU_OSD_HOLD_FIFO_LINES(lines) ((lines & 0x1f) << 5) 237 + #define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12) 238 + #define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22) 239 + #define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24) 242 240 243 241 #define VD1_IF0_GEN_REG 0x1a50 244 242 #define VD1_IF0_CANVAS0 0x1a51 ··· 355 341 #define VPP_LINE_IN_LENGTH 0x1d01 356 342 #define VPP_PIC_IN_HEIGHT 0x1d02 357 343 #define VPP_SCALE_COEF_IDX 0x1d03 344 + #define VPP_SCALE_HORIZONTAL_COEF BIT(8) 358 345 #define VPP_SCALE_COEF 0x1d04 359 346 #define VPP_VSC_REGION12_STARTP 0x1d05 360 347 #define VPP_VSC_REGION34_STARTP 0x1d06 ··· 377 362 #define 
VPP_HSC_REGION4_PHASE_SLOPE 0x1d17 378 363 #define VPP_HSC_PHASE_CTRL 0x1d18 379 364 #define VPP_SC_MISC 0x1d19 365 + #define VPP_SC_VD_EN_ENABLE BIT(15) 366 + #define VPP_SC_TOP_EN_ENABLE BIT(16) 367 + #define VPP_SC_HSC_EN_ENABLE BIT(17) 368 + #define VPP_SC_VSC_EN_ENABLE BIT(18) 369 + #define VPP_VSC_BANK_LENGTH(length) (length & 0x7) 370 + #define VPP_HSC_BANK_LENGTH(length) ((length & 0x7) << 8) 380 371 #define VPP_PREBLEND_VD1_H_START_END 0x1d1a 381 372 #define VPP_PREBLEND_VD1_V_START_END 0x1d1b 382 373 #define VPP_POSTBLEND_VD1_H_START_END 0x1d1c ··· 392 371 #define VPP_PREBLEND_H_SIZE 0x1d20 393 372 #define VPP_POSTBLEND_H_SIZE 0x1d21 394 373 #define VPP_HOLD_LINES 0x1d22 374 + #define VPP_POSTBLEND_HOLD_LINES(lines) (lines & 0xf) 375 + #define VPP_PREBLEND_HOLD_LINES(lines) ((lines & 0xf) << 8) 395 376 #define VPP_BLEND_ONECOLOR_CTRL 0x1d23 396 377 #define VPP_PREBLEND_CURRENT_XY 0x1d24 397 378 #define VPP_POSTBLEND_CURRENT_XY 0x1d25 398 379 #define VPP_MISC 0x1d26 399 - #define VPP_PREBLEND_ENABLE BIT(6) 400 - #define VPP_POSTBLEND_ENABLE BIT(7) 401 - #define VPP_OSD2_ALPHA_PREMULT BIT(8) 402 - #define VPP_OSD1_ALPHA_PREMULT BIT(9) 403 - #define VPP_VD1_POSTBLEND BIT(10) 404 - #define VPP_VD2_POSTBLEND BIT(11) 405 - #define VPP_OSD1_POSTBLEND BIT(12) 406 - #define VPP_OSD2_POSTBLEND BIT(13) 407 - #define VPP_VD1_PREBLEND BIT(14) 408 - #define VPP_VD2_PREBLEND BIT(15) 409 - #define VPP_OSD1_PREBLEND BIT(16) 410 - #define VPP_OSD2_PREBLEND BIT(17) 411 - #define VPP_COLOR_MNG_ENABLE BIT(28) 380 + #define VPP_PREBLEND_ENABLE BIT(6) 381 + #define VPP_POSTBLEND_ENABLE BIT(7) 382 + #define VPP_OSD2_ALPHA_PREMULT BIT(8) 383 + #define VPP_OSD1_ALPHA_PREMULT BIT(9) 384 + #define VPP_VD1_POSTBLEND BIT(10) 385 + #define VPP_VD2_POSTBLEND BIT(11) 386 + #define VPP_OSD1_POSTBLEND BIT(12) 387 + #define VPP_OSD2_POSTBLEND BIT(13) 388 + #define VPP_VD1_PREBLEND BIT(14) 389 + #define VPP_VD2_PREBLEND BIT(15) 390 + #define VPP_OSD1_PREBLEND BIT(16) 391 + #define 
VPP_OSD2_PREBLEND BIT(17) 392 + #define VPP_COLOR_MNG_ENABLE BIT(28) 412 393 #define VPP_OFIFO_SIZE 0x1d27 394 + #define VPP_OFIFO_SIZE_MASK GENMASK(13, 0) 395 + #define VPP_OFIFO_SIZE_DEFAULT (0xfff << 20 | 0x1000) 413 396 #define VPP_FIFO_STATUS 0x1d28 414 397 #define VPP_SMOKE_CTRL 0x1d29 415 398 #define VPP_SMOKE1_VAL 0x1d2a ··· 429 404 #define VPP_HSC_PHASE_CTRL1 0x1d34 430 405 #define VPP_HSC_INI_PAT_CTRL 0x1d35 431 406 #define VPP_VADJ_CTRL 0x1d40 407 + #define VPP_MINUS_BLACK_LVL_VADJ1_ENABLE BIT(1) 408 + 432 409 #define VPP_VADJ1_Y 0x1d41 433 410 #define VPP_VADJ1_MA_MB 0x1d42 434 411 #define VPP_VADJ1_MC_MD 0x1d43 ··· 490 463 #define VPP_PEAKING_VGAIN 0x1d92 491 464 #define VPP_PEAKING_NLP_1 0x1d93 492 465 #define VPP_DOLBY_CTRL 0x1d93 466 + #define VPP_PPS_DUMMY_DATA_MODE (1 << 17) 493 467 #define VPP_PEAKING_NLP_2 0x1d94 494 468 #define VPP_PEAKING_NLP_3 0x1d95 495 469 #define VPP_PEAKING_NLP_4 0x1d96 ··· 621 593 #define OSD34_SCI_WH_M1 0x3d29 622 594 #define OSD34_SCO_H_START_END 0x3d2a 623 595 #define OSD34_SCO_V_START_END 0x3d2b 596 + 624 597 /* viu2 */ 625 598 #define VIU2_ADDR_START 0x1e00 626 599 #define VIU2_ADDR_END 0x1eff ··· 735 706 #define VENC_UPSAMPLE_CTRL0 0x1b64 736 707 #define VENC_UPSAMPLE_CTRL1 0x1b65 737 708 #define VENC_UPSAMPLE_CTRL2 0x1b66 709 + #define VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO BIT(0) 710 + #define VENC_UPSAMPLE_CTRL_F1_EN BIT(5) 711 + #define VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN BIT(6) 712 + #define VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA (0x0 << 12) 713 + #define VENC_UPSAMPLE_CTRL_CVBS (0x1 << 12) 714 + #define VENC_UPSAMPLE_CTRL_S_VIDEO_LUMA (0x2 << 12) 715 + #define VENC_UPSAMPLE_CTRL_S_VIDEO_CHROMA (0x3 << 12) 716 + #define VENC_UPSAMPLE_CTRL_INTERLACE_PB (0x4 << 12) 717 + #define VENC_UPSAMPLE_CTRL_INTERLACE_PR (0x5 << 12) 718 + #define VENC_UPSAMPLE_CTRL_INTERLACE_R (0x6 << 12) 719 + #define VENC_UPSAMPLE_CTRL_INTERLACE_G (0x7 << 12) 720 + #define VENC_UPSAMPLE_CTRL_INTERLACE_B (0x8 << 12) 721 + #define 
VENC_UPSAMPLE_CTRL_PROGRESSIVE_Y (0x9 << 12) 722 + #define VENC_UPSAMPLE_CTRL_PROGRESSIVE_PB (0xa << 12) 723 + #define VENC_UPSAMPLE_CTRL_PROGRESSIVE_PR (0xb << 12) 724 + #define VENC_UPSAMPLE_CTRL_PROGRESSIVE_R (0xc << 12) 725 + #define VENC_UPSAMPLE_CTRL_PROGRESSIVE_G (0xd << 12) 726 + #define VENC_UPSAMPLE_CTRL_PROGRESSIVE_B (0xe << 12) 727 + #define VENC_UPSAMPLE_CTRL_VDAC_TEST_VALUE (0xf << 12) 738 728 #define TCON_INVERT_CTL 0x1b67 739 729 #define VENC_VIDEO_PROG_MODE 0x1b68 740 730 #define VENC_ENCI_LINE 0x1b69 ··· 762 714 #define VENC_ENCP_PIXEL 0x1b6c 763 715 #define VENC_STATA 0x1b6d 764 716 #define VENC_INTCTRL 0x1b6e 717 + #define VENC_INTCTRL_ENCI_LNRST_INT_EN BIT(1) 765 718 #define VENC_INTFLAG 0x1b6f 766 719 #define VENC_VIDEO_TST_EN 0x1b70 767 720 #define VENC_VIDEO_TST_MDSEL 0x1b71 ··· 773 724 #define VENC_VIDEO_TST_CLRBAR_WIDTH 0x1b76 774 725 #define VENC_VIDEO_TST_VDCNT_STSET 0x1b77 775 726 #define VENC_VDAC_DACSEL0 0x1b78 727 + #define VENC_VDAC_SEL_ATV_DMD BIT(5) 776 728 #define VENC_VDAC_DACSEL1 0x1b79 777 729 #define VENC_VDAC_DACSEL2 0x1b7a 778 730 #define VENC_VDAC_DACSEL3 0x1b7b ··· 794 744 #define VENC_VDAC_DAC5_GAINCTRL 0x1bfa 795 745 #define VENC_VDAC_DAC5_OFFSET 0x1bfb 796 746 #define VENC_VDAC_FIFO_CTRL 0x1bfc 747 + #define VENC_VDAC_FIFO_EN_ENCI_ENABLE BIT(13) 797 748 #define ENCL_TCON_INVERT_CTL 0x1bfd 798 749 #define ENCP_VIDEO_EN 0x1b80 799 750 #define ENCP_VIDEO_SYNC_MODE 0x1b81 ··· 810 759 #define ENCP_VIDEO_SYNC_OFFST 0x1b8b 811 760 #define ENCP_VIDEO_MACV_OFFST 0x1b8c 812 761 #define ENCP_VIDEO_MODE 0x1b8d 762 + #define ENCP_VIDEO_MODE_DE_V_HIGH BIT(14) 813 763 #define ENCP_VIDEO_MODE_ADV 0x1b8e 814 764 #define ENCP_DBG_PX_RST 0x1b90 815 765 #define ENCP_DBG_LN_RST 0x1b91 ··· 889 837 #define C656_FS_LNED 0x1be7 890 838 #define ENCI_VIDEO_MODE 0x1b00 891 839 #define ENCI_VIDEO_MODE_ADV 0x1b01 840 + #define ENCI_VIDEO_MODE_ADV_DMXMD(val) (val & 0x3) 841 + #define ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 BIT(2) 842 + #define 
ENCI_VIDEO_MODE_ADV_YBW_MEDIUM (0 << 4) 843 + #define ENCI_VIDEO_MODE_ADV_YBW_LOW (0x1 << 4) 844 + #define ENCI_VIDEO_MODE_ADV_YBW_HIGH (0x2 << 4) 892 845 #define ENCI_VIDEO_FSC_ADJ 0x1b02 893 846 #define ENCI_VIDEO_BRIGHT 0x1b03 894 847 #define ENCI_VIDEO_CONT 0x1b04 ··· 964 907 #define ENCI_DBG_MAXPX 0x1b4c 965 908 #define ENCI_DBG_MAXLN 0x1b4d 966 909 #define ENCI_MACV_MAX_AMP 0x1b50 910 + #define ENCI_MACV_MAX_AMP_ENABLE_CHANGE BIT(15) 911 + #define ENCI_MACV_MAX_AMP_VAL(val) (val & 0x83ff) 967 912 #define ENCI_MACV_PULSE_LO 0x1b51 968 913 #define ENCI_MACV_PULSE_HI 0x1b52 969 914 #define ENCI_MACV_BKP_MAX 0x1b53 970 915 #define ENCI_CFILT_CTRL 0x1b54 916 + #define ENCI_CFILT_CMPT_SEL_HIGH BIT(1) 971 917 #define ENCI_CFILT7 0x1b55 972 918 #define ENCI_YC_DELAY 0x1b56 973 919 #define ENCI_VIDEO_EN 0x1b57 920 + #define ENCI_VIDEO_EN_ENABLE BIT(0) 974 921 #define ENCI_DVI_HSO_BEGIN 0x1c00 975 922 #define ENCI_DVI_HSO_END 0x1c01 976 923 #define ENCI_DVI_VSO_BLINE_EVN 0x1c02 ··· 986 925 #define ENCI_DVI_VSO_END_EVN 0x1c08 987 926 #define ENCI_DVI_VSO_END_ODD 0x1c09 988 927 #define ENCI_CFILT_CTRL2 0x1c0a 928 + #define ENCI_CFILT_CMPT_CR_DLY(delay) (delay & 0xf) 929 + #define ENCI_CFILT_CMPT_CB_DLY(delay) ((delay & 0xf) << 4) 930 + #define ENCI_CFILT_CVBS_CR_DLY(delay) ((delay & 0xf) << 8) 931 + #define ENCI_CFILT_CVBS_CB_DLY(delay) ((delay & 0xf) << 12) 989 932 #define ENCI_DACSEL_0 0x1c0b 990 933 #define ENCI_DACSEL_1 0x1c0c 991 934 #define ENCP_DACSEL_0 0x1c0d ··· 1004 939 #define ENCI_TST_CLRBAR_WIDTH 0x1c16 1005 940 #define ENCI_TST_VDCNT_STSET 0x1c17 1006 941 #define ENCI_VFIFO2VD_CTL 0x1c18 942 + #define ENCI_VFIFO2VD_CTL_ENABLE BIT(0) 943 + #define ENCI_VFIFO2VD_CTL_VD_SEL(val) ((val & 0xff) << 8) 1007 944 #define ENCI_VFIFO2VD_PIXEL_START 0x1c19 1008 945 #define ENCI_VFIFO2VD_PIXEL_END 0x1c1a 1009 946 #define ENCI_VFIFO2VD_LINE_TOP_START 0x1c1b ··· 1068 1001 #define VENC_VDAC_DAC5_FILT_CTRL0 0x1c56 1069 1002 #define VENC_VDAC_DAC5_FILT_CTRL1 0x1c57 1070 1003 
#define VENC_VDAC_DAC0_FILT_CTRL0 0x1c58 1004 + #define VENC_VDAC_DAC0_FILT_CTRL0_EN BIT(0) 1071 1005 #define VENC_VDAC_DAC0_FILT_CTRL1 0x1c59 1072 1006 #define VENC_VDAC_DAC1_FILT_CTRL0 0x1c5a 1073 1007 #define VENC_VDAC_DAC1_FILT_CTRL1 0x1c5b ··· 1474 1406 #define VIU2_SEL_VENC_ENCP (2 << 2) 1475 1407 #define VIU2_SEL_VENC_ENCT (3 << 2) 1476 1408 #define VPU_HDMI_SETTING 0x271b 1409 + #define VPU_HDMI_ENCI_DATA_TO_HDMI BIT(0) 1410 + #define VPU_HDMI_ENCP_DATA_TO_HDMI BIT(1) 1411 + #define VPU_HDMI_INV_HSYNC BIT(2) 1412 + #define VPU_HDMI_INV_VSYNC BIT(3) 1413 + #define VPU_HDMI_OUTPUT_CRYCB (0 << 5) 1414 + #define VPU_HDMI_OUTPUT_YCBCR (1 << 5) 1415 + #define VPU_HDMI_OUTPUT_YCRCB (2 << 5) 1416 + #define VPU_HDMI_OUTPUT_CBCRY (3 << 5) 1417 + #define VPU_HDMI_OUTPUT_CBYCR (4 << 5) 1418 + #define VPU_HDMI_OUTPUT_CRCBY (5 << 5) 1419 + #define VPU_HDMI_WR_RATE(rate) (((rate & 0x1f) - 1) << 8) 1420 + #define VPU_HDMI_RD_RATE(rate) (((rate & 0x1f) - 1) << 12) 1477 1421 #define ENCI_INFO_READ 0x271c 1478 1422 #define ENCP_INFO_READ 0x271d 1479 1423 #define ENCT_INFO_READ 0x271e ··· 1562 1482 #define VPU_RDARB_MODE_L1C2 0x2799 1563 1483 #define VPU_RDARB_MODE_L2C1 0x279d 1564 1484 #define VPU_WRARB_MODE_L2C1 0x27a2 1485 + #define VPU_RDARB_SLAVE_TO_MASTER_PORT(dc, port) (port << (16 + dc)) 1565 1486 1566 1487 /* osd super scale */ 1567 1488 #define OSDSR_HV_SIZEIN 0x3130 ··· 1604 1523 #define OSD1_AFBCD_STATUS 0x31a8 1605 1524 #define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9 1606 1525 #define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa 1607 - #define VIU_MISC_CTRL1 0x1a07 1608 1526 1609 1527 /* add for gxm and 962e dv core2 */ 1610 1528 #define DOLBY_CORE2A_SWAP_CTRL1 0x3434 ··· 1618 1538 #define VPU_MAFBC_COMMAND 0x3a05 1619 1539 #define VPU_MAFBC_STATUS 0x3a06 1620 1540 #define VPU_MAFBC_SURFACE_CFG 0x3a07 1621 - 1622 - /* osd afbc on g12a */ 1623 1541 #define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10 1624 1542 #define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11 1625 1543 #define 
VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12 ··· 1675 1597 #define VPU_MAFBC_PREFETCH_CFG_S3 0x3a7c 1676 1598 1677 1599 #define DOLBY_PATH_CTRL 0x1a0c 1600 + #define DOLBY_BYPASS_EN(val) (val & 0xf) 1678 1601 #define OSD_PATH_MISC_CTRL 0x1a0e 1679 1602 #define MALI_AFBCD_TOP_CTRL 0x1a0f 1680 1603 1681 1604 #define VIU_OSD_BLEND_CTRL 0x39b0 1605 + #define VIU_OSD_BLEND_REORDER(dest, src) ((src) << (dest * 4)) 1606 + #define VIU_OSD_BLEND_DIN_EN(bits) ((bits & 0xf) << 20) 1607 + #define VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 BIT(24) 1608 + #define VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 BIT(25) 1609 + #define VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 BIT(26) 1610 + #define VIU_OSD_BLEND_BLEN2_PREMULT_EN(input) ((input & 0x3) << 27) 1611 + #define VIU_OSD_BLEND_HOLD_LINES(lines) ((lines & 0x7) << 29) 1682 1612 #define VIU_OSD_BLEND_CTRL1 0x39c0 1683 1613 #define VIU_OSD_BLEND_DIN0_SCOPE_H 0x39b1 1684 1614 #define VIU_OSD_BLEND_DIN0_SCOPE_V 0x39b2 ··· 1716 1630 #define VPP_SLEEP_CTRL 0x1dfa 1717 1631 #define VD1_BLEND_SRC_CTRL 0x1dfb 1718 1632 #define VD2_BLEND_SRC_CTRL 0x1dfc 1633 + #define VD_BLEND_PREBLD_SRC_VD1 (1 << 0) 1634 + #define VD_BLEND_PREBLD_SRC_VD2 (2 << 0) 1635 + #define VD_BLEND_PREBLD_SRC_OSD1 (3 << 0) 1636 + #define VD_BLEND_PREBLD_SRC_OSD2 (4 << 0) 1637 + #define VD_BLEND_PREBLD_PREMULT_EN BIT(4) 1638 + #define VD_BLEND_POSTBLD_SRC_VD1 (1 << 8) 1639 + #define VD_BLEND_POSTBLD_SRC_VD2 (2 << 8) 1640 + #define VD_BLEND_POSTBLD_SRC_OSD1 (3 << 8) 1641 + #define VD_BLEND_POSTBLD_SRC_OSD2 (4 << 8) 1642 + #define VD_BLEND_POSTBLD_PREMULT_EN BIT(16) 1719 1643 #define OSD1_BLEND_SRC_CTRL 0x1dfd 1720 1644 #define OSD2_BLEND_SRC_CTRL 0x1dfe 1645 + #define OSD_BLEND_POSTBLD_SRC_VD1 (1 << 8) 1646 + #define OSD_BLEND_POSTBLD_SRC_VD2 (2 << 8) 1647 + #define OSD_BLEND_POSTBLD_SRC_OSD1 (3 << 8) 1648 + #define OSD_BLEND_POSTBLD_SRC_OSD2 (4 << 8) 1649 + #define OSD_BLEND_PATH_SEL_ENABLE BIT(20) 1721 1650 1722 1651 #define VPP_POST_BLEND_BLEND_DUMMY_DATA 0x3968 1723 1652 #define 
VPP_POST_BLEND_DUMMY_ALPHA 0x3969 1724 1653 #define VPP_RDARB_MODE 0x3978 1725 1654 #define VPP_RDARB_REQEN_SLV 0x3979 1726 - #define VPU_RDARB_MODE_L2C1 0x279d 1727 1655 1728 1656 #endif /* __MESON_REGISTERS_H */
+5 -2
drivers/gpu/drm/meson/meson_vclk.c
··· 97 97 #define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ 98 98 99 99 #define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */ 100 + #define HHI_HDMI_PLL_CNTL_EN BIT(30) 100 101 #define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */ 101 102 #define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */ 102 103 #define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */ ··· 470 469 471 470 /* Enable and unreset */ 472 471 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, 473 - 0x7 << 28, 0x4 << 28); 472 + 0x7 << 28, HHI_HDMI_PLL_CNTL_EN); 474 473 475 474 /* Poll for lock bit */ 476 475 regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, ··· 497 496 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x0b3a0400 | m); 498 497 499 498 /* Enable and reset */ 499 + /* TODO: add specific macro for g12a here */ 500 500 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, 501 501 0x3 << 28, 0x3 << 28); 502 502 ··· 972 970 meson_venci_cvbs_clock_config(priv); 973 971 return; 974 972 } else if (target == MESON_VCLK_TARGET_DMT) { 975 - /* The DMT clock path is fixed after the PLL: 973 + /* 974 + * The DMT clock path is fixed after the PLL: 976 975 * - automatic PLL freq + OD management 977 976 * - vid_pll_div = VID_PLL_DIV_5 978 977 * - vclk_div = 2
+132 -37
drivers/gpu/drm/meson/meson_venc.c
··· 61 61 /* HHI Registers */ 62 62 #define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */ 63 63 #define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ 64 - #define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */ 64 + #define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbb offset in data sheet */ 65 65 #define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ 66 - #define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */ 66 + #define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbc offset in data sheet */ 67 67 #define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */ 68 68 69 69 struct meson_cvbs_enci_mode meson_cvbs_enci_pal = { ··· 192 192 .hso_end = 129, 193 193 .vso_even = 3, 194 194 .vso_odd = 260, 195 - .macv_max_amp = 0x810b, 195 + .macv_max_amp = 0xb, 196 196 .video_prog_mode = 0xf0, 197 197 .video_mode = 0x8, 198 198 .sch_adjust = 0x20, ··· 212 212 .hso_end = 129, 213 213 .vso_even = 3, 214 214 .vso_odd = 260, 215 - .macv_max_amp = 8107, 215 + .macv_max_amp = 0x7, 216 216 .video_prog_mode = 0xff, 217 217 .video_mode = 0x13, 218 218 .sch_adjust = 0x28, ··· 976 976 unsigned int eof_lines; 977 977 unsigned int sof_lines; 978 978 unsigned int vsync_lines; 979 + u32 reg; 979 980 980 981 /* Use VENCI for 480i and 576i and double HDMI pixels */ 981 982 if (mode->flags & DRM_MODE_FLAG_DBLCLK) { ··· 1049 1048 unsigned int lines_f1; 1050 1049 1051 1050 /* CVBS Filter settings */ 1052 - writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL)); 1053 - writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2)); 1051 + writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10, 1052 + priv->io_base + _REG(ENCI_CFILT_CTRL)); 1053 + writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) | 1054 + ENCI_CFILT_CMPT_CB_DLY(1), 1055 + priv->io_base + _REG(ENCI_CFILT_CTRL2)); 1054 1056 1055 1057 /* Digital Video Select : Interlace, clk27 clk, external */ 1056 1058 writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING)); ··· 1075 1071 priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN)); 1076 
1072 1077 1073 /* Macrovision max amplitude change */ 1078 - writel_relaxed(vmode->enci.macv_max_amp, 1079 - priv->io_base + _REG(ENCI_MACV_MAX_AMP)); 1074 + writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE | 1075 + ENCI_MACV_MAX_AMP_VAL(vmode->enci.macv_max_amp), 1076 + priv->io_base + _REG(ENCI_MACV_MAX_AMP)); 1080 1077 1081 1078 /* Video mode */ 1082 1079 writel_relaxed(vmode->enci.video_prog_mode, ··· 1085 1080 writel_relaxed(vmode->enci.video_mode, 1086 1081 priv->io_base + _REG(ENCI_VIDEO_MODE)); 1087 1082 1088 - /* Advanced Video Mode : 1083 + /* 1084 + * Advanced Video Mode : 1089 1085 * Demux shifting 0x2 1090 1086 * Blank line end at line17/22 1091 1087 * High bandwidth Luma Filter ··· 1094 1088 * Bypass luma low pass filter 1095 1089 * No macrovision on CSYNC 1096 1090 */ 1097 - writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV)); 1091 + writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) | 1092 + ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 | 1093 + ENCI_VIDEO_MODE_ADV_YBW_HIGH, 1094 + priv->io_base + _REG(ENCI_VIDEO_MODE_ADV)); 1098 1095 1099 1096 writel(vmode->enci.sch_adjust, 1100 1097 priv->io_base + _REG(ENCI_VIDEO_SCH)); ··· 1113 1104 /* UNreset Interlaced TV Encoder */ 1114 1105 writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST)); 1115 1106 1116 - /* Enable Vfifo2vd, Y_Cb_Y_Cr select */ 1117 - writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL)); 1107 + /* 1108 + * Enable Vfifo2vd and set Y_Cb_Y_Cr: 1109 + * Corresponding value: 1110 + * Y => 00 or 10 1111 + * Cb => 01 1112 + * Cr => 11 1113 + * Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y 1114 + */ 1115 + writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE | 1116 + ENCI_VFIFO2VD_CTL_VD_SEL(0x4e), 1117 + priv->io_base + _REG(ENCI_VFIFO2VD_CTL)); 1118 1118 1119 1119 /* Timings */ 1120 1120 writel_relaxed(vmode->enci.pixel_start, ··· 1145 1127 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI); 1146 1128 1147 1129 /* Interlace video enable */ 1148 - writel_relaxed(1, priv->io_base + 
_REG(ENCI_VIDEO_EN)); 1130 + writel_relaxed(ENCI_VIDEO_EN_ENABLE, 1131 + priv->io_base + _REG(ENCI_VIDEO_EN)); 1149 1132 1150 1133 lines_f0 = mode->vtotal >> 1; 1151 1134 lines_f1 = lines_f0 + 1; ··· 1393 1374 writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); 1394 1375 1395 1376 /* Set DE signal’s polarity is active high */ 1396 - writel_bits_relaxed(BIT(14), BIT(14), 1377 + writel_bits_relaxed(ENCP_VIDEO_MODE_DE_V_HIGH, 1378 + ENCP_VIDEO_MODE_DE_V_HIGH, 1397 1379 priv->io_base + _REG(ENCP_VIDEO_MODE)); 1398 1380 1399 1381 /* Program DE timing */ ··· 1513 1493 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCP); 1514 1494 } 1515 1495 1516 - writel_relaxed((use_enci ? 1 : 2) | 1517 - (mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 << 2 : 0) | 1518 - (mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 << 3 : 0) | 1519 - 4 << 5 | 1520 - (venc_repeat ? 1 << 8 : 0) | 1521 - (hdmi_repeat ? 1 << 12 : 0), 1522 - priv->io_base + _REG(VPU_HDMI_SETTING)); 1496 + /* Set VPU HDMI setting */ 1497 + /* Select ENCP or ENCI data to HDMI */ 1498 + if (use_enci) 1499 + reg = VPU_HDMI_ENCI_DATA_TO_HDMI; 1500 + else 1501 + reg = VPU_HDMI_ENCP_DATA_TO_HDMI; 1502 + 1503 + /* Invert polarity of HSYNC from VENC */ 1504 + if (mode->flags & DRM_MODE_FLAG_PHSYNC) 1505 + reg |= VPU_HDMI_INV_HSYNC; 1506 + 1507 + /* Invert polarity of VSYNC from VENC */ 1508 + if (mode->flags & DRM_MODE_FLAG_PVSYNC) 1509 + reg |= VPU_HDMI_INV_VSYNC; 1510 + 1511 + /* Output data format: CbYCr */ 1512 + reg |= VPU_HDMI_OUTPUT_CBYCR; 1513 + 1514 + /* 1515 + * Write rate to the async FIFO between VENC and HDMI. 1516 + * One write every 2 wr_clk. 1517 + */ 1518 + if (venc_repeat) 1519 + reg |= VPU_HDMI_WR_RATE(2); 1520 + 1521 + /* 1522 + * Read rate to the async FIFO between VENC and HDMI. 1523 + * One read every 2 wr_clk. 
1524 + */ 1525 + if (hdmi_repeat) 1526 + reg |= VPU_HDMI_RD_RATE(2); 1527 + 1528 + writel_relaxed(reg, priv->io_base + _REG(VPU_HDMI_SETTING)); 1523 1529 1524 1530 priv->venc.hdmi_repeat = hdmi_repeat; 1525 1531 priv->venc.venc_repeat = venc_repeat; ··· 1558 1512 void meson_venci_cvbs_mode_set(struct meson_drm *priv, 1559 1513 struct meson_cvbs_enci_mode *mode) 1560 1514 { 1515 + u32 reg; 1516 + 1561 1517 if (mode->mode_tag == priv->venc.current_mode) 1562 1518 return; 1563 1519 1564 1520 /* CVBS Filter settings */ 1565 - writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL)); 1566 - writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2)); 1521 + writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10, 1522 + priv->io_base + _REG(ENCI_CFILT_CTRL)); 1523 + writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) | 1524 + ENCI_CFILT_CMPT_CB_DLY(1), 1525 + priv->io_base + _REG(ENCI_CFILT_CTRL2)); 1567 1526 1568 1527 /* Digital Video Select : Interlace, clk27 clk, external */ 1569 1528 writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING)); ··· 1590 1539 priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN)); 1591 1540 1592 1541 /* Macrovision max amplitude change */ 1593 - writel_relaxed(0x8100 + mode->macv_max_amp, 1594 - priv->io_base + _REG(ENCI_MACV_MAX_AMP)); 1542 + writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE | 1543 + ENCI_MACV_MAX_AMP_VAL(mode->macv_max_amp), 1544 + priv->io_base + _REG(ENCI_MACV_MAX_AMP)); 1595 1545 1596 1546 /* Video mode */ 1597 1547 writel_relaxed(mode->video_prog_mode, ··· 1600 1548 writel_relaxed(mode->video_mode, 1601 1549 priv->io_base + _REG(ENCI_VIDEO_MODE)); 1602 1550 1603 - /* Advanced Video Mode : 1551 + /* 1552 + * Advanced Video Mode : 1604 1553 * Demux shifting 0x2 1605 1554 * Blank line end at line17/22 1606 1555 * High bandwidth Luma Filter ··· 1609 1556 * Bypass luma low pass filter 1610 1557 * No macrovision on CSYNC 1611 1558 */ 1612 - writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV)); 1559 + 
writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) | 1560 + ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 | 1561 + ENCI_VIDEO_MODE_ADV_YBW_HIGH, 1562 + priv->io_base + _REG(ENCI_VIDEO_MODE_ADV)); 1613 1563 1614 1564 writel(mode->sch_adjust, priv->io_base + _REG(ENCI_VIDEO_SCH)); 1615 1565 ··· 1644 1588 /* UNreset Interlaced TV Encoder */ 1645 1589 writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST)); 1646 1590 1647 - /* Enable Vfifo2vd, Y_Cb_Y_Cr select */ 1648 - writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL)); 1591 + /* 1592 + * Enable Vfifo2vd and set Y_Cb_Y_Cr: 1593 + * Corresponding value: 1594 + * Y => 00 or 10 1595 + * Cb => 01 1596 + * Cr => 11 1597 + * Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y 1598 + */ 1599 + writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE | 1600 + ENCI_VFIFO2VD_CTL_VD_SEL(0x4e), 1601 + priv->io_base + _REG(ENCI_VFIFO2VD_CTL)); 1649 1602 1650 1603 /* Power UP Dacs */ 1651 1604 writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_SETTING)); 1652 1605 1653 1606 /* Video Upsampling */ 1654 - writel_relaxed(0x0061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL0)); 1655 - writel_relaxed(0x4061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL1)); 1656 - writel_relaxed(0x5061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL2)); 1607 + /* 1608 + * CTRL0, CTRL1 and CTRL2: 1609 + * Filter0: input data sample every 2 cloks 1610 + * Filter1: filtering and upsample enable 1611 + */ 1612 + reg = VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO | VENC_UPSAMPLE_CTRL_F1_EN | 1613 + VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN; 1614 + 1615 + /* 1616 + * Upsample CTRL0: 1617 + * Interlace High Bandwidth Luma 1618 + */ 1619 + writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA | reg, 1620 + priv->io_base + _REG(VENC_UPSAMPLE_CTRL0)); 1621 + 1622 + /* 1623 + * Upsample CTRL1: 1624 + * Interlace Pb 1625 + */ 1626 + writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PB | reg, 1627 + priv->io_base + _REG(VENC_UPSAMPLE_CTRL1)); 1628 + 1629 + /* 1630 + * Upsample CTRL2: 1631 + * Interlace R 1632 + */ 1633 + 
writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PR | reg, 1634 + priv->io_base + _REG(VENC_UPSAMPLE_CTRL2)); 1657 1635 1658 1636 /* Select Interlace Y DACs */ 1659 1637 writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL0)); ··· 1701 1611 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI); 1702 1612 1703 1613 /* Enable ENCI FIFO */ 1704 - writel_relaxed(0x2000, priv->io_base + _REG(VENC_VDAC_FIFO_CTRL)); 1614 + writel_relaxed(VENC_VDAC_FIFO_EN_ENCI_ENABLE, 1615 + priv->io_base + _REG(VENC_VDAC_FIFO_CTRL)); 1705 1616 1706 1617 /* Select ENCI DACs 0, 1, 4, and 5 */ 1707 1618 writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_0)); 1708 1619 writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_1)); 1709 1620 1710 1621 /* Interlace video enable */ 1711 - writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); 1622 + writel_relaxed(ENCI_VIDEO_EN_ENABLE, 1623 + priv->io_base + _REG(ENCI_VIDEO_EN)); 1712 1624 1713 1625 /* Configure Video Saturation / Contrast / Brightness / Hue */ 1714 1626 writel_relaxed(mode->video_saturation, ··· 1723 1631 priv->io_base + _REG(ENCI_VIDEO_HUE)); 1724 1632 1725 1633 /* Enable DAC0 Filter */ 1726 - writel_relaxed(0x1, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0)); 1634 + writel_relaxed(VENC_VDAC_DAC0_FILT_CTRL0_EN, 1635 + priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0)); 1727 1636 writel_relaxed(0xfc48, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL1)); 1728 1637 1729 1638 /* 0 in Macrovision register 0 */ ··· 1745 1652 1746 1653 void meson_venc_enable_vsync(struct meson_drm *priv) 1747 1654 { 1748 - writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL)); 1655 + writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN, 1656 + priv->io_base + _REG(VENC_INTCTRL)); 1749 1657 regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25)); 1750 1658 } 1751 1659 ··· 1774 1680 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0); 1775 1681 1776 1682 /* Disable HDMI */ 1777 - writel_bits_relaxed(0x3, 0, 1683 + writel_bits_relaxed(VPU_HDMI_ENCI_DATA_TO_HDMI | 
1684 + VPU_HDMI_ENCP_DATA_TO_HDMI, 0, 1778 1685 priv->io_base + _REG(VPU_HDMI_SETTING)); 1779 1686 1780 1687 /* Disable all encoders */
+2 -1
drivers/gpu/drm/meson/meson_venc_cvbs.c
··· 171 171 struct meson_drm *priv = meson_venc_cvbs->priv; 172 172 173 173 /* VDAC0 source is not from ATV */ 174 - writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0)); 174 + writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0, 175 + priv->io_base + _REG(VENC_VDAC_DACSEL0)); 175 176 176 177 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) { 177 178 regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
+44 -38
drivers/gpu/drm/meson/meson_viu.c
··· 320 320 priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); 321 321 322 322 /* Reset OSD1 */ 323 - writel_bits_relaxed(BIT(0), BIT(0), 323 + writel_bits_relaxed(VIU_SW_RESET_OSD1, VIU_SW_RESET_OSD1, 324 324 priv->io_base + _REG(VIU_SW_RESET)); 325 - writel_bits_relaxed(BIT(0), 0, 325 + writel_bits_relaxed(VIU_SW_RESET_OSD1, 0, 326 326 priv->io_base + _REG(VIU_SW_RESET)); 327 327 328 328 /* Rewrite these registers state lost in the reset */ ··· 335 335 meson_viu_load_matrix(priv); 336 336 } 337 337 338 + static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length) 339 + { 340 + uint32_t val = (((length & 0x80) % 24) / 12); 341 + 342 + return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31)); 343 + } 344 + 338 345 void meson_viu_init(struct meson_drm *priv) 339 346 { 340 347 uint32_t reg; 341 348 342 349 /* Disable OSDs */ 343 - writel_bits_relaxed(BIT(0) | BIT(21), 0, 344 - priv->io_base + _REG(VIU_OSD1_CTRL_STAT)); 345 - writel_bits_relaxed(BIT(0) | BIT(21), 0, 346 - priv->io_base + _REG(VIU_OSD2_CTRL_STAT)); 350 + writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0, 351 + priv->io_base + _REG(VIU_OSD1_CTRL_STAT)); 352 + writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0, 353 + priv->io_base + _REG(VIU_OSD2_CTRL_STAT)); 347 354 348 355 /* On GXL/GXM, Use the 10bit HDR conversion matrix */ 349 356 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || ··· 361 354 true); 362 355 363 356 /* Initialize OSD1 fifo control register */ 364 - reg = BIT(0) | /* Urgent DDR request priority */ 365 - (4 << 5); /* hold_fifo_lines */ 357 + reg = VIU_OSD_DDR_PRIORITY_URGENT | 358 + VIU_OSD_HOLD_FIFO_LINES(4) | 359 + VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */ 360 + VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */ 361 + VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */ 362 + 366 363 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) 367 - reg |= (1 << 10) | /* burst length 32 */ 368 - (32 << 12) | /* 
fifo_depth_val: 32*8=256 */ 369 - (2 << 22) | /* 4 words in 1 burst */ 370 - (2 << 24) | 371 - (1 << 31); 364 + reg |= meson_viu_osd_burst_length_reg(32); 372 365 else 373 - reg |= (3 << 10) | /* burst length 64 */ 374 - (32 << 12) | /* fifo_depth_val: 32*8=256 */ 375 - (2 << 22) | /* 4 words in 1 burst */ 376 - (2 << 24); 366 + reg |= meson_viu_osd_burst_length_reg(64); 367 + 377 368 writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); 378 369 writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT)); 379 370 ··· 384 379 priv->io_base + _REG(VIU_OSD2_CTRL_STAT2)); 385 380 386 381 /* Disable VD1 AFBC */ 387 - /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */ 388 - writel_bits_relaxed(0x7 << 16, 0, 389 - priv->io_base + _REG(VIU_MISC_CTRL0)); 390 - /* afbc vd1 set=0 */ 391 - writel_bits_relaxed(BIT(20), 0, 392 - priv->io_base + _REG(VIU_MISC_CTRL0)); 382 + /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 and afbc vd1 set=0*/ 383 + writel_bits_relaxed(VIU_CTRL0_VD1_AFBC_MASK, 0, 384 + priv->io_base + _REG(VIU_MISC_CTRL0)); 393 385 writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE)); 394 386 395 387 writel_relaxed(0x00FF00C0, ··· 395 393 priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE)); 396 394 397 395 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) { 398 - writel_relaxed(4 << 29 | 399 - 1 << 27 | 400 - 1 << 26 | /* blend_din0 input to blend0 */ 401 - 1 << 25 | /* blend1_dout to blend2 */ 402 - 1 << 24 | /* blend1_din3 input to blend1 */ 403 - 1 << 20 | 404 - 0 << 16 | 405 - 1, 406 - priv->io_base + _REG(VIU_OSD_BLEND_CTRL)); 407 - writel_relaxed(1 << 20, 408 - priv->io_base + _REG(OSD1_BLEND_SRC_CTRL)); 409 - writel_relaxed(1 << 20, 410 - priv->io_base + _REG(OSD2_BLEND_SRC_CTRL)); 396 + writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) | 397 + VIU_OSD_BLEND_REORDER(1, 0) | 398 + VIU_OSD_BLEND_REORDER(2, 0) | 399 + VIU_OSD_BLEND_REORDER(3, 0) | 400 + VIU_OSD_BLEND_DIN_EN(1) | 401 + VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 | 402 + 
VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 | 403 + VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 | 404 + VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) | 405 + VIU_OSD_BLEND_HOLD_LINES(4), 406 + priv->io_base + _REG(VIU_OSD_BLEND_CTRL)); 407 + 408 + writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE, 409 + priv->io_base + _REG(OSD1_BLEND_SRC_CTRL)); 410 + writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE, 411 + priv->io_base + _REG(OSD2_BLEND_SRC_CTRL)); 411 412 writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL)); 412 413 writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL)); 413 414 writel_relaxed(0, 414 415 priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0)); 415 416 writel_relaxed(0, 416 417 priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA)); 417 - writel_bits_relaxed(0x3 << 2, 0x3 << 2, 418 - priv->io_base + _REG(DOLBY_PATH_CTRL)); 418 + 419 + writel_bits_relaxed(DOLBY_BYPASS_EN(0xc), DOLBY_BYPASS_EN(0xc), 420 + priv->io_base + _REG(DOLBY_PATH_CTRL)); 419 421 } 420 422 421 423 priv->viu.osd1_enabled = false;
+16 -9
drivers/gpu/drm/meson/meson_vpp.c
··· 56 56 { 57 57 int i; 58 58 59 - writel_relaxed(is_horizontal ? BIT(8) : 0, 59 + writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0, 60 60 priv->io_base + _REG(VPP_OSD_SCALE_COEF_IDX)); 61 61 for (i = 0; i < 33; i++) 62 62 writel_relaxed(coefs[i], ··· 81 81 { 82 82 int i; 83 83 84 - writel_relaxed(is_horizontal ? BIT(8) : 0, 84 + writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0, 85 85 priv->io_base + _REG(VPP_SCALE_COEF_IDX)); 86 86 for (i = 0; i < 33; i++) 87 87 writel_relaxed(coefs[i], ··· 96 96 else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu")) { 97 97 writel_bits_relaxed(0xff << 16, 0xff << 16, 98 98 priv->io_base + _REG(VIU_MISC_CTRL1)); 99 - writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL)); 99 + writel_relaxed(VPP_PPS_DUMMY_DATA_MODE, 100 + priv->io_base + _REG(VPP_DOLBY_CTRL)); 100 101 writel_relaxed(0x1020080, 101 102 priv->io_base + _REG(VPP_DUMMY_DATA1)); 102 103 } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) ··· 105 104 106 105 /* Initialize vpu fifo control registers */ 107 106 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) 108 - writel_relaxed(0xfff << 20 | 0x1000, 107 + writel_relaxed(VPP_OFIFO_SIZE_DEFAULT, 109 108 priv->io_base + _REG(VPP_OFIFO_SIZE)); 110 109 else 111 - writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) | 112 - 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE)); 113 - writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES)); 110 + writel_bits_relaxed(VPP_OFIFO_SIZE_MASK, 0x77f, 111 + priv->io_base + _REG(VPP_OFIFO_SIZE)); 112 + writel_relaxed(VPP_POSTBLEND_HOLD_LINES(4) | VPP_PREBLEND_HOLD_LINES(4), 113 + priv->io_base + _REG(VPP_HOLD_LINES)); 114 114 115 115 if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) { 116 116 /* Turn off preblend */ ··· 139 137 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0)); 140 138 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); 141 139 writel_relaxed(0, priv->io_base 
+ _REG(VPP_OSD_HSC_CTRL0)); 142 - writel_relaxed(4 | (4 << 8) | BIT(15), 140 + 141 + /* Set horizontal/vertical bank length and enable video scale out */ 142 + writel_relaxed(VPP_VSC_BANK_LENGTH(4) | VPP_HSC_BANK_LENGTH(4) | 143 + VPP_SC_VD_EN_ENABLE, 143 144 priv->io_base + _REG(VPP_SC_MISC)); 144 145 145 - writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL)); 146 + /* Enable minus black level for vadj1 */ 147 + writel_relaxed(VPP_MINUS_BLACK_LVL_VADJ1_ENABLE, 148 + priv->io_base + _REG(VPP_VADJ_CTRL)); 146 149 147 150 /* Write in the proper filter coefficients. */ 148 151 meson_vpp_write_scaling_filter_coefs(priv,
+9 -9
drivers/gpu/drm/msm/msm_gem.c
··· 700 700 int msm_gem_sync_object(struct drm_gem_object *obj, 701 701 struct msm_fence_context *fctx, bool exclusive) 702 702 { 703 - struct reservation_object_list *fobj; 703 + struct dma_resv_list *fobj; 704 704 struct dma_fence *fence; 705 705 int i, ret; 706 706 707 - fobj = reservation_object_get_list(obj->resv); 707 + fobj = dma_resv_get_list(obj->resv); 708 708 if (!fobj || (fobj->shared_count == 0)) { 709 - fence = reservation_object_get_excl(obj->resv); 709 + fence = dma_resv_get_excl(obj->resv); 710 710 /* don't need to wait on our own fences, since ring is fifo */ 711 711 if (fence && (fence->context != fctx->context)) { 712 712 ret = dma_fence_wait(fence, true); ··· 720 720 721 721 for (i = 0; i < fobj->shared_count; i++) { 722 722 fence = rcu_dereference_protected(fobj->shared[i], 723 - reservation_object_held(obj->resv)); 723 + dma_resv_held(obj->resv)); 724 724 if (fence->context != fctx->context) { 725 725 ret = dma_fence_wait(fence, true); 726 726 if (ret) ··· 738 738 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); 739 739 msm_obj->gpu = gpu; 740 740 if (exclusive) 741 - reservation_object_add_excl_fence(obj->resv, fence); 741 + dma_resv_add_excl_fence(obj->resv, fence); 742 742 else 743 - reservation_object_add_shared_fence(obj->resv, fence); 743 + dma_resv_add_shared_fence(obj->resv, fence); 744 744 list_del_init(&msm_obj->mm_list); 745 745 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 746 746 } ··· 765 765 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); 766 766 long ret; 767 767 768 - ret = reservation_object_wait_timeout_rcu(obj->resv, write, 768 + ret = dma_resv_wait_timeout_rcu(obj->resv, write, 769 769 true, remain); 770 770 if (ret == 0) 771 771 return remain == 0 ? 
-EBUSY : -ETIMEDOUT; ··· 797 797 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 798 798 { 799 799 struct msm_gem_object *msm_obj = to_msm_bo(obj); 800 - struct reservation_object *robj = obj->resv; 801 - struct reservation_object_list *fobj; 800 + struct dma_resv *robj = obj->resv; 801 + struct dma_resv_list *fobj; 802 802 struct dma_fence *fence; 803 803 struct msm_gem_vma *vma; 804 804 uint64_t off = drm_vma_node_start(&obj->vma_node);
+1 -1
drivers/gpu/drm/msm/msm_gem.h
··· 8 8 #define __MSM_GEM_H__ 9 9 10 10 #include <linux/kref.h> 11 - #include <linux/reservation.h> 11 + #include <linux/dma-resv.h> 12 12 #include "msm_drv.h" 13 13 14 14 /* Additional internal-use only BO flags: */
+1 -1
drivers/gpu/drm/msm/msm_gem_submit.c
··· 225 225 * strange place to call it. OTOH this is a 226 226 * convenient can-fail point to hook it in. 227 227 */ 228 - ret = reservation_object_reserve_shared(msm_obj->base.resv, 228 + ret = dma_resv_reserve_shared(msm_obj->base.resv, 229 229 1); 230 230 if (ret) 231 231 return ret;
+1 -1
drivers/gpu/drm/mxsfb/mxsfb_drv.c
··· 17 17 #include <linux/of_graph.h> 18 18 #include <linux/of_reserved_mem.h> 19 19 #include <linux/pm_runtime.h> 20 - #include <linux/reservation.h> 20 + #include <linux/dma-resv.h> 21 21 #include <linux/spinlock.h> 22 22 23 23 #include <drm/drm_atomic.h>
+1 -1
drivers/gpu/drm/mxsfb/mxsfb_out.c
··· 30 30 drm_connector_to_mxsfb_drm_private(connector); 31 31 32 32 if (mxsfb->panel) 33 - return mxsfb->panel->funcs->get_modes(mxsfb->panel); 33 + return drm_panel_get_modes(mxsfb->panel); 34 34 35 35 return 0; 36 36 }
+1 -1
drivers/gpu/drm/nouveau/dispnv50/wndw.c
··· 457 457 asyw->image.handle[0] = ctxdma->object.handle; 458 458 } 459 459 460 - asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv); 460 + asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv); 461 461 asyw->image.offset[0] = fb->nvbo->bo.offset; 462 462 463 463 if (wndw->func->prepare) {
+5 -5
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 188 188 int 189 189 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, 190 190 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, 191 - struct sg_table *sg, struct reservation_object *robj, 191 + struct sg_table *sg, struct dma_resv *robj, 192 192 struct nouveau_bo **pnvbo) 193 193 { 194 194 struct nouveau_drm *drm = cli->drm; ··· 1324 1324 { 1325 1325 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1326 1326 struct drm_device *dev = drm->dev; 1327 - struct dma_fence *fence = reservation_object_get_excl(bo->base.resv); 1327 + struct dma_fence *fence = dma_resv_get_excl(bo->base.resv); 1328 1328 1329 1329 nv10_bo_put_tile_region(dev, *old_tile, fence); 1330 1330 *old_tile = new_tile; ··· 1655 1655 void 1656 1656 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) 1657 1657 { 1658 - struct reservation_object *resv = nvbo->bo.base.resv; 1658 + struct dma_resv *resv = nvbo->bo.base.resv; 1659 1659 1660 1660 if (exclusive) 1661 - reservation_object_add_excl_fence(resv, &fence->base); 1661 + dma_resv_add_excl_fence(resv, &fence->base); 1662 1662 else if (fence) 1663 - reservation_object_add_shared_fence(resv, &fence->base); 1663 + dma_resv_add_shared_fence(resv, &fence->base); 1664 1664 } 1665 1665 1666 1666 struct ttm_bo_driver nouveau_bo_driver = {
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.h
··· 73 73 void nouveau_bo_move_init(struct nouveau_drm *); 74 74 int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, 75 75 u32 tile_mode, u32 tile_flags, struct sg_table *sg, 76 - struct reservation_object *robj, 76 + struct dma_resv *robj, 77 77 struct nouveau_bo **); 78 78 int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig); 79 79 int nouveau_bo_unpin(struct nouveau_bo *);
+6 -6
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 335 335 { 336 336 struct nouveau_fence_chan *fctx = chan->fence; 337 337 struct dma_fence *fence; 338 - struct reservation_object *resv = nvbo->bo.base.resv; 339 - struct reservation_object_list *fobj; 338 + struct dma_resv *resv = nvbo->bo.base.resv; 339 + struct dma_resv_list *fobj; 340 340 struct nouveau_fence *f; 341 341 int ret = 0, i; 342 342 343 343 if (!exclusive) { 344 - ret = reservation_object_reserve_shared(resv, 1); 344 + ret = dma_resv_reserve_shared(resv, 1); 345 345 346 346 if (ret) 347 347 return ret; 348 348 } 349 349 350 - fobj = reservation_object_get_list(resv); 351 - fence = reservation_object_get_excl(resv); 350 + fobj = dma_resv_get_list(resv); 351 + fence = dma_resv_get_excl(resv); 352 352 353 353 if (fence && (!exclusive || !fobj || !fobj->shared_count)) { 354 354 struct nouveau_channel *prev = NULL; ··· 377 377 bool must_wait = true; 378 378 379 379 fence = rcu_dereference_protected(fobj->shared[i], 380 - reservation_object_held(resv)); 380 + dma_resv_held(resv)); 381 381 382 382 f = nouveau_local_fence(fence, chan->drm); 383 383 if (f) {
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 887 887 return -ENOENT; 888 888 nvbo = nouveau_gem_object(gem); 889 889 890 - lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true, 890 + lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true, 891 891 no_wait ? 0 : 30 * HZ); 892 892 if (!lret) 893 893 ret = -EBUSY;
+3 -3
drivers/gpu/drm/nouveau/nouveau_prime.c
··· 62 62 { 63 63 struct nouveau_drm *drm = nouveau_drm(dev); 64 64 struct nouveau_bo *nvbo; 65 - struct reservation_object *robj = attach->dmabuf->resv; 65 + struct dma_resv *robj = attach->dmabuf->resv; 66 66 u32 flags = 0; 67 67 int ret; 68 68 69 69 flags = TTM_PL_FLAG_TT; 70 70 71 - reservation_object_lock(robj, NULL); 71 + dma_resv_lock(robj, NULL); 72 72 ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0, 73 73 sg, robj, &nvbo); 74 - reservation_object_unlock(robj); 74 + dma_resv_unlock(robj); 75 75 if (ret) 76 76 return ERR_PTR(ret); 77 77
-38
drivers/gpu/drm/omapdrm/displays/Kconfig
··· 29 29 help 30 30 Driver for generic DSI command mode panels. 31 31 32 - config DRM_OMAP_PANEL_SONY_ACX565AKM 33 - tristate "ACX565AKM Panel" 34 - depends on SPI && BACKLIGHT_CLASS_DEVICE 35 - help 36 - This is the LCD panel used on Nokia N900 37 - 38 - config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02 39 - tristate "LG.Philips LB035Q02 LCD Panel" 40 - depends on SPI 41 - help 42 - LCD Panel used on the Gumstix Overo Palo35 43 - 44 - config DRM_OMAP_PANEL_SHARP_LS037V7DW01 45 - tristate "Sharp LS037V7DW01 LCD Panel" 46 - depends on BACKLIGHT_CLASS_DEVICE 47 - help 48 - LCD Panel used in TI's SDP3430 and EVM boards 49 - 50 - config DRM_OMAP_PANEL_TPO_TD028TTEC1 51 - tristate "TPO TD028TTEC1 LCD Panel" 52 - depends on SPI 53 - help 54 - LCD panel used in Openmoko. 55 - 56 - config DRM_OMAP_PANEL_TPO_TD043MTEA1 57 - tristate "TPO TD043MTEA1 LCD Panel" 58 - depends on SPI 59 - help 60 - LCD Panel used in OMAP3 Pandora 61 - 62 - config DRM_OMAP_PANEL_NEC_NL8048HL11 63 - tristate "NEC NL8048HL11 Panel" 64 - depends on SPI 65 - depends on BACKLIGHT_CLASS_DEVICE 66 - help 67 - This NEC NL8048HL11 panel is TFT LCD used in the 68 - Zoom2/3/3630 sdp boards. 69 - 70 32 endmenu
-6
drivers/gpu/drm/omapdrm/displays/Makefile
··· 4 4 obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o 5 5 obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o 6 6 obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o 7 - obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o 8 - obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o 9 - obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o 10 - obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o 11 - obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o 12 - obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
-251
drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * LG.Philips LB035Q02 LCD Panel driver 4 - * 5 - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ 6 - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> 7 - * Based on a driver by: Steve Sakoman <steve@sakoman.com> 8 - */ 9 - 10 - #include <linux/module.h> 11 - #include <linux/delay.h> 12 - #include <linux/spi/spi.h> 13 - #include <linux/mutex.h> 14 - #include <linux/gpio.h> 15 - #include <linux/gpio/consumer.h> 16 - 17 - #include "../dss/omapdss.h" 18 - 19 - static const struct videomode lb035q02_vm = { 20 - .hactive = 320, 21 - .vactive = 240, 22 - 23 - .pixelclock = 6500000, 24 - 25 - .hsync_len = 2, 26 - .hfront_porch = 20, 27 - .hback_porch = 68, 28 - 29 - .vsync_len = 2, 30 - .vfront_porch = 4, 31 - .vback_porch = 18, 32 - 33 - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, 34 - }; 35 - 36 - struct panel_drv_data { 37 - struct omap_dss_device dssdev; 38 - 39 - struct spi_device *spi; 40 - 41 - struct videomode vm; 42 - 43 - struct gpio_desc *enable_gpio; 44 - }; 45 - 46 - #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) 47 - 48 - static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val) 49 - { 50 - struct spi_message msg; 51 - struct spi_transfer index_xfer = { 52 - .len = 3, 53 - .cs_change = 1, 54 - }; 55 - struct spi_transfer value_xfer = { 56 - .len = 3, 57 - }; 58 - u8 buffer[16]; 59 - 60 - spi_message_init(&msg); 61 - 62 - /* register index */ 63 - buffer[0] = 0x70; 64 - buffer[1] = 0x00; 65 - buffer[2] = reg & 0x7f; 66 - index_xfer.tx_buf = buffer; 67 - spi_message_add_tail(&index_xfer, &msg); 68 - 69 - /* register value */ 70 - buffer[4] = 0x72; 71 - buffer[5] = val >> 8; 72 - buffer[6] = val; 73 - value_xfer.tx_buf = buffer + 4; 74 - spi_message_add_tail(&value_xfer, &msg); 75 - 76 - return spi_sync(spi, &msg); 77 - } 78 - 79 - static void init_lb035q02_panel(struct spi_device *spi) 80 - { 81 - /* Init sequence from page 
28 of the lb035q02 spec */ 82 - lb035q02_write_reg(spi, 0x01, 0x6300); 83 - lb035q02_write_reg(spi, 0x02, 0x0200); 84 - lb035q02_write_reg(spi, 0x03, 0x0177); 85 - lb035q02_write_reg(spi, 0x04, 0x04c7); 86 - lb035q02_write_reg(spi, 0x05, 0xffc0); 87 - lb035q02_write_reg(spi, 0x06, 0xe806); 88 - lb035q02_write_reg(spi, 0x0a, 0x4008); 89 - lb035q02_write_reg(spi, 0x0b, 0x0000); 90 - lb035q02_write_reg(spi, 0x0d, 0x0030); 91 - lb035q02_write_reg(spi, 0x0e, 0x2800); 92 - lb035q02_write_reg(spi, 0x0f, 0x0000); 93 - lb035q02_write_reg(spi, 0x16, 0x9f80); 94 - lb035q02_write_reg(spi, 0x17, 0x0a0f); 95 - lb035q02_write_reg(spi, 0x1e, 0x00c1); 96 - lb035q02_write_reg(spi, 0x30, 0x0300); 97 - lb035q02_write_reg(spi, 0x31, 0x0007); 98 - lb035q02_write_reg(spi, 0x32, 0x0000); 99 - lb035q02_write_reg(spi, 0x33, 0x0000); 100 - lb035q02_write_reg(spi, 0x34, 0x0707); 101 - lb035q02_write_reg(spi, 0x35, 0x0004); 102 - lb035q02_write_reg(spi, 0x36, 0x0302); 103 - lb035q02_write_reg(spi, 0x37, 0x0202); 104 - lb035q02_write_reg(spi, 0x3a, 0x0a0d); 105 - lb035q02_write_reg(spi, 0x3b, 0x0806); 106 - } 107 - 108 - static int lb035q02_connect(struct omap_dss_device *src, 109 - struct omap_dss_device *dst) 110 - { 111 - struct panel_drv_data *ddata = to_panel_data(dst); 112 - 113 - init_lb035q02_panel(ddata->spi); 114 - 115 - return 0; 116 - } 117 - 118 - static void lb035q02_disconnect(struct omap_dss_device *src, 119 - struct omap_dss_device *dst) 120 - { 121 - } 122 - 123 - static void lb035q02_enable(struct omap_dss_device *dssdev) 124 - { 125 - struct panel_drv_data *ddata = to_panel_data(dssdev); 126 - 127 - if (ddata->enable_gpio) 128 - gpiod_set_value_cansleep(ddata->enable_gpio, 1); 129 - } 130 - 131 - static void lb035q02_disable(struct omap_dss_device *dssdev) 132 - { 133 - struct panel_drv_data *ddata = to_panel_data(dssdev); 134 - 135 - if (ddata->enable_gpio) 136 - gpiod_set_value_cansleep(ddata->enable_gpio, 0); 137 - } 138 - 139 - static int lb035q02_get_modes(struct 
omap_dss_device *dssdev, 140 - struct drm_connector *connector) 141 - { 142 - struct panel_drv_data *ddata = to_panel_data(dssdev); 143 - 144 - return omapdss_display_get_modes(connector, &ddata->vm); 145 - } 146 - 147 - static const struct omap_dss_device_ops lb035q02_ops = { 148 - .connect = lb035q02_connect, 149 - .disconnect = lb035q02_disconnect, 150 - 151 - .enable = lb035q02_enable, 152 - .disable = lb035q02_disable, 153 - 154 - .get_modes = lb035q02_get_modes, 155 - }; 156 - 157 - static int lb035q02_probe_of(struct spi_device *spi) 158 - { 159 - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); 160 - struct gpio_desc *gpio; 161 - 162 - gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW); 163 - if (IS_ERR(gpio)) { 164 - dev_err(&spi->dev, "failed to parse enable gpio\n"); 165 - return PTR_ERR(gpio); 166 - } 167 - 168 - ddata->enable_gpio = gpio; 169 - 170 - return 0; 171 - } 172 - 173 - static int lb035q02_panel_spi_probe(struct spi_device *spi) 174 - { 175 - struct panel_drv_data *ddata; 176 - struct omap_dss_device *dssdev; 177 - int r; 178 - 179 - ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 180 - if (ddata == NULL) 181 - return -ENOMEM; 182 - 183 - dev_set_drvdata(&spi->dev, ddata); 184 - 185 - ddata->spi = spi; 186 - 187 - r = lb035q02_probe_of(spi); 188 - if (r) 189 - return r; 190 - 191 - ddata->vm = lb035q02_vm; 192 - 193 - dssdev = &ddata->dssdev; 194 - dssdev->dev = &spi->dev; 195 - dssdev->ops = &lb035q02_ops; 196 - dssdev->type = OMAP_DISPLAY_TYPE_DPI; 197 - dssdev->display = true; 198 - dssdev->owner = THIS_MODULE; 199 - dssdev->of_ports = BIT(0); 200 - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; 201 - 202 - /* 203 - * Note: According to the panel documentation: 204 - * DE is active LOW 205 - * DATA needs to be driven on the FALLING edge 206 - */ 207 - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH 208 - | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE 209 - | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; 210 - 211 - 
omapdss_display_init(dssdev); 212 - omapdss_device_register(dssdev); 213 - 214 - return 0; 215 - } 216 - 217 - static int lb035q02_panel_spi_remove(struct spi_device *spi) 218 - { 219 - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); 220 - struct omap_dss_device *dssdev = &ddata->dssdev; 221 - 222 - omapdss_device_unregister(dssdev); 223 - 224 - lb035q02_disable(dssdev); 225 - 226 - return 0; 227 - } 228 - 229 - static const struct of_device_id lb035q02_of_match[] = { 230 - { .compatible = "omapdss,lgphilips,lb035q02", }, 231 - {}, 232 - }; 233 - 234 - MODULE_DEVICE_TABLE(of, lb035q02_of_match); 235 - 236 - static struct spi_driver lb035q02_spi_driver = { 237 - .probe = lb035q02_panel_spi_probe, 238 - .remove = lb035q02_panel_spi_remove, 239 - .driver = { 240 - .name = "panel_lgphilips_lb035q02", 241 - .of_match_table = lb035q02_of_match, 242 - .suppress_bind_attrs = true, 243 - }, 244 - }; 245 - 246 - module_spi_driver(lb035q02_spi_driver); 247 - 248 - MODULE_ALIAS("spi:lgphilips,lb035q02"); 249 - MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); 250 - MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver"); 251 - MODULE_LICENSE("GPL");
-271
drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * NEC NL8048HL11 Panel driver 4 - * 5 - * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ 6 - * Author: Erik Gilling <konkers@android.com> 7 - * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com> 8 - */ 9 - 10 - #include <linux/delay.h> 11 - #include <linux/gpio/consumer.h> 12 - #include <linux/module.h> 13 - #include <linux/spi/spi.h> 14 - 15 - #include "../dss/omapdss.h" 16 - 17 - struct panel_drv_data { 18 - struct omap_dss_device dssdev; 19 - 20 - struct videomode vm; 21 - 22 - struct gpio_desc *res_gpio; 23 - 24 - struct spi_device *spi; 25 - }; 26 - 27 - #define LCD_XRES 800 28 - #define LCD_YRES 480 29 - /* 30 - * NEC PIX Clock Ratings 31 - * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz 32 - */ 33 - #define LCD_PIXEL_CLOCK 23800000 34 - 35 - static const struct { 36 - unsigned char addr; 37 - unsigned char dat; 38 - } nec_8048_init_seq[] = { 39 - { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 }, 40 - { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 }, 41 - { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 }, 42 - { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F }, 43 - { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F }, 44 - { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F }, 45 - { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F }, 46 - { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 }, 47 - { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 }, 48 - { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C }, 49 - { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 }, 50 - { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 }, 51 - { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 }, 52 - { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 }, 53 - { 138, 0x00 }, { 
139, 0x00 }, { 140, 0x00 }, { 141, 0xFC }, 54 - { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 }, 55 - { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 }, 56 - { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 }, 57 - }; 58 - 59 - static const struct videomode nec_8048_panel_vm = { 60 - .hactive = LCD_XRES, 61 - .vactive = LCD_YRES, 62 - .pixelclock = LCD_PIXEL_CLOCK, 63 - .hfront_porch = 6, 64 - .hsync_len = 1, 65 - .hback_porch = 4, 66 - .vfront_porch = 3, 67 - .vsync_len = 1, 68 - .vback_porch = 4, 69 - 70 - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, 71 - }; 72 - 73 - #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) 74 - 75 - static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr, 76 - unsigned char reg_data) 77 - { 78 - int ret = 0; 79 - unsigned int cmd = 0, data = 0; 80 - 81 - cmd = 0x0000 | reg_addr; /* register address write */ 82 - data = 0x0100 | reg_data; /* register data write */ 83 - data = (cmd << 16) | data; 84 - 85 - ret = spi_write(spi, (unsigned char *)&data, 4); 86 - if (ret) 87 - pr_err("error in spi_write %x\n", data); 88 - 89 - return ret; 90 - } 91 - 92 - static int init_nec_8048_wvga_lcd(struct spi_device *spi) 93 - { 94 - unsigned int i; 95 - /* Initialization Sequence */ 96 - /* nec_8048_spi_send(spi, REG, VAL) */ 97 - for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++) 98 - nec_8048_spi_send(spi, nec_8048_init_seq[i].addr, 99 - nec_8048_init_seq[i].dat); 100 - udelay(20); 101 - nec_8048_spi_send(spi, nec_8048_init_seq[i].addr, 102 - nec_8048_init_seq[i].dat); 103 - return 0; 104 - } 105 - 106 - static int nec_8048_connect(struct omap_dss_device *src, 107 - struct omap_dss_device *dst) 108 - { 109 - return 0; 110 - } 111 - 112 - static void nec_8048_disconnect(struct omap_dss_device *src, 113 - struct omap_dss_device *dst) 114 - { 115 - } 116 - 117 - static void nec_8048_enable(struct omap_dss_device *dssdev) 118 - { 119 - struct panel_drv_data *ddata = 
to_panel_data(dssdev); 120 - 121 - gpiod_set_value_cansleep(ddata->res_gpio, 1); 122 - } 123 - 124 - static void nec_8048_disable(struct omap_dss_device *dssdev) 125 - { 126 - struct panel_drv_data *ddata = to_panel_data(dssdev); 127 - 128 - gpiod_set_value_cansleep(ddata->res_gpio, 0); 129 - } 130 - 131 - static int nec_8048_get_modes(struct omap_dss_device *dssdev, 132 - struct drm_connector *connector) 133 - { 134 - struct panel_drv_data *ddata = to_panel_data(dssdev); 135 - 136 - return omapdss_display_get_modes(connector, &ddata->vm); 137 - } 138 - 139 - static const struct omap_dss_device_ops nec_8048_ops = { 140 - .connect = nec_8048_connect, 141 - .disconnect = nec_8048_disconnect, 142 - 143 - .enable = nec_8048_enable, 144 - .disable = nec_8048_disable, 145 - 146 - .get_modes = nec_8048_get_modes, 147 - }; 148 - 149 - static int nec_8048_probe(struct spi_device *spi) 150 - { 151 - struct panel_drv_data *ddata; 152 - struct omap_dss_device *dssdev; 153 - struct gpio_desc *gpio; 154 - int r; 155 - 156 - dev_dbg(&spi->dev, "%s\n", __func__); 157 - 158 - spi->mode = SPI_MODE_0; 159 - spi->bits_per_word = 32; 160 - 161 - r = spi_setup(spi); 162 - if (r < 0) { 163 - dev_err(&spi->dev, "spi_setup failed: %d\n", r); 164 - return r; 165 - } 166 - 167 - init_nec_8048_wvga_lcd(spi); 168 - 169 - ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 170 - if (ddata == NULL) 171 - return -ENOMEM; 172 - 173 - dev_set_drvdata(&spi->dev, ddata); 174 - 175 - ddata->spi = spi; 176 - 177 - gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); 178 - if (IS_ERR(gpio)) { 179 - dev_err(&spi->dev, "failed to get reset gpio\n"); 180 - return PTR_ERR(gpio); 181 - } 182 - 183 - ddata->res_gpio = gpio; 184 - 185 - ddata->vm = nec_8048_panel_vm; 186 - 187 - dssdev = &ddata->dssdev; 188 - dssdev->dev = &spi->dev; 189 - dssdev->ops = &nec_8048_ops; 190 - dssdev->type = OMAP_DISPLAY_TYPE_DPI; 191 - dssdev->display = true; 192 - dssdev->owner = THIS_MODULE; 193 - 
dssdev->of_ports = BIT(0); 194 - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; 195 - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH 196 - | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE 197 - | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; 198 - 199 - omapdss_display_init(dssdev); 200 - omapdss_device_register(dssdev); 201 - 202 - return 0; 203 - } 204 - 205 - static int nec_8048_remove(struct spi_device *spi) 206 - { 207 - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); 208 - struct omap_dss_device *dssdev = &ddata->dssdev; 209 - 210 - dev_dbg(&ddata->spi->dev, "%s\n", __func__); 211 - 212 - omapdss_device_unregister(dssdev); 213 - 214 - nec_8048_disable(dssdev); 215 - 216 - return 0; 217 - } 218 - 219 - #ifdef CONFIG_PM_SLEEP 220 - static int nec_8048_suspend(struct device *dev) 221 - { 222 - struct spi_device *spi = to_spi_device(dev); 223 - 224 - nec_8048_spi_send(spi, 2, 0x01); 225 - mdelay(40); 226 - 227 - return 0; 228 - } 229 - 230 - static int nec_8048_resume(struct device *dev) 231 - { 232 - struct spi_device *spi = to_spi_device(dev); 233 - 234 - /* reinitialize the panel */ 235 - spi_setup(spi); 236 - nec_8048_spi_send(spi, 2, 0x00); 237 - init_nec_8048_wvga_lcd(spi); 238 - 239 - return 0; 240 - } 241 - static SIMPLE_DEV_PM_OPS(nec_8048_pm_ops, nec_8048_suspend, 242 - nec_8048_resume); 243 - #define NEC_8048_PM_OPS (&nec_8048_pm_ops) 244 - #else 245 - #define NEC_8048_PM_OPS NULL 246 - #endif 247 - 248 - static const struct of_device_id nec_8048_of_match[] = { 249 - { .compatible = "omapdss,nec,nl8048hl11", }, 250 - {}, 251 - }; 252 - 253 - MODULE_DEVICE_TABLE(of, nec_8048_of_match); 254 - 255 - static struct spi_driver nec_8048_driver = { 256 - .driver = { 257 - .name = "panel-nec-nl8048hl11", 258 - .pm = NEC_8048_PM_OPS, 259 - .of_match_table = nec_8048_of_match, 260 - .suppress_bind_attrs = true, 261 - }, 262 - .probe = nec_8048_probe, 263 - .remove = nec_8048_remove, 264 - }; 265 - 266 - module_spi_driver(nec_8048_driver); 267 - 268 - 
MODULE_ALIAS("spi:nec,nl8048hl11"); 269 - MODULE_AUTHOR("Erik Gilling <konkers@android.com>"); 270 - MODULE_DESCRIPTION("NEC-NL8048HL11 Driver"); 271 - MODULE_LICENSE("GPL");
-262
drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * LCD panel driver for Sharp LS037V7DW01 4 - * 5 - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ 6 - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> 7 - */ 8 - 9 - #include <linux/delay.h> 10 - #include <linux/gpio/consumer.h> 11 - #include <linux/module.h> 12 - #include <linux/of.h> 13 - #include <linux/platform_device.h> 14 - #include <linux/slab.h> 15 - #include <linux/regulator/consumer.h> 16 - 17 - #include "../dss/omapdss.h" 18 - 19 - struct panel_drv_data { 20 - struct omap_dss_device dssdev; 21 - struct regulator *vcc; 22 - 23 - struct videomode vm; 24 - 25 - struct gpio_desc *resb_gpio; /* low = reset active min 20 us */ 26 - struct gpio_desc *ini_gpio; /* high = power on */ 27 - struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */ 28 - struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */ 29 - struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */ 30 - }; 31 - 32 - static const struct videomode sharp_ls_vm = { 33 - .hactive = 480, 34 - .vactive = 640, 35 - 36 - .pixelclock = 19200000, 37 - 38 - .hsync_len = 2, 39 - .hfront_porch = 1, 40 - .hback_porch = 28, 41 - 42 - .vsync_len = 1, 43 - .vfront_porch = 1, 44 - .vback_porch = 1, 45 - 46 - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, 47 - }; 48 - 49 - #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) 50 - 51 - static int sharp_ls_connect(struct omap_dss_device *src, 52 - struct omap_dss_device *dst) 53 - { 54 - return 0; 55 - } 56 - 57 - static void sharp_ls_disconnect(struct omap_dss_device *src, 58 - struct omap_dss_device *dst) 59 - { 60 - } 61 - 62 - static void sharp_ls_pre_enable(struct omap_dss_device *dssdev) 63 - { 64 - struct panel_drv_data *ddata = to_panel_data(dssdev); 65 - int r; 66 - 67 - if (ddata->vcc) { 68 - r = regulator_enable(ddata->vcc); 69 - if (r) 70 - dev_err(dssdev->dev, "%s: failed to enable regulator\n", 71 - 
__func__); 72 - } 73 - } 74 - 75 - static void sharp_ls_enable(struct omap_dss_device *dssdev) 76 - { 77 - struct panel_drv_data *ddata = to_panel_data(dssdev); 78 - 79 - /* wait couple of vsyncs until enabling the LCD */ 80 - msleep(50); 81 - 82 - if (ddata->resb_gpio) 83 - gpiod_set_value_cansleep(ddata->resb_gpio, 1); 84 - 85 - if (ddata->ini_gpio) 86 - gpiod_set_value_cansleep(ddata->ini_gpio, 1); 87 - } 88 - 89 - static void sharp_ls_disable(struct omap_dss_device *dssdev) 90 - { 91 - struct panel_drv_data *ddata = to_panel_data(dssdev); 92 - 93 - if (ddata->ini_gpio) 94 - gpiod_set_value_cansleep(ddata->ini_gpio, 0); 95 - 96 - if (ddata->resb_gpio) 97 - gpiod_set_value_cansleep(ddata->resb_gpio, 0); 98 - 99 - /* wait at least 5 vsyncs after disabling the LCD */ 100 - msleep(100); 101 - } 102 - 103 - static void sharp_ls_post_disable(struct omap_dss_device *dssdev) 104 - { 105 - struct panel_drv_data *ddata = to_panel_data(dssdev); 106 - 107 - if (ddata->vcc) 108 - regulator_disable(ddata->vcc); 109 - } 110 - 111 - static int sharp_ls_get_modes(struct omap_dss_device *dssdev, 112 - struct drm_connector *connector) 113 - { 114 - struct panel_drv_data *ddata = to_panel_data(dssdev); 115 - 116 - return omapdss_display_get_modes(connector, &ddata->vm); 117 - } 118 - 119 - static const struct omap_dss_device_ops sharp_ls_ops = { 120 - .connect = sharp_ls_connect, 121 - .disconnect = sharp_ls_disconnect, 122 - 123 - .pre_enable = sharp_ls_pre_enable, 124 - .enable = sharp_ls_enable, 125 - .disable = sharp_ls_disable, 126 - .post_disable = sharp_ls_post_disable, 127 - 128 - .get_modes = sharp_ls_get_modes, 129 - }; 130 - 131 - static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, 132 - const char *desc, struct gpio_desc **gpiod) 133 - { 134 - struct gpio_desc *gd; 135 - 136 - *gpiod = NULL; 137 - 138 - gd = devm_gpiod_get_index(dev, desc, index, GPIOD_OUT_LOW); 139 - if (IS_ERR(gd)) 140 - return PTR_ERR(gd); 141 - 142 - *gpiod = gd; 143 - return 0; 
144 - } 145 - 146 - static int sharp_ls_probe_of(struct platform_device *pdev) 147 - { 148 - struct panel_drv_data *ddata = platform_get_drvdata(pdev); 149 - int r; 150 - 151 - ddata->vcc = devm_regulator_get(&pdev->dev, "envdd"); 152 - if (IS_ERR(ddata->vcc)) { 153 - dev_err(&pdev->dev, "failed to get regulator\n"); 154 - return PTR_ERR(ddata->vcc); 155 - } 156 - 157 - /* lcd INI */ 158 - r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "enable", &ddata->ini_gpio); 159 - if (r) 160 - return r; 161 - 162 - /* lcd RESB */ 163 - r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "reset", &ddata->resb_gpio); 164 - if (r) 165 - return r; 166 - 167 - /* lcd MO */ 168 - r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "mode", &ddata->mo_gpio); 169 - if (r) 170 - return r; 171 - 172 - /* lcd LR */ 173 - r = sharp_ls_get_gpio_of(&pdev->dev, 1, 1, "mode", &ddata->lr_gpio); 174 - if (r) 175 - return r; 176 - 177 - /* lcd UD */ 178 - r = sharp_ls_get_gpio_of(&pdev->dev, 2, 1, "mode", &ddata->ud_gpio); 179 - if (r) 180 - return r; 181 - 182 - return 0; 183 - } 184 - 185 - static int sharp_ls_probe(struct platform_device *pdev) 186 - { 187 - struct panel_drv_data *ddata; 188 - struct omap_dss_device *dssdev; 189 - int r; 190 - 191 - ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 192 - if (ddata == NULL) 193 - return -ENOMEM; 194 - 195 - platform_set_drvdata(pdev, ddata); 196 - 197 - r = sharp_ls_probe_of(pdev); 198 - if (r) 199 - return r; 200 - 201 - ddata->vm = sharp_ls_vm; 202 - 203 - dssdev = &ddata->dssdev; 204 - dssdev->dev = &pdev->dev; 205 - dssdev->ops = &sharp_ls_ops; 206 - dssdev->type = OMAP_DISPLAY_TYPE_DPI; 207 - dssdev->display = true; 208 - dssdev->owner = THIS_MODULE; 209 - dssdev->of_ports = BIT(0); 210 - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; 211 - 212 - /* 213 - * Note: According to the panel documentation: 214 - * DATA needs to be driven on the FALLING edge 215 - */ 216 - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH 217 - | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE 218 
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; 219 - 220 - omapdss_display_init(dssdev); 221 - omapdss_device_register(dssdev); 222 - 223 - return 0; 224 - } 225 - 226 - static int __exit sharp_ls_remove(struct platform_device *pdev) 227 - { 228 - struct panel_drv_data *ddata = platform_get_drvdata(pdev); 229 - struct omap_dss_device *dssdev = &ddata->dssdev; 230 - 231 - omapdss_device_unregister(dssdev); 232 - 233 - if (omapdss_device_is_enabled(dssdev)) { 234 - sharp_ls_disable(dssdev); 235 - sharp_ls_post_disable(dssdev); 236 - } 237 - 238 - return 0; 239 - } 240 - 241 - static const struct of_device_id sharp_ls_of_match[] = { 242 - { .compatible = "omapdss,sharp,ls037v7dw01", }, 243 - {}, 244 - }; 245 - 246 - MODULE_DEVICE_TABLE(of, sharp_ls_of_match); 247 - 248 - static struct platform_driver sharp_ls_driver = { 249 - .probe = sharp_ls_probe, 250 - .remove = __exit_p(sharp_ls_remove), 251 - .driver = { 252 - .name = "panel-sharp-ls037v7dw01", 253 - .of_match_table = sharp_ls_of_match, 254 - .suppress_bind_attrs = true, 255 - }, 256 - }; 257 - 258 - module_platform_driver(sharp_ls_driver); 259 - 260 - MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); 261 - MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver"); 262 - MODULE_LICENSE("GPL");
-755
drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Sony ACX565AKM LCD Panel driver 4 - * 5 - * Copyright (C) 2010 Nokia Corporation 6 - * 7 - * Original Driver Author: Imre Deak <imre.deak@nokia.com> 8 - * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@ti.com> 9 - * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com> 10 - */ 11 - 12 - #include <linux/backlight.h> 13 - #include <linux/delay.h> 14 - #include <linux/gpio/consumer.h> 15 - #include <linux/jiffies.h> 16 - #include <linux/kernel.h> 17 - #include <linux/module.h> 18 - #include <linux/platform_device.h> 19 - #include <linux/sched.h> 20 - #include <linux/spi/spi.h> 21 - 22 - #include "../dss/omapdss.h" 23 - 24 - #define MIPID_CMD_READ_DISP_ID 0x04 25 - #define MIPID_CMD_READ_RED 0x06 26 - #define MIPID_CMD_READ_GREEN 0x07 27 - #define MIPID_CMD_READ_BLUE 0x08 28 - #define MIPID_CMD_READ_DISP_STATUS 0x09 29 - #define MIPID_CMD_RDDSDR 0x0F 30 - #define MIPID_CMD_SLEEP_IN 0x10 31 - #define MIPID_CMD_SLEEP_OUT 0x11 32 - #define MIPID_CMD_DISP_OFF 0x28 33 - #define MIPID_CMD_DISP_ON 0x29 34 - #define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51 35 - #define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52 36 - #define MIPID_CMD_WRITE_CTRL_DISP 0x53 37 - 38 - #define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5) 39 - #define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4) 40 - #define CTRL_DISP_BACKLIGHT_ON (1 << 2) 41 - #define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1) 42 - 43 - #define MIPID_CMD_READ_CTRL_DISP 0x54 44 - #define MIPID_CMD_WRITE_CABC 0x55 45 - #define MIPID_CMD_READ_CABC 0x56 46 - 47 - #define MIPID_VER_LPH8923 3 48 - #define MIPID_VER_LS041Y3 4 49 - #define MIPID_VER_L4F00311 8 50 - #define MIPID_VER_ACX565AKM 9 51 - 52 - struct panel_drv_data { 53 - struct omap_dss_device dssdev; 54 - 55 - struct gpio_desc *reset_gpio; 56 - 57 - struct videomode vm; 58 - 59 - char *name; 60 - int enabled; 61 - int model; 62 - int revision; 63 - u8 display_id[3]; 64 - unsigned has_bc:1; 65 - unsigned 
has_cabc:1; 66 - unsigned cabc_mode; 67 - unsigned long hw_guard_end; /* next value of jiffies 68 - when we can issue the 69 - next sleep in/out command */ 70 - unsigned long hw_guard_wait; /* max guard time in jiffies */ 71 - 72 - struct spi_device *spi; 73 - struct mutex mutex; 74 - 75 - struct backlight_device *bl_dev; 76 - }; 77 - 78 - static const struct videomode acx565akm_panel_vm = { 79 - .hactive = 800, 80 - .vactive = 480, 81 - .pixelclock = 24000000, 82 - .hfront_porch = 28, 83 - .hsync_len = 4, 84 - .hback_porch = 24, 85 - .vfront_porch = 3, 86 - .vsync_len = 3, 87 - .vback_porch = 4, 88 - 89 - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, 90 - }; 91 - 92 - #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) 93 - 94 - static void acx565akm_transfer(struct panel_drv_data *ddata, int cmd, 95 - const u8 *wbuf, int wlen, u8 *rbuf, int rlen) 96 - { 97 - struct spi_message m; 98 - struct spi_transfer *x, xfer[5]; 99 - int r; 100 - 101 - BUG_ON(ddata->spi == NULL); 102 - 103 - spi_message_init(&m); 104 - 105 - memset(xfer, 0, sizeof(xfer)); 106 - x = &xfer[0]; 107 - 108 - cmd &= 0xff; 109 - x->tx_buf = &cmd; 110 - x->bits_per_word = 9; 111 - x->len = 2; 112 - 113 - if (rlen > 1 && wlen == 0) { 114 - /* 115 - * Between the command and the response data there is a 116 - * dummy clock cycle. Add an extra bit after the command 117 - * word to account for this. 
118 - */ 119 - x->bits_per_word = 10; 120 - cmd <<= 1; 121 - } 122 - spi_message_add_tail(x, &m); 123 - 124 - if (wlen) { 125 - x++; 126 - x->tx_buf = wbuf; 127 - x->len = wlen; 128 - x->bits_per_word = 9; 129 - spi_message_add_tail(x, &m); 130 - } 131 - 132 - if (rlen) { 133 - x++; 134 - x->rx_buf = rbuf; 135 - x->len = rlen; 136 - spi_message_add_tail(x, &m); 137 - } 138 - 139 - r = spi_sync(ddata->spi, &m); 140 - if (r < 0) 141 - dev_dbg(&ddata->spi->dev, "spi_sync %d\n", r); 142 - } 143 - 144 - static inline void acx565akm_cmd(struct panel_drv_data *ddata, int cmd) 145 - { 146 - acx565akm_transfer(ddata, cmd, NULL, 0, NULL, 0); 147 - } 148 - 149 - static inline void acx565akm_write(struct panel_drv_data *ddata, 150 - int reg, const u8 *buf, int len) 151 - { 152 - acx565akm_transfer(ddata, reg, buf, len, NULL, 0); 153 - } 154 - 155 - static inline void acx565akm_read(struct panel_drv_data *ddata, 156 - int reg, u8 *buf, int len) 157 - { 158 - acx565akm_transfer(ddata, reg, NULL, 0, buf, len); 159 - } 160 - 161 - static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec) 162 - { 163 - ddata->hw_guard_wait = msecs_to_jiffies(guard_msec); 164 - ddata->hw_guard_end = jiffies + ddata->hw_guard_wait; 165 - } 166 - 167 - static void hw_guard_wait(struct panel_drv_data *ddata) 168 - { 169 - unsigned long wait = ddata->hw_guard_end - jiffies; 170 - 171 - if ((long)wait > 0 && wait <= ddata->hw_guard_wait) { 172 - set_current_state(TASK_UNINTERRUPTIBLE); 173 - schedule_timeout(wait); 174 - } 175 - } 176 - 177 - static void set_sleep_mode(struct panel_drv_data *ddata, int on) 178 - { 179 - int cmd; 180 - 181 - if (on) 182 - cmd = MIPID_CMD_SLEEP_IN; 183 - else 184 - cmd = MIPID_CMD_SLEEP_OUT; 185 - /* 186 - * We have to keep 120msec between sleep in/out commands. 187 - * (8.2.15, 8.2.16). 
188 - */ 189 - hw_guard_wait(ddata); 190 - acx565akm_cmd(ddata, cmd); 191 - hw_guard_start(ddata, 120); 192 - } 193 - 194 - static void set_display_state(struct panel_drv_data *ddata, int enabled) 195 - { 196 - int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF; 197 - 198 - acx565akm_cmd(ddata, cmd); 199 - } 200 - 201 - static int panel_enabled(struct panel_drv_data *ddata) 202 - { 203 - __be32 v; 204 - u32 disp_status; 205 - int enabled; 206 - 207 - acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS, (u8 *)&v, 4); 208 - disp_status = __be32_to_cpu(v); 209 - enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10)); 210 - dev_dbg(&ddata->spi->dev, 211 - "LCD panel %senabled by bootloader (status 0x%04x)\n", 212 - enabled ? "" : "not ", disp_status); 213 - return enabled; 214 - } 215 - 216 - static int panel_detect(struct panel_drv_data *ddata) 217 - { 218 - acx565akm_read(ddata, MIPID_CMD_READ_DISP_ID, ddata->display_id, 3); 219 - dev_dbg(&ddata->spi->dev, "MIPI display ID: %02x%02x%02x\n", 220 - ddata->display_id[0], 221 - ddata->display_id[1], 222 - ddata->display_id[2]); 223 - 224 - switch (ddata->display_id[0]) { 225 - case 0x10: 226 - ddata->model = MIPID_VER_ACX565AKM; 227 - ddata->name = "acx565akm"; 228 - ddata->has_bc = 1; 229 - ddata->has_cabc = 1; 230 - break; 231 - case 0x29: 232 - ddata->model = MIPID_VER_L4F00311; 233 - ddata->name = "l4f00311"; 234 - break; 235 - case 0x45: 236 - ddata->model = MIPID_VER_LPH8923; 237 - ddata->name = "lph8923"; 238 - break; 239 - case 0x83: 240 - ddata->model = MIPID_VER_LS041Y3; 241 - ddata->name = "ls041y3"; 242 - break; 243 - default: 244 - ddata->name = "unknown"; 245 - dev_err(&ddata->spi->dev, "invalid display ID\n"); 246 - return -ENODEV; 247 - } 248 - 249 - ddata->revision = ddata->display_id[1]; 250 - 251 - dev_info(&ddata->spi->dev, "omapfb: %s rev %02x LCD detected\n", 252 - ddata->name, ddata->revision); 253 - 254 - return 0; 255 - } 256 - 257 - /*----------------------Backlight 
Control-------------------------*/ 258 - 259 - static void enable_backlight_ctrl(struct panel_drv_data *ddata, int enable) 260 - { 261 - u16 ctrl; 262 - 263 - acx565akm_read(ddata, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1); 264 - if (enable) { 265 - ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON | 266 - CTRL_DISP_BACKLIGHT_ON; 267 - } else { 268 - ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON | 269 - CTRL_DISP_BACKLIGHT_ON); 270 - } 271 - 272 - ctrl |= 1 << 8; 273 - acx565akm_write(ddata, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2); 274 - } 275 - 276 - static void set_cabc_mode(struct panel_drv_data *ddata, unsigned int mode) 277 - { 278 - u16 cabc_ctrl; 279 - 280 - ddata->cabc_mode = mode; 281 - if (!ddata->enabled) 282 - return; 283 - cabc_ctrl = 0; 284 - acx565akm_read(ddata, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1); 285 - cabc_ctrl &= ~3; 286 - cabc_ctrl |= (1 << 8) | (mode & 3); 287 - acx565akm_write(ddata, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2); 288 - } 289 - 290 - static unsigned int get_cabc_mode(struct panel_drv_data *ddata) 291 - { 292 - return ddata->cabc_mode; 293 - } 294 - 295 - static unsigned int get_hw_cabc_mode(struct panel_drv_data *ddata) 296 - { 297 - u8 cabc_ctrl; 298 - 299 - acx565akm_read(ddata, MIPID_CMD_READ_CABC, &cabc_ctrl, 1); 300 - return cabc_ctrl & 3; 301 - } 302 - 303 - static void acx565akm_set_brightness(struct panel_drv_data *ddata, int level) 304 - { 305 - int bv; 306 - 307 - bv = level | (1 << 8); 308 - acx565akm_write(ddata, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2); 309 - 310 - if (level) 311 - enable_backlight_ctrl(ddata, 1); 312 - else 313 - enable_backlight_ctrl(ddata, 0); 314 - } 315 - 316 - static int acx565akm_get_actual_brightness(struct panel_drv_data *ddata) 317 - { 318 - u8 bv; 319 - 320 - acx565akm_read(ddata, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1); 321 - 322 - return bv; 323 - } 324 - 325 - 326 - static int acx565akm_bl_update_status(struct backlight_device *dev) 327 - { 328 - struct panel_drv_data *ddata = 
dev_get_drvdata(&dev->dev); 329 - int level; 330 - 331 - dev_dbg(&ddata->spi->dev, "%s\n", __func__); 332 - 333 - if (dev->props.fb_blank == FB_BLANK_UNBLANK && 334 - dev->props.power == FB_BLANK_UNBLANK) 335 - level = dev->props.brightness; 336 - else 337 - level = 0; 338 - 339 - if (ddata->has_bc) 340 - acx565akm_set_brightness(ddata, level); 341 - else 342 - return -ENODEV; 343 - 344 - return 0; 345 - } 346 - 347 - static int acx565akm_bl_get_intensity(struct backlight_device *dev) 348 - { 349 - struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); 350 - 351 - dev_dbg(&dev->dev, "%s\n", __func__); 352 - 353 - if (!ddata->has_bc) 354 - return -ENODEV; 355 - 356 - if (dev->props.fb_blank == FB_BLANK_UNBLANK && 357 - dev->props.power == FB_BLANK_UNBLANK) { 358 - if (ddata->has_bc) 359 - return acx565akm_get_actual_brightness(ddata); 360 - else 361 - return dev->props.brightness; 362 - } 363 - 364 - return 0; 365 - } 366 - 367 - static int acx565akm_bl_update_status_locked(struct backlight_device *dev) 368 - { 369 - struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); 370 - int r; 371 - 372 - mutex_lock(&ddata->mutex); 373 - r = acx565akm_bl_update_status(dev); 374 - mutex_unlock(&ddata->mutex); 375 - 376 - return r; 377 - } 378 - 379 - static int acx565akm_bl_get_intensity_locked(struct backlight_device *dev) 380 - { 381 - struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); 382 - int r; 383 - 384 - mutex_lock(&ddata->mutex); 385 - r = acx565akm_bl_get_intensity(dev); 386 - mutex_unlock(&ddata->mutex); 387 - 388 - return r; 389 - } 390 - 391 - static const struct backlight_ops acx565akm_bl_ops = { 392 - .get_brightness = acx565akm_bl_get_intensity_locked, 393 - .update_status = acx565akm_bl_update_status_locked, 394 - }; 395 - 396 - /*--------------------Auto Brightness control via Sysfs---------------------*/ 397 - 398 - static const char * const cabc_modes[] = { 399 - "off", /* always used when CABC is not supported */ 400 - "ui", 401 - 
"still-image", 402 - "moving-image", 403 - }; 404 - 405 - static ssize_t show_cabc_mode(struct device *dev, 406 - struct device_attribute *attr, 407 - char *buf) 408 - { 409 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 410 - const char *mode_str; 411 - int mode; 412 - int len; 413 - 414 - if (!ddata->has_cabc) 415 - mode = 0; 416 - else 417 - mode = get_cabc_mode(ddata); 418 - mode_str = "unknown"; 419 - if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes)) 420 - mode_str = cabc_modes[mode]; 421 - len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str); 422 - 423 - return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1; 424 - } 425 - 426 - static ssize_t store_cabc_mode(struct device *dev, 427 - struct device_attribute *attr, 428 - const char *buf, size_t count) 429 - { 430 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 431 - int i; 432 - 433 - for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { 434 - const char *mode_str = cabc_modes[i]; 435 - int cmp_len = strlen(mode_str); 436 - 437 - if (count > 0 && buf[count - 1] == '\n') 438 - count--; 439 - if (count != cmp_len) 440 - continue; 441 - 442 - if (strncmp(buf, mode_str, cmp_len) == 0) 443 - break; 444 - } 445 - 446 - if (i == ARRAY_SIZE(cabc_modes)) 447 - return -EINVAL; 448 - 449 - if (!ddata->has_cabc && i != 0) 450 - return -EINVAL; 451 - 452 - mutex_lock(&ddata->mutex); 453 - set_cabc_mode(ddata, i); 454 - mutex_unlock(&ddata->mutex); 455 - 456 - return count; 457 - } 458 - 459 - static ssize_t show_cabc_available_modes(struct device *dev, 460 - struct device_attribute *attr, 461 - char *buf) 462 - { 463 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 464 - int len; 465 - int i; 466 - 467 - if (!ddata->has_cabc) 468 - return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]); 469 - 470 - for (i = 0, len = 0; 471 - len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) 472 - len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s", 473 - i ? " " : "", cabc_modes[i], 474 - i == ARRAY_SIZE(cabc_modes) - 1 ? 
"\n" : ""); 475 - 476 - return len < PAGE_SIZE ? len : PAGE_SIZE - 1; 477 - } 478 - 479 - static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, 480 - show_cabc_mode, store_cabc_mode); 481 - static DEVICE_ATTR(cabc_available_modes, S_IRUGO, 482 - show_cabc_available_modes, NULL); 483 - 484 - static struct attribute *bldev_attrs[] = { 485 - &dev_attr_cabc_mode.attr, 486 - &dev_attr_cabc_available_modes.attr, 487 - NULL, 488 - }; 489 - 490 - static const struct attribute_group bldev_attr_group = { 491 - .attrs = bldev_attrs, 492 - }; 493 - 494 - static int acx565akm_connect(struct omap_dss_device *src, 495 - struct omap_dss_device *dst) 496 - { 497 - return 0; 498 - } 499 - 500 - static void acx565akm_disconnect(struct omap_dss_device *src, 501 - struct omap_dss_device *dst) 502 - { 503 - } 504 - 505 - static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) 506 - { 507 - struct panel_drv_data *ddata = to_panel_data(dssdev); 508 - 509 - dev_dbg(&ddata->spi->dev, "%s\n", __func__); 510 - 511 - /*FIXME tweak me */ 512 - msleep(50); 513 - 514 - if (ddata->reset_gpio) 515 - gpiod_set_value(ddata->reset_gpio, 1); 516 - 517 - if (ddata->enabled) { 518 - dev_dbg(&ddata->spi->dev, "panel already enabled\n"); 519 - return 0; 520 - } 521 - 522 - /* 523 - * We have to meet all the following delay requirements: 524 - * 1. tRW: reset pulse width 10usec (7.12.1) 525 - * 2. tRT: reset cancel time 5msec (7.12.1) 526 - * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst 527 - * case (7.6.2) 528 - * 4. 120msec before the sleep out command (7.12.1) 529 - */ 530 - msleep(120); 531 - 532 - set_sleep_mode(ddata, 0); 533 - ddata->enabled = 1; 534 - 535 - /* 5msec between sleep out and the next command. 
(8.2.16) */ 536 - usleep_range(5000, 10000); 537 - set_display_state(ddata, 1); 538 - set_cabc_mode(ddata, ddata->cabc_mode); 539 - 540 - return acx565akm_bl_update_status(ddata->bl_dev); 541 - } 542 - 543 - static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) 544 - { 545 - struct panel_drv_data *ddata = to_panel_data(dssdev); 546 - 547 - dev_dbg(dssdev->dev, "%s\n", __func__); 548 - 549 - if (!ddata->enabled) 550 - return; 551 - 552 - set_display_state(ddata, 0); 553 - set_sleep_mode(ddata, 1); 554 - ddata->enabled = 0; 555 - /* 556 - * We have to provide PCLK,HS,VS signals for 2 frames (worst case 557 - * ~50msec) after sending the sleep in command and asserting the 558 - * reset signal. We probably could assert the reset w/o the delay 559 - * but we still delay to avoid possible artifacts. (7.6.1) 560 - */ 561 - msleep(50); 562 - 563 - if (ddata->reset_gpio) 564 - gpiod_set_value(ddata->reset_gpio, 0); 565 - 566 - /* FIXME need to tweak this delay */ 567 - msleep(100); 568 - } 569 - 570 - static void acx565akm_enable(struct omap_dss_device *dssdev) 571 - { 572 - struct panel_drv_data *ddata = to_panel_data(dssdev); 573 - 574 - mutex_lock(&ddata->mutex); 575 - acx565akm_panel_power_on(dssdev); 576 - mutex_unlock(&ddata->mutex); 577 - } 578 - 579 - static void acx565akm_disable(struct omap_dss_device *dssdev) 580 - { 581 - struct panel_drv_data *ddata = to_panel_data(dssdev); 582 - 583 - mutex_lock(&ddata->mutex); 584 - acx565akm_panel_power_off(dssdev); 585 - mutex_unlock(&ddata->mutex); 586 - } 587 - 588 - static int acx565akm_get_modes(struct omap_dss_device *dssdev, 589 - struct drm_connector *connector) 590 - { 591 - struct panel_drv_data *ddata = to_panel_data(dssdev); 592 - 593 - return omapdss_display_get_modes(connector, &ddata->vm); 594 - } 595 - 596 - static const struct omap_dss_device_ops acx565akm_ops = { 597 - .connect = acx565akm_connect, 598 - .disconnect = acx565akm_disconnect, 599 - 600 - .enable = acx565akm_enable, 601 - 
.disable = acx565akm_disable, 602 - 603 - .get_modes = acx565akm_get_modes, 604 - }; 605 - 606 - static int acx565akm_probe(struct spi_device *spi) 607 - { 608 - struct panel_drv_data *ddata; 609 - struct omap_dss_device *dssdev; 610 - struct backlight_device *bldev; 611 - int max_brightness, brightness; 612 - struct backlight_properties props; 613 - struct gpio_desc *gpio; 614 - int r; 615 - 616 - dev_dbg(&spi->dev, "%s\n", __func__); 617 - 618 - spi->mode = SPI_MODE_3; 619 - 620 - ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 621 - if (ddata == NULL) 622 - return -ENOMEM; 623 - 624 - dev_set_drvdata(&spi->dev, ddata); 625 - 626 - ddata->spi = spi; 627 - 628 - mutex_init(&ddata->mutex); 629 - 630 - gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW); 631 - if (IS_ERR(gpio)) { 632 - dev_err(&spi->dev, "failed to parse reset gpio\n"); 633 - return PTR_ERR(gpio); 634 - } 635 - 636 - ddata->reset_gpio = gpio; 637 - 638 - if (ddata->reset_gpio) 639 - gpiod_set_value(ddata->reset_gpio, 1); 640 - 641 - /* 642 - * After reset we have to wait 5 msec before the first 643 - * command can be sent. 
644 - */ 645 - usleep_range(5000, 10000); 646 - 647 - ddata->enabled = panel_enabled(ddata); 648 - 649 - r = panel_detect(ddata); 650 - 651 - if (!ddata->enabled && ddata->reset_gpio) 652 - gpiod_set_value(ddata->reset_gpio, 0); 653 - 654 - if (r) { 655 - dev_err(&spi->dev, "%s panel detect error\n", __func__); 656 - return r; 657 - } 658 - 659 - memset(&props, 0, sizeof(props)); 660 - props.fb_blank = FB_BLANK_UNBLANK; 661 - props.power = FB_BLANK_UNBLANK; 662 - props.type = BACKLIGHT_RAW; 663 - 664 - bldev = backlight_device_register("acx565akm", &ddata->spi->dev, 665 - ddata, &acx565akm_bl_ops, &props); 666 - if (IS_ERR(bldev)) 667 - return PTR_ERR(bldev); 668 - ddata->bl_dev = bldev; 669 - if (ddata->has_cabc) { 670 - r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group); 671 - if (r) { 672 - dev_err(&bldev->dev, 673 - "%s failed to create sysfs files\n", __func__); 674 - goto err_backlight_unregister; 675 - } 676 - ddata->cabc_mode = get_hw_cabc_mode(ddata); 677 - } 678 - 679 - max_brightness = 255; 680 - 681 - if (ddata->has_bc) 682 - brightness = acx565akm_get_actual_brightness(ddata); 683 - else 684 - brightness = 0; 685 - 686 - bldev->props.max_brightness = max_brightness; 687 - bldev->props.brightness = brightness; 688 - 689 - acx565akm_bl_update_status(bldev); 690 - 691 - 692 - ddata->vm = acx565akm_panel_vm; 693 - 694 - dssdev = &ddata->dssdev; 695 - dssdev->dev = &spi->dev; 696 - dssdev->ops = &acx565akm_ops; 697 - dssdev->type = OMAP_DISPLAY_TYPE_SDI; 698 - dssdev->display = true; 699 - dssdev->owner = THIS_MODULE; 700 - dssdev->of_ports = BIT(0); 701 - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; 702 - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH 703 - | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE 704 - | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; 705 - 706 - omapdss_display_init(dssdev); 707 - omapdss_device_register(dssdev); 708 - 709 - return 0; 710 - 711 - err_backlight_unregister: 712 - backlight_device_unregister(bldev); 713 - return r; 714 - } 715 - 716 - 
static int acx565akm_remove(struct spi_device *spi) 717 - { 718 - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); 719 - struct omap_dss_device *dssdev = &ddata->dssdev; 720 - 721 - dev_dbg(&ddata->spi->dev, "%s\n", __func__); 722 - 723 - sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group); 724 - backlight_device_unregister(ddata->bl_dev); 725 - 726 - omapdss_device_unregister(dssdev); 727 - 728 - if (omapdss_device_is_enabled(dssdev)) 729 - acx565akm_disable(dssdev); 730 - 731 - return 0; 732 - } 733 - 734 - static const struct of_device_id acx565akm_of_match[] = { 735 - { .compatible = "omapdss,sony,acx565akm", }, 736 - {}, 737 - }; 738 - MODULE_DEVICE_TABLE(of, acx565akm_of_match); 739 - 740 - static struct spi_driver acx565akm_driver = { 741 - .driver = { 742 - .name = "acx565akm", 743 - .of_match_table = acx565akm_of_match, 744 - .suppress_bind_attrs = true, 745 - }, 746 - .probe = acx565akm_probe, 747 - .remove = acx565akm_remove, 748 - }; 749 - 750 - module_spi_driver(acx565akm_driver); 751 - 752 - MODULE_ALIAS("spi:sony,acx565akm"); 753 - MODULE_AUTHOR("Nokia Corporation"); 754 - MODULE_DESCRIPTION("acx565akm LCD Driver"); 755 - MODULE_LICENSE("GPL");
-390
drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Toppoly TD028TTEC1 panel support 4 - * 5 - * Copyright (C) 2008 Nokia Corporation 6 - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> 7 - * 8 - * Neo 1973 code (jbt6k74.c): 9 - * Copyright (C) 2006-2007 by OpenMoko, Inc. 10 - * Author: Harald Welte <laforge@openmoko.org> 11 - * 12 - * Ported and adapted from Neo 1973 U-Boot by: 13 - * H. Nikolaus Schaller <hns@goldelico.com> 14 - */ 15 - 16 - #include <linux/module.h> 17 - #include <linux/delay.h> 18 - #include <linux/spi/spi.h> 19 - 20 - #include "../dss/omapdss.h" 21 - 22 - struct panel_drv_data { 23 - struct omap_dss_device dssdev; 24 - 25 - struct videomode vm; 26 - 27 - struct backlight_device *backlight; 28 - 29 - struct spi_device *spi_dev; 30 - }; 31 - 32 - static const struct videomode td028ttec1_panel_vm = { 33 - .hactive = 480, 34 - .vactive = 640, 35 - .pixelclock = 22153000, 36 - .hfront_porch = 24, 37 - .hsync_len = 8, 38 - .hback_porch = 8, 39 - .vfront_porch = 4, 40 - .vsync_len = 2, 41 - .vback_porch = 2, 42 - 43 - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, 44 - }; 45 - 46 - #define JBT_COMMAND 0x000 47 - #define JBT_DATA 0x100 48 - 49 - static int jbt_ret_write_0(struct panel_drv_data *ddata, u8 reg) 50 - { 51 - int rc; 52 - u16 tx_buf = JBT_COMMAND | reg; 53 - 54 - rc = spi_write(ddata->spi_dev, (u8 *)&tx_buf, 55 - 1*sizeof(u16)); 56 - if (rc != 0) 57 - dev_err(&ddata->spi_dev->dev, 58 - "jbt_ret_write_0 spi_write ret %d\n", rc); 59 - 60 - return rc; 61 - } 62 - 63 - static int jbt_reg_write_1(struct panel_drv_data *ddata, u8 reg, u8 data) 64 - { 65 - int rc; 66 - u16 tx_buf[2]; 67 - 68 - tx_buf[0] = JBT_COMMAND | reg; 69 - tx_buf[1] = JBT_DATA | data; 70 - rc = spi_write(ddata->spi_dev, (u8 *)tx_buf, 71 - 2*sizeof(u16)); 72 - if (rc != 0) 73 - dev_err(&ddata->spi_dev->dev, 74 - "jbt_reg_write_1 spi_write ret %d\n", rc); 75 - 76 - return rc; 77 - } 78 - 79 - static int jbt_reg_write_2(struct panel_drv_data *ddata, u8 
reg, u16 data) 80 - { 81 - int rc; 82 - u16 tx_buf[3]; 83 - 84 - tx_buf[0] = JBT_COMMAND | reg; 85 - tx_buf[1] = JBT_DATA | (data >> 8); 86 - tx_buf[2] = JBT_DATA | (data & 0xff); 87 - 88 - rc = spi_write(ddata->spi_dev, (u8 *)tx_buf, 89 - 3*sizeof(u16)); 90 - 91 - if (rc != 0) 92 - dev_err(&ddata->spi_dev->dev, 93 - "jbt_reg_write_2 spi_write ret %d\n", rc); 94 - 95 - return rc; 96 - } 97 - 98 - enum jbt_register { 99 - JBT_REG_SLEEP_IN = 0x10, 100 - JBT_REG_SLEEP_OUT = 0x11, 101 - 102 - JBT_REG_DISPLAY_OFF = 0x28, 103 - JBT_REG_DISPLAY_ON = 0x29, 104 - 105 - JBT_REG_RGB_FORMAT = 0x3a, 106 - JBT_REG_QUAD_RATE = 0x3b, 107 - 108 - JBT_REG_POWER_ON_OFF = 0xb0, 109 - JBT_REG_BOOSTER_OP = 0xb1, 110 - JBT_REG_BOOSTER_MODE = 0xb2, 111 - JBT_REG_BOOSTER_FREQ = 0xb3, 112 - JBT_REG_OPAMP_SYSCLK = 0xb4, 113 - JBT_REG_VSC_VOLTAGE = 0xb5, 114 - JBT_REG_VCOM_VOLTAGE = 0xb6, 115 - JBT_REG_EXT_DISPL = 0xb7, 116 - JBT_REG_OUTPUT_CONTROL = 0xb8, 117 - JBT_REG_DCCLK_DCEV = 0xb9, 118 - JBT_REG_DISPLAY_MODE1 = 0xba, 119 - JBT_REG_DISPLAY_MODE2 = 0xbb, 120 - JBT_REG_DISPLAY_MODE = 0xbc, 121 - JBT_REG_ASW_SLEW = 0xbd, 122 - JBT_REG_DUMMY_DISPLAY = 0xbe, 123 - JBT_REG_DRIVE_SYSTEM = 0xbf, 124 - 125 - JBT_REG_SLEEP_OUT_FR_A = 0xc0, 126 - JBT_REG_SLEEP_OUT_FR_B = 0xc1, 127 - JBT_REG_SLEEP_OUT_FR_C = 0xc2, 128 - JBT_REG_SLEEP_IN_LCCNT_D = 0xc3, 129 - JBT_REG_SLEEP_IN_LCCNT_E = 0xc4, 130 - JBT_REG_SLEEP_IN_LCCNT_F = 0xc5, 131 - JBT_REG_SLEEP_IN_LCCNT_G = 0xc6, 132 - 133 - JBT_REG_GAMMA1_FINE_1 = 0xc7, 134 - JBT_REG_GAMMA1_FINE_2 = 0xc8, 135 - JBT_REG_GAMMA1_INCLINATION = 0xc9, 136 - JBT_REG_GAMMA1_BLUE_OFFSET = 0xca, 137 - 138 - JBT_REG_BLANK_CONTROL = 0xcf, 139 - JBT_REG_BLANK_TH_TV = 0xd0, 140 - JBT_REG_CKV_ON_OFF = 0xd1, 141 - JBT_REG_CKV_1_2 = 0xd2, 142 - JBT_REG_OEV_TIMING = 0xd3, 143 - JBT_REG_ASW_TIMING_1 = 0xd4, 144 - JBT_REG_ASW_TIMING_2 = 0xd5, 145 - 146 - JBT_REG_HCLOCK_VGA = 0xec, 147 - JBT_REG_HCLOCK_QVGA = 0xed, 148 - }; 149 - 150 - #define to_panel_data(p) container_of(p, 
struct panel_drv_data, dssdev) 151 - 152 - static int td028ttec1_panel_connect(struct omap_dss_device *src, 153 - struct omap_dss_device *dst) 154 - { 155 - return 0; 156 - } 157 - 158 - static void td028ttec1_panel_disconnect(struct omap_dss_device *src, 159 - struct omap_dss_device *dst) 160 - { 161 - } 162 - 163 - static void td028ttec1_panel_enable(struct omap_dss_device *dssdev) 164 - { 165 - struct panel_drv_data *ddata = to_panel_data(dssdev); 166 - int r = 0; 167 - 168 - dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state); 169 - 170 - /* three times command zero */ 171 - r |= jbt_ret_write_0(ddata, 0x00); 172 - usleep_range(1000, 2000); 173 - r |= jbt_ret_write_0(ddata, 0x00); 174 - usleep_range(1000, 2000); 175 - r |= jbt_ret_write_0(ddata, 0x00); 176 - usleep_range(1000, 2000); 177 - 178 - if (r) { 179 - dev_warn(dssdev->dev, "%s: transfer error\n", __func__); 180 - return; 181 - } 182 - 183 - /* deep standby out */ 184 - r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x17); 185 - 186 - /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */ 187 - r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE, 0x80); 188 - 189 - /* Quad mode off */ 190 - r |= jbt_reg_write_1(ddata, JBT_REG_QUAD_RATE, 0x00); 191 - 192 - /* AVDD on, XVDD on */ 193 - r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x16); 194 - 195 - /* Output control */ 196 - r |= jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0xfff9); 197 - 198 - /* Sleep mode off */ 199 - r |= jbt_ret_write_0(ddata, JBT_REG_SLEEP_OUT); 200 - 201 - /* at this point we have like 50% grey */ 202 - 203 - /* initialize register set */ 204 - r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE1, 0x01); 205 - r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE2, 0x00); 206 - r |= jbt_reg_write_1(ddata, JBT_REG_RGB_FORMAT, 0x60); 207 - r |= jbt_reg_write_1(ddata, JBT_REG_DRIVE_SYSTEM, 0x10); 208 - r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_OP, 0x56); 209 - r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_MODE, 0x33); 
210 - r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11); 211 - r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11); 212 - r |= jbt_reg_write_1(ddata, JBT_REG_OPAMP_SYSCLK, 0x02); 213 - r |= jbt_reg_write_1(ddata, JBT_REG_VSC_VOLTAGE, 0x2b); 214 - r |= jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40); 215 - r |= jbt_reg_write_1(ddata, JBT_REG_EXT_DISPL, 0x03); 216 - r |= jbt_reg_write_1(ddata, JBT_REG_DCCLK_DCEV, 0x04); 217 - /* 218 - * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement 219 - * to avoid red / blue flicker 220 - */ 221 - r |= jbt_reg_write_1(ddata, JBT_REG_ASW_SLEW, 0x04); 222 - r |= jbt_reg_write_1(ddata, JBT_REG_DUMMY_DISPLAY, 0x00); 223 - 224 - r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_A, 0x11); 225 - r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_B, 0x11); 226 - r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_C, 0x11); 227 - r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040); 228 - r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0); 229 - r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020); 230 - r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0); 231 - 232 - r |= jbt_reg_write_2(ddata, JBT_REG_GAMMA1_FINE_1, 0x5533); 233 - r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_FINE_2, 0x00); 234 - r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_INCLINATION, 0x00); 235 - r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00); 236 - 237 - r |= jbt_reg_write_2(ddata, JBT_REG_HCLOCK_VGA, 0x1f0); 238 - r |= jbt_reg_write_1(ddata, JBT_REG_BLANK_CONTROL, 0x02); 239 - r |= jbt_reg_write_2(ddata, JBT_REG_BLANK_TH_TV, 0x0804); 240 - 241 - r |= jbt_reg_write_1(ddata, JBT_REG_CKV_ON_OFF, 0x01); 242 - r |= jbt_reg_write_2(ddata, JBT_REG_CKV_1_2, 0x0000); 243 - 244 - r |= jbt_reg_write_2(ddata, JBT_REG_OEV_TIMING, 0x0d0e); 245 - r |= jbt_reg_write_2(ddata, JBT_REG_ASW_TIMING_1, 0x11a4); 246 - r |= jbt_reg_write_1(ddata, JBT_REG_ASW_TIMING_2, 0x0e); 247 - 248 - r |= jbt_ret_write_0(ddata, 
JBT_REG_DISPLAY_ON); 249 - 250 - if (r) 251 - dev_err(dssdev->dev, "%s: write error\n", __func__); 252 - 253 - backlight_enable(ddata->backlight); 254 - } 255 - 256 - static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) 257 - { 258 - struct panel_drv_data *ddata = to_panel_data(dssdev); 259 - 260 - backlight_disable(ddata->backlight); 261 - 262 - dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n"); 263 - 264 - jbt_ret_write_0(ddata, JBT_REG_DISPLAY_OFF); 265 - jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002); 266 - jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN); 267 - jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00); 268 - } 269 - 270 - static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev, 271 - struct drm_connector *connector) 272 - { 273 - struct panel_drv_data *ddata = to_panel_data(dssdev); 274 - 275 - return omapdss_display_get_modes(connector, &ddata->vm); 276 - } 277 - 278 - static const struct omap_dss_device_ops td028ttec1_ops = { 279 - .connect = td028ttec1_panel_connect, 280 - .disconnect = td028ttec1_panel_disconnect, 281 - 282 - .enable = td028ttec1_panel_enable, 283 - .disable = td028ttec1_panel_disable, 284 - 285 - .get_modes = td028ttec1_panel_get_modes, 286 - }; 287 - 288 - static int td028ttec1_panel_probe(struct spi_device *spi) 289 - { 290 - struct panel_drv_data *ddata; 291 - struct omap_dss_device *dssdev; 292 - int r; 293 - 294 - dev_dbg(&spi->dev, "%s\n", __func__); 295 - 296 - spi->bits_per_word = 9; 297 - spi->mode = SPI_MODE_3; 298 - 299 - r = spi_setup(spi); 300 - if (r < 0) { 301 - dev_err(&spi->dev, "spi_setup failed: %d\n", r); 302 - return r; 303 - } 304 - 305 - ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 306 - if (ddata == NULL) 307 - return -ENOMEM; 308 - 309 - ddata->backlight = devm_of_find_backlight(&spi->dev); 310 - if (IS_ERR(ddata->backlight)) 311 - return PTR_ERR(ddata->backlight); 312 - 313 - dev_set_drvdata(&spi->dev, ddata); 314 - 315 - ddata->spi_dev = spi; 316 - 317 - 
ddata->vm = td028ttec1_panel_vm; 318 - 319 - dssdev = &ddata->dssdev; 320 - dssdev->dev = &spi->dev; 321 - dssdev->ops = &td028ttec1_ops; 322 - dssdev->type = OMAP_DISPLAY_TYPE_DPI; 323 - dssdev->display = true; 324 - dssdev->owner = THIS_MODULE; 325 - dssdev->of_ports = BIT(0); 326 - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; 327 - 328 - /* 329 - * Note: According to the panel documentation: 330 - * SYNC needs to be driven on the FALLING edge 331 - */ 332 - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH 333 - | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE 334 - | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; 335 - 336 - omapdss_display_init(dssdev); 337 - omapdss_device_register(dssdev); 338 - 339 - return 0; 340 - } 341 - 342 - static int td028ttec1_panel_remove(struct spi_device *spi) 343 - { 344 - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); 345 - struct omap_dss_device *dssdev = &ddata->dssdev; 346 - 347 - dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__); 348 - 349 - omapdss_device_unregister(dssdev); 350 - 351 - td028ttec1_panel_disable(dssdev); 352 - 353 - return 0; 354 - } 355 - 356 - static const struct of_device_id td028ttec1_of_match[] = { 357 - { .compatible = "omapdss,tpo,td028ttec1", }, 358 - /* keep to not break older DTB */ 359 - { .compatible = "omapdss,toppoly,td028ttec1", }, 360 - {}, 361 - }; 362 - 363 - MODULE_DEVICE_TABLE(of, td028ttec1_of_match); 364 - 365 - static const struct spi_device_id td028ttec1_ids[] = { 366 - { "toppoly,td028ttec1", 0 }, 367 - { "tpo,td028ttec1", 0}, 368 - { /* sentinel */ } 369 - }; 370 - 371 - MODULE_DEVICE_TABLE(spi, td028ttec1_ids); 372 - 373 - 374 - static struct spi_driver td028ttec1_spi_driver = { 375 - .probe = td028ttec1_panel_probe, 376 - .remove = td028ttec1_panel_remove, 377 - .id_table = td028ttec1_ids, 378 - 379 - .driver = { 380 - .name = "panel-tpo-td028ttec1", 381 - .of_match_table = td028ttec1_of_match, 382 - .suppress_bind_attrs = true, 383 - }, 384 - }; 385 - 386 - module_spi_driver(td028ttec1_spi_driver); 387 
- 388 - MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>"); 389 - MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver"); 390 - MODULE_LICENSE("GPL");
-513
drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * TPO TD043MTEA1 Panel driver 4 - * 5 - * Author: Gražvydas Ignotas <notasas@gmail.com> 6 - * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com> 7 - */ 8 - 9 - #include <linux/delay.h> 10 - #include <linux/err.h> 11 - #include <linux/gpio/consumer.h> 12 - #include <linux/module.h> 13 - #include <linux/regulator/consumer.h> 14 - #include <linux/slab.h> 15 - #include <linux/spi/spi.h> 16 - 17 - #include "../dss/omapdss.h" 18 - 19 - #define TPO_R02_MODE(x) ((x) & 7) 20 - #define TPO_R02_MODE_800x480 7 21 - #define TPO_R02_NCLK_RISING BIT(3) 22 - #define TPO_R02_HSYNC_HIGH BIT(4) 23 - #define TPO_R02_VSYNC_HIGH BIT(5) 24 - 25 - #define TPO_R03_NSTANDBY BIT(0) 26 - #define TPO_R03_EN_CP_CLK BIT(1) 27 - #define TPO_R03_EN_VGL_PUMP BIT(2) 28 - #define TPO_R03_EN_PWM BIT(3) 29 - #define TPO_R03_DRIVING_CAP_100 BIT(4) 30 - #define TPO_R03_EN_PRE_CHARGE BIT(6) 31 - #define TPO_R03_SOFTWARE_CTL BIT(7) 32 - 33 - #define TPO_R04_NFLIP_H BIT(0) 34 - #define TPO_R04_NFLIP_V BIT(1) 35 - #define TPO_R04_CP_CLK_FREQ_1H BIT(2) 36 - #define TPO_R04_VGL_FREQ_1H BIT(4) 37 - 38 - #define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \ 39 - TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \ 40 - TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \ 41 - TPO_R03_SOFTWARE_CTL) 42 - 43 - #define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \ 44 - TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL) 45 - 46 - static const u16 tpo_td043_def_gamma[12] = { 47 - 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023 48 - }; 49 - 50 - struct panel_drv_data { 51 - struct omap_dss_device dssdev; 52 - 53 - struct videomode vm; 54 - 55 - struct spi_device *spi; 56 - struct regulator *vcc_reg; 57 - struct gpio_desc *reset_gpio; 58 - u16 gamma[12]; 59 - u32 mode; 60 - u32 vmirror:1; 61 - u32 powered_on:1; 62 - u32 spi_suspended:1; 63 - u32 power_on_resume:1; 64 - }; 65 - 66 - static const struct videomode 
tpo_td043_vm = { 67 - .hactive = 800, 68 - .vactive = 480, 69 - 70 - .pixelclock = 36000000, 71 - 72 - .hsync_len = 1, 73 - .hfront_porch = 68, 74 - .hback_porch = 214, 75 - 76 - .vsync_len = 1, 77 - .vfront_porch = 39, 78 - .vback_porch = 34, 79 - 80 - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, 81 - }; 82 - 83 - #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) 84 - 85 - static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data) 86 - { 87 - struct spi_message m; 88 - struct spi_transfer xfer; 89 - u16 w; 90 - int r; 91 - 92 - spi_message_init(&m); 93 - 94 - memset(&xfer, 0, sizeof(xfer)); 95 - 96 - w = ((u16)addr << 10) | (1 << 8) | data; 97 - xfer.tx_buf = &w; 98 - xfer.bits_per_word = 16; 99 - xfer.len = 2; 100 - spi_message_add_tail(&xfer, &m); 101 - 102 - r = spi_sync(spi, &m); 103 - if (r < 0) 104 - dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r); 105 - return r; 106 - } 107 - 108 - static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12]) 109 - { 110 - u8 i, val; 111 - 112 - /* gamma bits [9:8] */ 113 - for (val = i = 0; i < 4; i++) 114 - val |= (gamma[i] & 0x300) >> ((i + 1) * 2); 115 - tpo_td043_write(spi, 0x11, val); 116 - 117 - for (val = i = 0; i < 4; i++) 118 - val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2); 119 - tpo_td043_write(spi, 0x12, val); 120 - 121 - for (val = i = 0; i < 4; i++) 122 - val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2); 123 - tpo_td043_write(spi, 0x13, val); 124 - 125 - /* gamma bits [7:0] */ 126 - for (val = i = 0; i < 12; i++) 127 - tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff); 128 - } 129 - 130 - static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v) 131 - { 132 - u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V | 133 - TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H; 134 - if (h) 135 - reg4 &= ~TPO_R04_NFLIP_H; 136 - if (v) 137 - reg4 &= ~TPO_R04_NFLIP_V; 138 - 139 - return tpo_td043_write(spi, 4, reg4); 140 - } 141 - 142 - static ssize_t 
tpo_td043_vmirror_show(struct device *dev, 143 - struct device_attribute *attr, char *buf) 144 - { 145 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 146 - 147 - return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror); 148 - } 149 - 150 - static ssize_t tpo_td043_vmirror_store(struct device *dev, 151 - struct device_attribute *attr, const char *buf, size_t count) 152 - { 153 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 154 - int val; 155 - int ret; 156 - 157 - ret = kstrtoint(buf, 0, &val); 158 - if (ret < 0) 159 - return ret; 160 - 161 - val = !!val; 162 - 163 - ret = tpo_td043_write_mirror(ddata->spi, false, val); 164 - if (ret < 0) 165 - return ret; 166 - 167 - ddata->vmirror = val; 168 - 169 - return count; 170 - } 171 - 172 - static ssize_t tpo_td043_mode_show(struct device *dev, 173 - struct device_attribute *attr, char *buf) 174 - { 175 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 176 - 177 - return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode); 178 - } 179 - 180 - static ssize_t tpo_td043_mode_store(struct device *dev, 181 - struct device_attribute *attr, const char *buf, size_t count) 182 - { 183 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 184 - long val; 185 - int ret; 186 - 187 - ret = kstrtol(buf, 0, &val); 188 - if (ret != 0 || val & ~7) 189 - return -EINVAL; 190 - 191 - ddata->mode = val; 192 - 193 - val |= TPO_R02_NCLK_RISING; 194 - tpo_td043_write(ddata->spi, 2, val); 195 - 196 - return count; 197 - } 198 - 199 - static ssize_t tpo_td043_gamma_show(struct device *dev, 200 - struct device_attribute *attr, char *buf) 201 - { 202 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 203 - ssize_t len = 0; 204 - int ret; 205 - int i; 206 - 207 - for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) { 208 - ret = snprintf(buf + len, PAGE_SIZE - len, "%u ", 209 - ddata->gamma[i]); 210 - if (ret < 0) 211 - return ret; 212 - len += ret; 213 - } 214 - buf[len - 1] = '\n'; 215 - 216 - return len; 217 - } 218 - 219 - static 
ssize_t tpo_td043_gamma_store(struct device *dev, 220 - struct device_attribute *attr, const char *buf, size_t count) 221 - { 222 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 223 - unsigned int g[12]; 224 - int ret; 225 - int i; 226 - 227 - ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u", 228 - &g[0], &g[1], &g[2], &g[3], &g[4], &g[5], 229 - &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]); 230 - 231 - if (ret != 12) 232 - return -EINVAL; 233 - 234 - for (i = 0; i < 12; i++) 235 - ddata->gamma[i] = g[i]; 236 - 237 - tpo_td043_write_gamma(ddata->spi, ddata->gamma); 238 - 239 - return count; 240 - } 241 - 242 - static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR, 243 - tpo_td043_vmirror_show, tpo_td043_vmirror_store); 244 - static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, 245 - tpo_td043_mode_show, tpo_td043_mode_store); 246 - static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR, 247 - tpo_td043_gamma_show, tpo_td043_gamma_store); 248 - 249 - static struct attribute *tpo_td043_attrs[] = { 250 - &dev_attr_vmirror.attr, 251 - &dev_attr_mode.attr, 252 - &dev_attr_gamma.attr, 253 - NULL, 254 - }; 255 - 256 - static const struct attribute_group tpo_td043_attr_group = { 257 - .attrs = tpo_td043_attrs, 258 - }; 259 - 260 - static int tpo_td043_power_on(struct panel_drv_data *ddata) 261 - { 262 - int r; 263 - 264 - if (ddata->powered_on) 265 - return 0; 266 - 267 - r = regulator_enable(ddata->vcc_reg); 268 - if (r != 0) 269 - return r; 270 - 271 - /* wait for panel to stabilize */ 272 - msleep(160); 273 - 274 - gpiod_set_value(ddata->reset_gpio, 0); 275 - 276 - tpo_td043_write(ddata->spi, 2, 277 - TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING); 278 - tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL); 279 - tpo_td043_write(ddata->spi, 0x20, 0xf0); 280 - tpo_td043_write(ddata->spi, 0x21, 0xf0); 281 - tpo_td043_write_mirror(ddata->spi, false, ddata->vmirror); 282 - tpo_td043_write_gamma(ddata->spi, ddata->gamma); 283 - 284 - ddata->powered_on = 1; 285 - return 0; 286 - } 287 - 288 
- static void tpo_td043_power_off(struct panel_drv_data *ddata) 289 - { 290 - if (!ddata->powered_on) 291 - return; 292 - 293 - tpo_td043_write(ddata->spi, 3, 294 - TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM); 295 - 296 - gpiod_set_value(ddata->reset_gpio, 1); 297 - 298 - /* wait for at least 2 vsyncs before cutting off power */ 299 - msleep(50); 300 - 301 - tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_STANDBY); 302 - 303 - regulator_disable(ddata->vcc_reg); 304 - 305 - ddata->powered_on = 0; 306 - } 307 - 308 - static int tpo_td043_connect(struct omap_dss_device *src, 309 - struct omap_dss_device *dst) 310 - { 311 - return 0; 312 - } 313 - 314 - static void tpo_td043_disconnect(struct omap_dss_device *src, 315 - struct omap_dss_device *dst) 316 - { 317 - } 318 - 319 - static void tpo_td043_enable(struct omap_dss_device *dssdev) 320 - { 321 - struct panel_drv_data *ddata = to_panel_data(dssdev); 322 - int r; 323 - 324 - /* 325 - * If we are resuming from system suspend, SPI clocks might not be 326 - * enabled yet, so we'll program the LCD from SPI PM resume callback. 
327 - */ 328 - if (!ddata->spi_suspended) { 329 - r = tpo_td043_power_on(ddata); 330 - if (r) { 331 - dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n", 332 - __func__, r); 333 - return; 334 - } 335 - } 336 - } 337 - 338 - static void tpo_td043_disable(struct omap_dss_device *dssdev) 339 - { 340 - struct panel_drv_data *ddata = to_panel_data(dssdev); 341 - 342 - if (!ddata->spi_suspended) 343 - tpo_td043_power_off(ddata); 344 - } 345 - 346 - static int tpo_td043_get_modes(struct omap_dss_device *dssdev, 347 - struct drm_connector *connector) 348 - { 349 - struct panel_drv_data *ddata = to_panel_data(dssdev); 350 - 351 - return omapdss_display_get_modes(connector, &ddata->vm); 352 - } 353 - 354 - static const struct omap_dss_device_ops tpo_td043_ops = { 355 - .connect = tpo_td043_connect, 356 - .disconnect = tpo_td043_disconnect, 357 - 358 - .enable = tpo_td043_enable, 359 - .disable = tpo_td043_disable, 360 - 361 - .get_modes = tpo_td043_get_modes, 362 - }; 363 - 364 - static int tpo_td043_probe(struct spi_device *spi) 365 - { 366 - struct panel_drv_data *ddata; 367 - struct omap_dss_device *dssdev; 368 - struct gpio_desc *gpio; 369 - int r; 370 - 371 - dev_dbg(&spi->dev, "%s\n", __func__); 372 - 373 - spi->bits_per_word = 16; 374 - spi->mode = SPI_MODE_0; 375 - 376 - r = spi_setup(spi); 377 - if (r < 0) { 378 - dev_err(&spi->dev, "spi_setup failed: %d\n", r); 379 - return r; 380 - } 381 - 382 - ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 383 - if (ddata == NULL) 384 - return -ENOMEM; 385 - 386 - dev_set_drvdata(&spi->dev, ddata); 387 - 388 - ddata->spi = spi; 389 - 390 - ddata->mode = TPO_R02_MODE_800x480; 391 - memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma)); 392 - 393 - ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc"); 394 - if (IS_ERR(ddata->vcc_reg)) { 395 - dev_err(&spi->dev, "failed to get LCD VCC regulator\n"); 396 - return PTR_ERR(ddata->vcc_reg); 397 - } 398 - 399 - gpio = devm_gpiod_get(&spi->dev, "reset", 
GPIOD_OUT_HIGH); 400 - if (IS_ERR(gpio)) { 401 - dev_err(&spi->dev, "failed to get reset gpio\n"); 402 - return PTR_ERR(gpio); 403 - } 404 - 405 - ddata->reset_gpio = gpio; 406 - 407 - r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group); 408 - if (r) { 409 - dev_err(&spi->dev, "failed to create sysfs files\n"); 410 - return r; 411 - } 412 - 413 - ddata->vm = tpo_td043_vm; 414 - 415 - dssdev = &ddata->dssdev; 416 - dssdev->dev = &spi->dev; 417 - dssdev->ops = &tpo_td043_ops; 418 - dssdev->type = OMAP_DISPLAY_TYPE_DPI; 419 - dssdev->display = true; 420 - dssdev->owner = THIS_MODULE; 421 - dssdev->of_ports = BIT(0); 422 - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; 423 - 424 - /* 425 - * Note: According to the panel documentation: 426 - * SYNC needs to be driven on the FALLING edge 427 - */ 428 - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH 429 - | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE 430 - | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; 431 - 432 - omapdss_display_init(dssdev); 433 - omapdss_device_register(dssdev); 434 - 435 - return 0; 436 - } 437 - 438 - static int tpo_td043_remove(struct spi_device *spi) 439 - { 440 - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); 441 - struct omap_dss_device *dssdev = &ddata->dssdev; 442 - 443 - dev_dbg(&ddata->spi->dev, "%s\n", __func__); 444 - 445 - omapdss_device_unregister(dssdev); 446 - 447 - if (omapdss_device_is_enabled(dssdev)) 448 - tpo_td043_disable(dssdev); 449 - 450 - sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); 451 - 452 - return 0; 453 - } 454 - 455 - #ifdef CONFIG_PM_SLEEP 456 - static int tpo_td043_spi_suspend(struct device *dev) 457 - { 458 - struct panel_drv_data *ddata = dev_get_drvdata(dev); 459 - 460 - dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", ddata); 461 - 462 - ddata->power_on_resume = ddata->powered_on; 463 - tpo_td043_power_off(ddata); 464 - ddata->spi_suspended = 1; 465 - 466 - return 0; 467 - } 468 - 469 - static int tpo_td043_spi_resume(struct device *dev) 470 - { 471 - struct 
panel_drv_data *ddata = dev_get_drvdata(dev); 472 - int ret; 473 - 474 - dev_dbg(dev, "tpo_td043_spi_resume\n"); 475 - 476 - if (ddata->power_on_resume) { 477 - ret = tpo_td043_power_on(ddata); 478 - if (ret) 479 - return ret; 480 - } 481 - ddata->spi_suspended = 0; 482 - 483 - return 0; 484 - } 485 - #endif 486 - 487 - static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm, 488 - tpo_td043_spi_suspend, tpo_td043_spi_resume); 489 - 490 - static const struct of_device_id tpo_td043_of_match[] = { 491 - { .compatible = "omapdss,tpo,td043mtea1", }, 492 - {}, 493 - }; 494 - 495 - MODULE_DEVICE_TABLE(of, tpo_td043_of_match); 496 - 497 - static struct spi_driver tpo_td043_spi_driver = { 498 - .driver = { 499 - .name = "panel-tpo-td043mtea1", 500 - .pm = &tpo_td043_spi_pm, 501 - .of_match_table = tpo_td043_of_match, 502 - .suppress_bind_attrs = true, 503 - }, 504 - .probe = tpo_td043_probe, 505 - .remove = tpo_td043_remove, 506 - }; 507 - 508 - module_spi_driver(tpo_td043_spi_driver); 509 - 510 - MODULE_ALIAS("spi:tpo,td043mtea1"); 511 - MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>"); 512 - MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver"); 513 - MODULE_LICENSE("GPL");
-7
drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
··· 176 176 static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = { 177 177 { .compatible = "composite-video-connector" }, 178 178 { .compatible = "hdmi-connector" }, 179 - { .compatible = "lgphilips,lb035q02" }, 180 - { .compatible = "nec,nl8048hl11" }, 181 179 { .compatible = "panel-dsi-cm" }, 182 - { .compatible = "sharp,ls037v7dw01" }, 183 - { .compatible = "sony,acx565akm" }, 184 180 { .compatible = "svideo-connector" }, 185 181 { .compatible = "ti,opa362" }, 186 182 { .compatible = "ti,tpd12s015" }, 187 - { .compatible = "toppoly,td028ttec1" }, 188 - { .compatible = "tpo,td028ttec1" }, 189 - { .compatible = "tpo,td043mtea1" }, 190 183 {}, 191 184 }; 192 185
+46
drivers/gpu/drm/panel/Kconfig
··· 103 103 depends on OF && SPI 104 104 select VIDEOMODE_HELPERS 105 105 106 + config DRM_PANEL_LG_LB035Q02 107 + tristate "LG LB035Q024573 RGB panel" 108 + depends on GPIOLIB && OF && SPI 109 + help 110 + Say Y here if you want to enable support for the LB035Q02 RGB panel 111 + (found on the Gumstix Overo Palo35 board). To compile this driver as 112 + a module, choose M here. 113 + 106 114 config DRM_PANEL_LG_LG4573 107 115 tristate "LG4573 RGB/SPI panel" 108 116 depends on OF && SPI ··· 118 110 help 119 111 Say Y here if you want to enable support for LG4573 RGB panel. 120 112 To compile this driver as a module, choose M here. 113 + 114 + config DRM_PANEL_NEC_NL8048HL11 115 + tristate "NEC NL8048HL11 RGB panel" 116 + depends on GPIOLIB && OF && SPI 117 + help 118 + Say Y here if you want to enable support for the NEC NL8048HL11 RGB 119 + panel (found on the Zoom2/3/3630 SDP boards). To compile this driver 120 + as a module, choose M here. 121 121 122 122 config DRM_PANEL_NOVATEK_NT39016 123 123 tristate "Novatek NT39016 RGB/SPI panel" ··· 282 266 To compile this driver as a module, choose M here: the module 283 267 will be called panel-sharp-lq101r1sx01. 284 268 269 + config DRM_PANEL_SHARP_LS037V7DW01 270 + tristate "Sharp LS037V7DW01 VGA LCD panel" 271 + depends on GPIOLIB && OF && REGULATOR 272 + help 273 + Say Y here if you want to enable support for Sharp LS037V7DW01 VGA 274 + (480x640) LCD panel (found on the TI SDP3430 board). 
275 + 285 276 config DRM_PANEL_SHARP_LS043T1LE01 286 277 tristate "Sharp LS043T1LE01 qHD video mode panel" 287 278 depends on OF ··· 315 292 help 316 293 Say Y here if you want to enable support for the Sitronix 317 294 ST7789V controller for 240x320 LCD panels 295 + 296 + config DRM_PANEL_SONY_ACX565AKM 297 + tristate "Sony ACX565AKM panel" 298 + depends on GPIOLIB && OF && SPI 299 + depends on BACKLIGHT_CLASS_DEVICE 300 + help 301 + Say Y here if you want to enable support for the Sony ACX565AKM 302 + 800x600 3.5" panel (found on the Nokia N900). 303 + 304 + config DRM_PANEL_TPO_TD028TTEC1 305 + tristate "Toppoly (TPO) TD028TTEC1 panel driver" 306 + depends on OF && SPI 307 + depends on BACKLIGHT_CLASS_DEVICE 308 + help 309 + Say Y here if you want to enable support for TPO TD028TTEC1 480x640 310 + 2.8" panel (found on the OpenMoko Neo FreeRunner and Neo 1973). 311 + 312 + config DRM_PANEL_TPO_TD043MTEA1 313 + tristate "Toppoly (TPO) TD043MTEA1 panel driver" 314 + depends on GPIOLIB && OF && REGULATOR && SPI 315 + help 316 + Say Y here if you want to enable support for TPO TD043MTEA1 800x480 317 + 4.3" panel (found on the OMAP3 Pandora board). 318 318 319 319 config DRM_PANEL_TPO_TPG110 320 320 tristate "TPO TPG 800x400 panel"
+6
drivers/gpu/drm/panel/Makefile
··· 8 8 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o 9 9 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o 10 10 obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o 11 + obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o 11 12 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o 13 + obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o 12 14 obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o 13 15 obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o 14 16 obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o ··· 29 27 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o 30 28 obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o 31 29 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o 30 + obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o 32 31 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o 33 32 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o 34 33 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o 34 + obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o 35 + obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o 36 + obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o 35 37 obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o 36 38 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+16 -18
drivers/gpu/drm/panel/panel-ilitek-ili9322.c
··· 349 349 350 350 static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili) 351 351 { 352 - struct drm_connector *connector = panel->connector; 353 352 u8 reg; 354 353 int ret; 355 354 int i; ··· 406 407 * Polarity and inverted color order for RGB input. 407 408 * None of this applies in the BT.656 mode. 408 409 */ 409 - if (ili->conf->dclk_active_high) { 410 + reg = 0; 411 + if (ili->conf->dclk_active_high) 410 412 reg = ILI9322_POL_DCLK; 411 - connector->display_info.bus_flags |= 412 - DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; 413 - } else { 414 - reg = 0; 415 - connector->display_info.bus_flags |= 416 - DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; 417 - } 418 - if (ili->conf->de_active_high) { 413 + if (ili->conf->de_active_high) 419 414 reg |= ILI9322_POL_DE; 420 - connector->display_info.bus_flags |= 421 - DRM_BUS_FLAG_DE_HIGH; 422 - } else { 423 - connector->display_info.bus_flags |= 424 - DRM_BUS_FLAG_DE_LOW; 425 - } 426 415 if (ili->conf->hsync_active_high) 427 416 reg |= ILI9322_POL_HSYNC; 428 417 if (ili->conf->vsync_active_high) ··· 646 659 struct drm_connector *connector = panel->connector; 647 660 struct ili9322 *ili = panel_to_ili9322(panel); 648 661 struct drm_display_mode *mode; 662 + struct drm_display_info *info; 649 663 650 - connector->display_info.width_mm = ili->conf->width_mm; 651 - connector->display_info.height_mm = ili->conf->height_mm; 664 + info = &connector->display_info; 665 + info->width_mm = ili->conf->width_mm; 666 + info->height_mm = ili->conf->height_mm; 667 + if (ili->conf->dclk_active_high) 668 + info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; 669 + else 670 + info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; 671 + 672 + if (ili->conf->de_active_high) 673 + info->bus_flags |= DRM_BUS_FLAG_DE_HIGH; 674 + else 675 + info->bus_flags |= DRM_BUS_FLAG_DE_LOW; 652 676 653 677 switch (ili->input) { 654 678 case ILI9322_INPUT_SRGB_DUMMY_320X240:
+237
drivers/gpu/drm/panel/panel-lg-lb035q02.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * LG.Philips LB035Q02 LCD Panel Driver 4 + * 5 + * Copyright (C) 2019 Texas Instruments Incorporated 6 + * 7 + * Based on the omapdrm-specific panel-lgphilips-lb035q02 driver 8 + * 9 + * Copyright (C) 2013 Texas Instruments Incorporated 10 + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> 11 + * 12 + * Based on a driver by: Steve Sakoman <steve@sakoman.com> 13 + */ 14 + 15 + #include <linux/gpio/consumer.h> 16 + #include <linux/module.h> 17 + #include <linux/spi/spi.h> 18 + 19 + #include <drm/drm_connector.h> 20 + #include <drm/drm_modes.h> 21 + #include <drm/drm_panel.h> 22 + 23 + struct lb035q02_device { 24 + struct drm_panel panel; 25 + 26 + struct spi_device *spi; 27 + struct gpio_desc *enable_gpio; 28 + }; 29 + 30 + #define to_lb035q02_device(p) container_of(p, struct lb035q02_device, panel) 31 + 32 + static int lb035q02_write(struct lb035q02_device *lcd, u16 reg, u16 val) 33 + { 34 + struct spi_message msg; 35 + struct spi_transfer index_xfer = { 36 + .len = 3, 37 + .cs_change = 1, 38 + }; 39 + struct spi_transfer value_xfer = { 40 + .len = 3, 41 + }; 42 + u8 buffer[16]; 43 + 44 + spi_message_init(&msg); 45 + 46 + /* register index */ 47 + buffer[0] = 0x70; 48 + buffer[1] = 0x00; 49 + buffer[2] = reg & 0x7f; 50 + index_xfer.tx_buf = buffer; 51 + spi_message_add_tail(&index_xfer, &msg); 52 + 53 + /* register value */ 54 + buffer[4] = 0x72; 55 + buffer[5] = val >> 8; 56 + buffer[6] = val; 57 + value_xfer.tx_buf = buffer + 4; 58 + spi_message_add_tail(&value_xfer, &msg); 59 + 60 + return spi_sync(lcd->spi, &msg); 61 + } 62 + 63 + static int lb035q02_init(struct lb035q02_device *lcd) 64 + { 65 + /* Init sequence from page 28 of the lb035q02 spec. 
*/ 66 + static const struct { 67 + u16 index; 68 + u16 value; 69 + } init_data[] = { 70 + { 0x01, 0x6300 }, 71 + { 0x02, 0x0200 }, 72 + { 0x03, 0x0177 }, 73 + { 0x04, 0x04c7 }, 74 + { 0x05, 0xffc0 }, 75 + { 0x06, 0xe806 }, 76 + { 0x0a, 0x4008 }, 77 + { 0x0b, 0x0000 }, 78 + { 0x0d, 0x0030 }, 79 + { 0x0e, 0x2800 }, 80 + { 0x0f, 0x0000 }, 81 + { 0x16, 0x9f80 }, 82 + { 0x17, 0x0a0f }, 83 + { 0x1e, 0x00c1 }, 84 + { 0x30, 0x0300 }, 85 + { 0x31, 0x0007 }, 86 + { 0x32, 0x0000 }, 87 + { 0x33, 0x0000 }, 88 + { 0x34, 0x0707 }, 89 + { 0x35, 0x0004 }, 90 + { 0x36, 0x0302 }, 91 + { 0x37, 0x0202 }, 92 + { 0x3a, 0x0a0d }, 93 + { 0x3b, 0x0806 }, 94 + }; 95 + 96 + unsigned int i; 97 + int ret; 98 + 99 + for (i = 0; i < ARRAY_SIZE(init_data); ++i) { 100 + ret = lb035q02_write(lcd, init_data[i].index, 101 + init_data[i].value); 102 + if (ret < 0) 103 + return ret; 104 + } 105 + 106 + return 0; 107 + } 108 + 109 + static int lb035q02_disable(struct drm_panel *panel) 110 + { 111 + struct lb035q02_device *lcd = to_lb035q02_device(panel); 112 + 113 + gpiod_set_value_cansleep(lcd->enable_gpio, 0); 114 + 115 + return 0; 116 + } 117 + 118 + static int lb035q02_enable(struct drm_panel *panel) 119 + { 120 + struct lb035q02_device *lcd = to_lb035q02_device(panel); 121 + 122 + gpiod_set_value_cansleep(lcd->enable_gpio, 1); 123 + 124 + return 0; 125 + } 126 + 127 + static const struct drm_display_mode lb035q02_mode = { 128 + .clock = 6500, 129 + .hdisplay = 320, 130 + .hsync_start = 320 + 20, 131 + .hsync_end = 320 + 20 + 2, 132 + .htotal = 320 + 20 + 2 + 68, 133 + .vdisplay = 240, 134 + .vsync_start = 240 + 4, 135 + .vsync_end = 240 + 4 + 2, 136 + .vtotal = 240 + 4 + 2 + 18, 137 + .vrefresh = 60, 138 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 139 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 140 + .width_mm = 70, 141 + .height_mm = 53, 142 + }; 143 + 144 + static int lb035q02_get_modes(struct drm_panel *panel) 145 + { 146 + struct drm_connector *connector = 
panel->connector; 147 + struct drm_display_mode *mode; 148 + 149 + mode = drm_mode_duplicate(panel->drm, &lb035q02_mode); 150 + if (!mode) 151 + return -ENOMEM; 152 + 153 + drm_mode_set_name(mode); 154 + drm_mode_probed_add(connector, mode); 155 + 156 + connector->display_info.width_mm = lb035q02_mode.width_mm; 157 + connector->display_info.height_mm = lb035q02_mode.height_mm; 158 + /* 159 + * FIXME: According to the datasheet pixel data is sampled on the 160 + * rising edge of the clock, but the code running on the Gumstix Overo 161 + * Palo35 indicates sampling on the negative edge. This should be 162 + * tested on a real device. 163 + */ 164 + connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH 165 + | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE 166 + | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE; 167 + 168 + return 1; 169 + } 170 + 171 + static const struct drm_panel_funcs lb035q02_funcs = { 172 + .disable = lb035q02_disable, 173 + .enable = lb035q02_enable, 174 + .get_modes = lb035q02_get_modes, 175 + }; 176 + 177 + static int lb035q02_probe(struct spi_device *spi) 178 + { 179 + struct lb035q02_device *lcd; 180 + int ret; 181 + 182 + lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL); 183 + if (!lcd) 184 + return -ENOMEM; 185 + 186 + spi_set_drvdata(spi, lcd); 187 + lcd->spi = spi; 188 + 189 + lcd->enable_gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW); 190 + if (IS_ERR(lcd->enable_gpio)) { 191 + dev_err(&spi->dev, "failed to parse enable gpio\n"); 192 + return PTR_ERR(lcd->enable_gpio); 193 + } 194 + 195 + ret = lb035q02_init(lcd); 196 + if (ret < 0) 197 + return ret; 198 + 199 + drm_panel_init(&lcd->panel); 200 + lcd->panel.dev = &lcd->spi->dev; 201 + lcd->panel.funcs = &lb035q02_funcs; 202 + 203 + return drm_panel_add(&lcd->panel); 204 + } 205 + 206 + static int lb035q02_remove(struct spi_device *spi) 207 + { 208 + struct lb035q02_device *lcd = spi_get_drvdata(spi); 209 + 210 + drm_panel_remove(&lcd->panel); 211 + drm_panel_disable(&lcd->panel); 212 + 213 
+ return 0; 214 + } 215 + 216 + static const struct of_device_id lb035q02_of_match[] = { 217 + { .compatible = "lgphilips,lb035q02", }, 218 + { /* sentinel */ }, 219 + }; 220 + 221 + MODULE_DEVICE_TABLE(of, lb035q02_of_match); 222 + 223 + static struct spi_driver lb035q02_driver = { 224 + .probe = lb035q02_probe, 225 + .remove = lb035q02_remove, 226 + .driver = { 227 + .name = "panel-lg-lb035q02", 228 + .of_match_table = lb035q02_of_match, 229 + }, 230 + }; 231 + 232 + module_spi_driver(lb035q02_driver); 233 + 234 + MODULE_ALIAS("spi:lgphilips,lb035q02"); 235 + MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); 236 + MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver"); 237 + MODULE_LICENSE("GPL");
+248
drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * NEC NL8048HL11 Panel Driver 4 + * 5 + * Copyright (C) 2019 Texas Instruments Incorporated 6 + * 7 + * Based on the omapdrm-specific panel-nec-nl8048hl11 driver 8 + * 9 + * Copyright (C) 2010 Texas Instruments Incorporated 10 + * Author: Erik Gilling <konkers@android.com> 11 + */ 12 + 13 + #include <linux/delay.h> 14 + #include <linux/gpio/consumer.h> 15 + #include <linux/module.h> 16 + #include <linux/pm.h> 17 + #include <linux/spi/spi.h> 18 + 19 + #include <drm/drm_connector.h> 20 + #include <drm/drm_modes.h> 21 + #include <drm/drm_panel.h> 22 + 23 + struct nl8048_panel { 24 + struct drm_panel panel; 25 + 26 + struct spi_device *spi; 27 + struct gpio_desc *reset_gpio; 28 + }; 29 + 30 + #define to_nl8048_device(p) container_of(p, struct nl8048_panel, panel) 31 + 32 + static int nl8048_write(struct nl8048_panel *lcd, unsigned char addr, 33 + unsigned char value) 34 + { 35 + u8 data[4] = { value, 0x01, addr, 0x00 }; 36 + int ret; 37 + 38 + ret = spi_write(lcd->spi, data, sizeof(data)); 39 + if (ret) 40 + dev_err(&lcd->spi->dev, "SPI write to %u failed: %d\n", 41 + addr, ret); 42 + 43 + return ret; 44 + } 45 + 46 + static int nl8048_init(struct nl8048_panel *lcd) 47 + { 48 + static const struct { 49 + unsigned char addr; 50 + unsigned char data; 51 + } nl8048_init_seq[] = { 52 + { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, 53 + { 5, 0x14 }, { 6, 0x24 }, { 16, 0xd7 }, { 17, 0x00 }, 54 + { 18, 0x00 }, { 19, 0x55 }, { 20, 0x01 }, { 21, 0x70 }, 55 + { 22, 0x1e }, { 23, 0x25 }, { 24, 0x25 }, { 25, 0x02 }, 56 + { 26, 0x02 }, { 27, 0xa0 }, { 32, 0x2f }, { 33, 0x0f }, 57 + { 34, 0x0f }, { 35, 0x0f }, { 36, 0x0f }, { 37, 0x0f }, 58 + { 38, 0x0f }, { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, 59 + { 42, 0x02 }, { 43, 0x0f }, { 44, 0x0f }, { 45, 0x0f }, 60 + { 46, 0x0f }, { 47, 0x0f }, { 48, 0x0f }, { 49, 0x0f }, 61 + { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 }, 62 + { 80, 0x0c }, { 83, 0x42 }, { 84, 0x42 
}, { 85, 0x41 }, 63 + { 86, 0x14 }, { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, 64 + { 92, 0x02 }, { 93, 0x0c }, { 94, 0x1c }, { 95, 0x27 }, 65 + { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 }, { 103, 0x27 }, 66 + { 112, 0x01 }, { 113, 0x0e }, { 114, 0x02 }, { 115, 0x0c }, 67 + { 118, 0x0c }, { 121, 0x30 }, { 130, 0x00 }, { 131, 0x00 }, 68 + { 132, 0xfc }, { 134, 0x00 }, { 136, 0x00 }, { 138, 0x00 }, 69 + { 139, 0x00 }, { 140, 0x00 }, { 141, 0xfc }, { 143, 0x00 }, 70 + { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 }, { 149, 0x00 }, 71 + { 150, 0xfc }, { 152, 0x00 }, { 154, 0x00 }, { 156, 0x00 }, 72 + { 157, 0x00 }, 73 + }; 74 + 75 + unsigned int i; 76 + int ret; 77 + 78 + for (i = 0; i < ARRAY_SIZE(nl8048_init_seq); ++i) { 79 + ret = nl8048_write(lcd, nl8048_init_seq[i].addr, 80 + nl8048_init_seq[i].data); 81 + if (ret < 0) 82 + return ret; 83 + } 84 + 85 + udelay(20); 86 + 87 + return nl8048_write(lcd, 2, 0x00); 88 + } 89 + 90 + static int nl8048_disable(struct drm_panel *panel) 91 + { 92 + struct nl8048_panel *lcd = to_nl8048_device(panel); 93 + 94 + gpiod_set_value_cansleep(lcd->reset_gpio, 0); 95 + 96 + return 0; 97 + } 98 + 99 + static int nl8048_enable(struct drm_panel *panel) 100 + { 101 + struct nl8048_panel *lcd = to_nl8048_device(panel); 102 + 103 + gpiod_set_value_cansleep(lcd->reset_gpio, 1); 104 + 105 + return 0; 106 + } 107 + 108 + static const struct drm_display_mode nl8048_mode = { 109 + /* NEC PIX Clock Ratings MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz */ 110 + .clock = 23800, 111 + .hdisplay = 800, 112 + .hsync_start = 800 + 6, 113 + .hsync_end = 800 + 6 + 1, 114 + .htotal = 800 + 6 + 1 + 4, 115 + .vdisplay = 480, 116 + .vsync_start = 480 + 3, 117 + .vsync_end = 480 + 3 + 1, 118 + .vtotal = 480 + 3 + 1 + 4, 119 + .vrefresh = 60, 120 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 121 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 122 + .width_mm = 89, 123 + .height_mm = 53, 124 + }; 125 + 126 + static int nl8048_get_modes(struct drm_panel 
*panel) 127 + { 128 + struct drm_connector *connector = panel->connector; 129 + struct drm_display_mode *mode; 130 + 131 + mode = drm_mode_duplicate(panel->drm, &nl8048_mode); 132 + if (!mode) 133 + return -ENOMEM; 134 + 135 + drm_mode_set_name(mode); 136 + drm_mode_probed_add(connector, mode); 137 + 138 + connector->display_info.width_mm = nl8048_mode.width_mm; 139 + connector->display_info.height_mm = nl8048_mode.height_mm; 140 + connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH 141 + | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE 142 + | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE; 143 + 144 + return 1; 145 + } 146 + 147 + static const struct drm_panel_funcs nl8048_funcs = { 148 + .disable = nl8048_disable, 149 + .enable = nl8048_enable, 150 + .get_modes = nl8048_get_modes, 151 + }; 152 + 153 + static int __maybe_unused nl8048_suspend(struct device *dev) 154 + { 155 + struct nl8048_panel *lcd = dev_get_drvdata(dev); 156 + 157 + nl8048_write(lcd, 2, 0x01); 158 + msleep(40); 159 + 160 + return 0; 161 + } 162 + 163 + static int __maybe_unused nl8048_resume(struct device *dev) 164 + { 165 + struct nl8048_panel *lcd = dev_get_drvdata(dev); 166 + 167 + /* Reinitialize the panel. 
*/ 168 + spi_setup(lcd->spi); 169 + nl8048_write(lcd, 2, 0x00); 170 + nl8048_init(lcd); 171 + 172 + return 0; 173 + } 174 + 175 + static SIMPLE_DEV_PM_OPS(nl8048_pm_ops, nl8048_suspend, nl8048_resume); 176 + 177 + static int nl8048_probe(struct spi_device *spi) 178 + { 179 + struct nl8048_panel *lcd; 180 + int ret; 181 + 182 + lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL); 183 + if (!lcd) 184 + return -ENOMEM; 185 + 186 + spi_set_drvdata(spi, lcd); 187 + lcd->spi = spi; 188 + 189 + lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); 190 + if (IS_ERR(lcd->reset_gpio)) { 191 + dev_err(&spi->dev, "failed to parse reset gpio\n"); 192 + return PTR_ERR(lcd->reset_gpio); 193 + } 194 + 195 + spi->mode = SPI_MODE_0; 196 + spi->bits_per_word = 32; 197 + 198 + ret = spi_setup(spi); 199 + if (ret < 0) { 200 + dev_err(&spi->dev, "failed to setup SPI: %d\n", ret); 201 + return ret; 202 + } 203 + 204 + ret = nl8048_init(lcd); 205 + if (ret < 0) 206 + return ret; 207 + 208 + drm_panel_init(&lcd->panel); 209 + lcd->panel.dev = &lcd->spi->dev; 210 + lcd->panel.funcs = &nl8048_funcs; 211 + 212 + return drm_panel_add(&lcd->panel); 213 + } 214 + 215 + static int nl8048_remove(struct spi_device *spi) 216 + { 217 + struct nl8048_panel *lcd = spi_get_drvdata(spi); 218 + 219 + drm_panel_remove(&lcd->panel); 220 + drm_panel_disable(&lcd->panel); 221 + drm_panel_unprepare(&lcd->panel); 222 + 223 + return 0; 224 + } 225 + 226 + static const struct of_device_id nl8048_of_match[] = { 227 + { .compatible = "nec,nl8048hl11", }, 228 + { /* sentinel */ }, 229 + }; 230 + 231 + MODULE_DEVICE_TABLE(of, nl8048_of_match); 232 + 233 + static struct spi_driver nl8048_driver = { 234 + .probe = nl8048_probe, 235 + .remove = nl8048_remove, 236 + .driver = { 237 + .name = "panel-nec-nl8048hl11", 238 + .pm = &nl8048_pm_ops, 239 + .of_match_table = nl8048_of_match, 240 + }, 241 + }; 242 + 243 + module_spi_driver(nl8048_driver); 244 + 245 + MODULE_ALIAS("spi:nec,nl8048hl11"); 246 + 
MODULE_AUTHOR("Erik Gilling <konkers@android.com>"); 247 + MODULE_DESCRIPTION("NEC-NL8048HL11 Driver"); 248 + MODULE_LICENSE("GPL");
+226
drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Sharp LS037V7DW01 LCD Panel Driver 4 + * 5 + * Copyright (C) 2019 Texas Instruments Incorporated 6 + * 7 + * Based on the omapdrm-specific panel-sharp-ls037v7dw01 driver 8 + * 9 + * Copyright (C) 2013 Texas Instruments Incorporated 10 + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> 11 + */ 12 + 13 + #include <linux/delay.h> 14 + #include <linux/gpio/consumer.h> 15 + #include <linux/module.h> 16 + #include <linux/of.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/regulator/consumer.h> 19 + 20 + #include <drm/drm_connector.h> 21 + #include <drm/drm_modes.h> 22 + #include <drm/drm_panel.h> 23 + 24 + struct ls037v7dw01_panel { 25 + struct drm_panel panel; 26 + struct platform_device *pdev; 27 + 28 + struct regulator *vdd; 29 + struct gpio_desc *resb_gpio; /* low = reset active min 20 us */ 30 + struct gpio_desc *ini_gpio; /* high = power on */ 31 + struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */ 32 + struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */ 33 + struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */ 34 + }; 35 + 36 + #define to_ls037v7dw01_device(p) \ 37 + container_of(p, struct ls037v7dw01_panel, panel) 38 + 39 + static int ls037v7dw01_disable(struct drm_panel *panel) 40 + { 41 + struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel); 42 + 43 + gpiod_set_value_cansleep(lcd->ini_gpio, 0); 44 + gpiod_set_value_cansleep(lcd->resb_gpio, 0); 45 + 46 + /* Wait at least 5 vsyncs after disabling the LCD. 
*/ 47 + msleep(100); 48 + 49 + return 0; 50 + } 51 + 52 + static int ls037v7dw01_unprepare(struct drm_panel *panel) 53 + { 54 + struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel); 55 + 56 + regulator_disable(lcd->vdd); 57 + return 0; 58 + } 59 + 60 + static int ls037v7dw01_prepare(struct drm_panel *panel) 61 + { 62 + struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel); 63 + int ret; 64 + 65 + ret = regulator_enable(lcd->vdd); 66 + if (ret < 0) 67 + dev_err(&lcd->pdev->dev, "%s: failed to enable regulator\n", 68 + __func__); 69 + 70 + return ret; 71 + } 72 + 73 + static int ls037v7dw01_enable(struct drm_panel *panel) 74 + { 75 + struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel); 76 + 77 + /* Wait couple of vsyncs before enabling the LCD. */ 78 + msleep(50); 79 + 80 + gpiod_set_value_cansleep(lcd->resb_gpio, 1); 81 + gpiod_set_value_cansleep(lcd->ini_gpio, 1); 82 + 83 + return 0; 84 + } 85 + 86 + static const struct drm_display_mode ls037v7dw01_mode = { 87 + .clock = 19200, 88 + .hdisplay = 480, 89 + .hsync_start = 480 + 1, 90 + .hsync_end = 480 + 1 + 2, 91 + .htotal = 480 + 1 + 2 + 28, 92 + .vdisplay = 640, 93 + .vsync_start = 640 + 1, 94 + .vsync_end = 640 + 1 + 1, 95 + .vtotal = 640 + 1 + 1 + 1, 96 + .vrefresh = 58, 97 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 98 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 99 + .width_mm = 56, 100 + .height_mm = 75, 101 + }; 102 + 103 + static int ls037v7dw01_get_modes(struct drm_panel *panel) 104 + { 105 + struct drm_connector *connector = panel->connector; 106 + struct drm_display_mode *mode; 107 + 108 + mode = drm_mode_duplicate(panel->drm, &ls037v7dw01_mode); 109 + if (!mode) 110 + return -ENOMEM; 111 + 112 + drm_mode_set_name(mode); 113 + drm_mode_probed_add(connector, mode); 114 + 115 + connector->display_info.width_mm = ls037v7dw01_mode.width_mm; 116 + connector->display_info.height_mm = ls037v7dw01_mode.height_mm; 117 + /* 118 + * FIXME: According to the datasheet 
pixel data is sampled on the 119 + * rising edge of the clock, but the code running on the SDP3430 120 + * indicates sampling on the negative edge. This should be tested on a 121 + * real device. 122 + */ 123 + connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH 124 + | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE 125 + | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE; 126 + 127 + return 1; 128 + } 129 + 130 + static const struct drm_panel_funcs ls037v7dw01_funcs = { 131 + .disable = ls037v7dw01_disable, 132 + .unprepare = ls037v7dw01_unprepare, 133 + .prepare = ls037v7dw01_prepare, 134 + .enable = ls037v7dw01_enable, 135 + .get_modes = ls037v7dw01_get_modes, 136 + }; 137 + 138 + static int ls037v7dw01_probe(struct platform_device *pdev) 139 + { 140 + struct ls037v7dw01_panel *lcd; 141 + 142 + lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL); 143 + if (!lcd) 144 + return -ENOMEM; 145 + 146 + platform_set_drvdata(pdev, lcd); 147 + lcd->pdev = pdev; 148 + 149 + lcd->vdd = devm_regulator_get(&pdev->dev, "envdd"); 150 + if (IS_ERR(lcd->vdd)) { 151 + dev_err(&pdev->dev, "failed to get regulator\n"); 152 + return PTR_ERR(lcd->vdd); 153 + } 154 + 155 + lcd->ini_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW); 156 + if (IS_ERR(lcd->ini_gpio)) { 157 + dev_err(&pdev->dev, "failed to get enable gpio\n"); 158 + return PTR_ERR(lcd->ini_gpio); 159 + } 160 + 161 + lcd->resb_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); 162 + if (IS_ERR(lcd->resb_gpio)) { 163 + dev_err(&pdev->dev, "failed to get reset gpio\n"); 164 + return PTR_ERR(lcd->resb_gpio); 165 + } 166 + 167 + lcd->mo_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 0, 168 + GPIOD_OUT_LOW); 169 + if (IS_ERR(lcd->mo_gpio)) { 170 + dev_err(&pdev->dev, "failed to get mode[0] gpio\n"); 171 + return PTR_ERR(lcd->mo_gpio); 172 + } 173 + 174 + lcd->lr_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 1, 175 + GPIOD_OUT_LOW); 176 + if (IS_ERR(lcd->lr_gpio)) { 177 + dev_err(&pdev->dev, "failed to get mode[1] gpio\n"); 
178 + return PTR_ERR(lcd->lr_gpio); 179 + } 180 + 181 + lcd->ud_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 2, 182 + GPIOD_OUT_LOW); 183 + if (IS_ERR(lcd->ud_gpio)) { 184 + dev_err(&pdev->dev, "failed to get mode[2] gpio\n"); 185 + return PTR_ERR(lcd->ud_gpio); 186 + } 187 + 188 + drm_panel_init(&lcd->panel); 189 + lcd->panel.dev = &pdev->dev; 190 + lcd->panel.funcs = &ls037v7dw01_funcs; 191 + 192 + return drm_panel_add(&lcd->panel); 193 + } 194 + 195 + static int ls037v7dw01_remove(struct platform_device *pdev) 196 + { 197 + struct ls037v7dw01_panel *lcd = platform_get_drvdata(pdev); 198 + 199 + drm_panel_remove(&lcd->panel); 200 + drm_panel_disable(&lcd->panel); 201 + drm_panel_unprepare(&lcd->panel); 202 + 203 + return 0; 204 + } 205 + 206 + static const struct of_device_id ls037v7dw01_of_match[] = { 207 + { .compatible = "sharp,ls037v7dw01", }, 208 + { /* sentinel */ }, 209 + }; 210 + 211 + MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match); 212 + 213 + static struct platform_driver ls037v7dw01_driver = { 214 + .probe = ls037v7dw01_probe, 215 + .remove = ls037v7dw01_remove, 216 + .driver = { 217 + .name = "panel-sharp-ls037v7dw01", 218 + .of_match_table = ls037v7dw01_of_match, 219 + }, 220 + }; 221 + 222 + module_platform_driver(ls037v7dw01_driver); 223 + 224 + MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); 225 + MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver"); 226 + MODULE_LICENSE("GPL");
+64
drivers/gpu/drm/panel/panel-simple.c
··· 2833 2833 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 2834 2834 }; 2835 2835 2836 + static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = { 2837 + { 2838 + .clock = 10000, 2839 + .hdisplay = 320, 2840 + .hsync_start = 320 + 50, 2841 + .hsync_end = 320 + 50 + 6, 2842 + .htotal = 320 + 50 + 6 + 38, 2843 + .vdisplay = 240, 2844 + .vsync_start = 240 + 3, 2845 + .vsync_end = 240 + 3 + 1, 2846 + .vtotal = 240 + 3 + 1 + 17, 2847 + .vrefresh = 60, 2848 + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, 2849 + }, 2850 + }; 2851 + 2852 + static const struct panel_desc ti_nspire_cx_lcd_panel = { 2853 + .modes = ti_nspire_cx_lcd_mode, 2854 + .num_modes = 1, 2855 + .bpc = 8, 2856 + .size = { 2857 + .width = 65, 2858 + .height = 49, 2859 + }, 2860 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 2861 + .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE, 2862 + }; 2863 + 2864 + static const struct drm_display_mode ti_nspire_classic_lcd_mode[] = { 2865 + { 2866 + .clock = 10000, 2867 + .hdisplay = 320, 2868 + .hsync_start = 320 + 6, 2869 + .hsync_end = 320 + 6 + 6, 2870 + .htotal = 320 + 6 + 6 + 6, 2871 + .vdisplay = 240, 2872 + .vsync_start = 240 + 0, 2873 + .vsync_end = 240 + 0 + 1, 2874 + .vtotal = 240 + 0 + 1 + 0, 2875 + .vrefresh = 60, 2876 + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 2877 + }, 2878 + }; 2879 + 2880 + static const struct panel_desc ti_nspire_classic_lcd_panel = { 2881 + .modes = ti_nspire_classic_lcd_mode, 2882 + .num_modes = 1, 2883 + /* The grayscale panel has 8 bit for the color .. 
Y (black) */ 2884 + .bpc = 8, 2885 + .size = { 2886 + .width = 71, 2887 + .height = 53, 2888 + }, 2889 + /* This is the grayscale bus format */ 2890 + .bus_format = MEDIA_BUS_FMT_Y8_1X8, 2891 + .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, 2892 + }; 2893 + 2836 2894 static const struct drm_display_mode toshiba_lt089ac29000_mode = { 2837 2895 .clock = 79500, 2838 2896 .hdisplay = 1280, ··· 3360 3302 }, { 3361 3303 .compatible = "tianma,tm070rvhg71", 3362 3304 .data = &tianma_tm070rvhg71, 3305 + }, { 3306 + .compatible = "ti,nspire-cx-lcd-panel", 3307 + .data = &ti_nspire_cx_lcd_panel, 3308 + }, { 3309 + .compatible = "ti,nspire-classic-lcd-panel", 3310 + .data = &ti_nspire_classic_lcd_panel, 3363 3311 }, { 3364 3312 .compatible = "toshiba,lt089ac29000", 3365 3313 .data = &toshiba_lt089ac29000,
+701
drivers/gpu/drm/panel/panel-sony-acx565akm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Sony ACX565AKM LCD Panel driver 4 + * 5 + * Copyright (C) 2019 Texas Instruments Incorporated 6 + * 7 + * Based on the omapdrm-specific panel-sony-acx565akm driver 8 + * 9 + * Copyright (C) 2010 Nokia Corporation 10 + * Author: Imre Deak <imre.deak@nokia.com> 11 + */ 12 + 13 + /* 14 + * TODO (to be addressed with hardware access to test the changes): 15 + * 16 + * - Update backlight support to use backlight_update_status() etc. 17 + * - Use prepare/unprepare for the basic power on/off of the backligt 18 + */ 19 + 20 + #include <linux/backlight.h> 21 + #include <linux/delay.h> 22 + #include <linux/gpio/consumer.h> 23 + #include <linux/jiffies.h> 24 + #include <linux/module.h> 25 + #include <linux/mutex.h> 26 + #include <linux/sched.h> 27 + #include <linux/spi/spi.h> 28 + #include <video/mipi_display.h> 29 + 30 + #include <drm/drm_connector.h> 31 + #include <drm/drm_modes.h> 32 + #include <drm/drm_panel.h> 33 + 34 + #define CTRL_DISP_BRIGHTNESS_CTRL_ON BIT(5) 35 + #define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON BIT(4) 36 + #define CTRL_DISP_BACKLIGHT_ON BIT(2) 37 + #define CTRL_DISP_AUTO_BRIGHTNESS_ON BIT(1) 38 + 39 + #define MIPID_CMD_WRITE_CABC 0x55 40 + #define MIPID_CMD_READ_CABC 0x56 41 + 42 + #define MIPID_VER_LPH8923 3 43 + #define MIPID_VER_LS041Y3 4 44 + #define MIPID_VER_L4F00311 8 45 + #define MIPID_VER_ACX565AKM 9 46 + 47 + struct acx565akm_panel { 48 + struct drm_panel panel; 49 + 50 + struct spi_device *spi; 51 + struct gpio_desc *reset_gpio; 52 + struct backlight_device *backlight; 53 + 54 + struct mutex mutex; 55 + 56 + const char *name; 57 + u8 display_id[3]; 58 + int model; 59 + int revision; 60 + bool has_bc; 61 + bool has_cabc; 62 + 63 + bool enabled; 64 + unsigned int cabc_mode; 65 + /* 66 + * Next value of jiffies when we can issue the next sleep in/out 67 + * command. 
68 + */ 69 + unsigned long hw_guard_end; 70 + unsigned long hw_guard_wait; /* max guard time in jiffies */ 71 + }; 72 + 73 + #define to_acx565akm_device(p) container_of(p, struct acx565akm_panel, panel) 74 + 75 + static void acx565akm_transfer(struct acx565akm_panel *lcd, int cmd, 76 + const u8 *wbuf, int wlen, u8 *rbuf, int rlen) 77 + { 78 + struct spi_message m; 79 + struct spi_transfer *x, xfer[5]; 80 + int ret; 81 + 82 + spi_message_init(&m); 83 + 84 + memset(xfer, 0, sizeof(xfer)); 85 + x = &xfer[0]; 86 + 87 + cmd &= 0xff; 88 + x->tx_buf = &cmd; 89 + x->bits_per_word = 9; 90 + x->len = 2; 91 + 92 + if (rlen > 1 && wlen == 0) { 93 + /* 94 + * Between the command and the response data there is a 95 + * dummy clock cycle. Add an extra bit after the command 96 + * word to account for this. 97 + */ 98 + x->bits_per_word = 10; 99 + cmd <<= 1; 100 + } 101 + spi_message_add_tail(x, &m); 102 + 103 + if (wlen) { 104 + x++; 105 + x->tx_buf = wbuf; 106 + x->len = wlen; 107 + x->bits_per_word = 9; 108 + spi_message_add_tail(x, &m); 109 + } 110 + 111 + if (rlen) { 112 + x++; 113 + x->rx_buf = rbuf; 114 + x->len = rlen; 115 + spi_message_add_tail(x, &m); 116 + } 117 + 118 + ret = spi_sync(lcd->spi, &m); 119 + if (ret < 0) 120 + dev_dbg(&lcd->spi->dev, "spi_sync %d\n", ret); 121 + } 122 + 123 + static inline void acx565akm_cmd(struct acx565akm_panel *lcd, int cmd) 124 + { 125 + acx565akm_transfer(lcd, cmd, NULL, 0, NULL, 0); 126 + } 127 + 128 + static inline void acx565akm_write(struct acx565akm_panel *lcd, 129 + int reg, const u8 *buf, int len) 130 + { 131 + acx565akm_transfer(lcd, reg, buf, len, NULL, 0); 132 + } 133 + 134 + static inline void acx565akm_read(struct acx565akm_panel *lcd, 135 + int reg, u8 *buf, int len) 136 + { 137 + acx565akm_transfer(lcd, reg, NULL, 0, buf, len); 138 + } 139 + 140 + /* ----------------------------------------------------------------------------- 141 + * Auto Brightness Control Via sysfs 142 + */ 143 + 144 + static unsigned int 
acx565akm_get_cabc_mode(struct acx565akm_panel *lcd) 145 + { 146 + return lcd->cabc_mode; 147 + } 148 + 149 + static void acx565akm_set_cabc_mode(struct acx565akm_panel *lcd, 150 + unsigned int mode) 151 + { 152 + u16 cabc_ctrl; 153 + 154 + lcd->cabc_mode = mode; 155 + if (!lcd->enabled) 156 + return; 157 + cabc_ctrl = 0; 158 + acx565akm_read(lcd, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1); 159 + cabc_ctrl &= ~3; 160 + cabc_ctrl |= (1 << 8) | (mode & 3); 161 + acx565akm_write(lcd, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2); 162 + } 163 + 164 + static unsigned int acx565akm_get_hw_cabc_mode(struct acx565akm_panel *lcd) 165 + { 166 + u8 cabc_ctrl; 167 + 168 + acx565akm_read(lcd, MIPID_CMD_READ_CABC, &cabc_ctrl, 1); 169 + return cabc_ctrl & 3; 170 + } 171 + 172 + static const char * const acx565akm_cabc_modes[] = { 173 + "off", /* always used when CABC is not supported */ 174 + "ui", 175 + "still-image", 176 + "moving-image", 177 + }; 178 + 179 + static ssize_t cabc_mode_show(struct device *dev, 180 + struct device_attribute *attr, 181 + char *buf) 182 + { 183 + struct acx565akm_panel *lcd = dev_get_drvdata(dev); 184 + const char *mode_str; 185 + int mode; 186 + 187 + if (!lcd->has_cabc) 188 + mode = 0; 189 + else 190 + mode = acx565akm_get_cabc_mode(lcd); 191 + 192 + mode_str = "unknown"; 193 + if (mode >= 0 && mode < ARRAY_SIZE(acx565akm_cabc_modes)) 194 + mode_str = acx565akm_cabc_modes[mode]; 195 + 196 + return sprintf(buf, "%s\n", mode_str); 197 + } 198 + 199 + static ssize_t cabc_mode_store(struct device *dev, 200 + struct device_attribute *attr, 201 + const char *buf, size_t count) 202 + { 203 + struct acx565akm_panel *lcd = dev_get_drvdata(dev); 204 + unsigned int i; 205 + 206 + for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++) { 207 + const char *mode_str = acx565akm_cabc_modes[i]; 208 + int cmp_len = strlen(mode_str); 209 + 210 + if (count > 0 && buf[count - 1] == '\n') 211 + count--; 212 + if (count != cmp_len) 213 + continue; 214 + 215 + if 
(strncmp(buf, mode_str, cmp_len) == 0) 216 + break; 217 + } 218 + 219 + if (i == ARRAY_SIZE(acx565akm_cabc_modes)) 220 + return -EINVAL; 221 + 222 + if (!lcd->has_cabc && i != 0) 223 + return -EINVAL; 224 + 225 + mutex_lock(&lcd->mutex); 226 + acx565akm_set_cabc_mode(lcd, i); 227 + mutex_unlock(&lcd->mutex); 228 + 229 + return count; 230 + } 231 + 232 + static ssize_t cabc_available_modes_show(struct device *dev, 233 + struct device_attribute *attr, 234 + char *buf) 235 + { 236 + struct acx565akm_panel *lcd = dev_get_drvdata(dev); 237 + unsigned int i; 238 + size_t len = 0; 239 + 240 + if (!lcd->has_cabc) 241 + return sprintf(buf, "%s\n", acx565akm_cabc_modes[0]); 242 + 243 + for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++) 244 + len += sprintf(&buf[len], "%s%s", i ? " " : "", 245 + acx565akm_cabc_modes[i]); 246 + 247 + buf[len++] = '\n'; 248 + 249 + return len; 250 + } 251 + 252 + static DEVICE_ATTR_RW(cabc_mode); 253 + static DEVICE_ATTR_RO(cabc_available_modes); 254 + 255 + static struct attribute *acx565akm_cabc_attrs[] = { 256 + &dev_attr_cabc_mode.attr, 257 + &dev_attr_cabc_available_modes.attr, 258 + NULL, 259 + }; 260 + 261 + static const struct attribute_group acx565akm_cabc_attr_group = { 262 + .attrs = acx565akm_cabc_attrs, 263 + }; 264 + 265 + /* ----------------------------------------------------------------------------- 266 + * Backlight Device 267 + */ 268 + 269 + static int acx565akm_get_actual_brightness(struct acx565akm_panel *lcd) 270 + { 271 + u8 bv; 272 + 273 + acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &bv, 1); 274 + 275 + return bv; 276 + } 277 + 278 + static void acx565akm_set_brightness(struct acx565akm_panel *lcd, int level) 279 + { 280 + u16 ctrl; 281 + int bv; 282 + 283 + bv = level | (1 << 8); 284 + acx565akm_write(lcd, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, (u8 *)&bv, 2); 285 + 286 + acx565akm_read(lcd, MIPI_DCS_GET_CONTROL_DISPLAY, (u8 *)&ctrl, 1); 287 + if (level) 288 + ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON | 289 + 
CTRL_DISP_BACKLIGHT_ON; 290 + else 291 + ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON | 292 + CTRL_DISP_BACKLIGHT_ON); 293 + 294 + ctrl |= 1 << 8; 295 + acx565akm_write(lcd, MIPI_DCS_WRITE_CONTROL_DISPLAY, (u8 *)&ctrl, 2); 296 + } 297 + 298 + static int acx565akm_bl_update_status_locked(struct backlight_device *dev) 299 + { 300 + struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev); 301 + int level; 302 + 303 + if (dev->props.fb_blank == FB_BLANK_UNBLANK && 304 + dev->props.power == FB_BLANK_UNBLANK) 305 + level = dev->props.brightness; 306 + else 307 + level = 0; 308 + 309 + acx565akm_set_brightness(lcd, level); 310 + 311 + return 0; 312 + } 313 + 314 + static int acx565akm_bl_update_status(struct backlight_device *dev) 315 + { 316 + struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev); 317 + int ret; 318 + 319 + mutex_lock(&lcd->mutex); 320 + ret = acx565akm_bl_update_status_locked(dev); 321 + mutex_unlock(&lcd->mutex); 322 + 323 + return ret; 324 + } 325 + 326 + static int acx565akm_bl_get_intensity(struct backlight_device *dev) 327 + { 328 + struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev); 329 + unsigned int intensity; 330 + 331 + mutex_lock(&lcd->mutex); 332 + 333 + if (dev->props.fb_blank == FB_BLANK_UNBLANK && 334 + dev->props.power == FB_BLANK_UNBLANK) 335 + intensity = acx565akm_get_actual_brightness(lcd); 336 + else 337 + intensity = 0; 338 + 339 + mutex_unlock(&lcd->mutex); 340 + 341 + return intensity; 342 + } 343 + 344 + static const struct backlight_ops acx565akm_bl_ops = { 345 + .get_brightness = acx565akm_bl_get_intensity, 346 + .update_status = acx565akm_bl_update_status, 347 + }; 348 + 349 + static int acx565akm_backlight_init(struct acx565akm_panel *lcd) 350 + { 351 + struct backlight_properties props = { 352 + .fb_blank = FB_BLANK_UNBLANK, 353 + .power = FB_BLANK_UNBLANK, 354 + .type = BACKLIGHT_RAW, 355 + }; 356 + int ret; 357 + 358 + lcd->backlight = backlight_device_register(lcd->name, &lcd->spi->dev, 359 + lcd, &acx565akm_bl_ops, 
360 + &props); 361 + if (IS_ERR(lcd->backlight)) { 362 + ret = PTR_ERR(lcd->backlight); 363 + lcd->backlight = NULL; 364 + return ret; 365 + } 366 + 367 + if (lcd->has_cabc) { 368 + ret = sysfs_create_group(&lcd->backlight->dev.kobj, 369 + &acx565akm_cabc_attr_group); 370 + if (ret < 0) { 371 + dev_err(&lcd->spi->dev, 372 + "%s failed to create sysfs files\n", __func__); 373 + backlight_device_unregister(lcd->backlight); 374 + return ret; 375 + } 376 + 377 + lcd->cabc_mode = acx565akm_get_hw_cabc_mode(lcd); 378 + } 379 + 380 + lcd->backlight->props.max_brightness = 255; 381 + lcd->backlight->props.brightness = acx565akm_get_actual_brightness(lcd); 382 + 383 + acx565akm_bl_update_status_locked(lcd->backlight); 384 + 385 + return 0; 386 + } 387 + 388 + static void acx565akm_backlight_cleanup(struct acx565akm_panel *lcd) 389 + { 390 + if (lcd->has_cabc) 391 + sysfs_remove_group(&lcd->backlight->dev.kobj, 392 + &acx565akm_cabc_attr_group); 393 + 394 + backlight_device_unregister(lcd->backlight); 395 + } 396 + 397 + /* ----------------------------------------------------------------------------- 398 + * DRM Bridge Operations 399 + */ 400 + 401 + static void acx565akm_set_sleep_mode(struct acx565akm_panel *lcd, int on) 402 + { 403 + int cmd = on ? MIPI_DCS_ENTER_SLEEP_MODE : MIPI_DCS_EXIT_SLEEP_MODE; 404 + unsigned long wait; 405 + 406 + /* 407 + * We have to keep 120msec between sleep in/out commands. 408 + * (8.2.15, 8.2.16). 409 + */ 410 + wait = lcd->hw_guard_end - jiffies; 411 + if ((long)wait > 0 && wait <= lcd->hw_guard_wait) { 412 + set_current_state(TASK_UNINTERRUPTIBLE); 413 + schedule_timeout(wait); 414 + } 415 + 416 + acx565akm_cmd(lcd, cmd); 417 + 418 + lcd->hw_guard_wait = msecs_to_jiffies(120); 419 + lcd->hw_guard_end = jiffies + lcd->hw_guard_wait; 420 + } 421 + 422 + static void acx565akm_set_display_state(struct acx565akm_panel *lcd, 423 + int enabled) 424 + { 425 + int cmd = enabled ? 
MIPI_DCS_SET_DISPLAY_ON : MIPI_DCS_SET_DISPLAY_OFF; 426 + 427 + acx565akm_cmd(lcd, cmd); 428 + } 429 + 430 + static int acx565akm_power_on(struct acx565akm_panel *lcd) 431 + { 432 + /*FIXME tweak me */ 433 + msleep(50); 434 + 435 + gpiod_set_value(lcd->reset_gpio, 1); 436 + 437 + if (lcd->enabled) { 438 + dev_dbg(&lcd->spi->dev, "panel already enabled\n"); 439 + return 0; 440 + } 441 + 442 + /* 443 + * We have to meet all the following delay requirements: 444 + * 1. tRW: reset pulse width 10usec (7.12.1) 445 + * 2. tRT: reset cancel time 5msec (7.12.1) 446 + * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst 447 + * case (7.6.2) 448 + * 4. 120msec before the sleep out command (7.12.1) 449 + */ 450 + msleep(120); 451 + 452 + acx565akm_set_sleep_mode(lcd, 0); 453 + lcd->enabled = true; 454 + 455 + /* 5msec between sleep out and the next command. (8.2.16) */ 456 + usleep_range(5000, 10000); 457 + acx565akm_set_display_state(lcd, 1); 458 + acx565akm_set_cabc_mode(lcd, lcd->cabc_mode); 459 + 460 + return acx565akm_bl_update_status_locked(lcd->backlight); 461 + } 462 + 463 + static void acx565akm_power_off(struct acx565akm_panel *lcd) 464 + { 465 + if (!lcd->enabled) 466 + return; 467 + 468 + acx565akm_set_display_state(lcd, 0); 469 + acx565akm_set_sleep_mode(lcd, 1); 470 + lcd->enabled = false; 471 + /* 472 + * We have to provide PCLK,HS,VS signals for 2 frames (worst case 473 + * ~50msec) after sending the sleep in command and asserting the 474 + * reset signal. We probably could assert the reset w/o the delay 475 + * but we still delay to avoid possible artifacts. 
(7.6.1) 476 + */ 477 + msleep(50); 478 + 479 + gpiod_set_value(lcd->reset_gpio, 0); 480 + 481 + /* FIXME need to tweak this delay */ 482 + msleep(100); 483 + } 484 + 485 + static int acx565akm_disable(struct drm_panel *panel) 486 + { 487 + struct acx565akm_panel *lcd = to_acx565akm_device(panel); 488 + 489 + mutex_lock(&lcd->mutex); 490 + acx565akm_power_off(lcd); 491 + mutex_unlock(&lcd->mutex); 492 + 493 + return 0; 494 + } 495 + 496 + static int acx565akm_enable(struct drm_panel *panel) 497 + { 498 + struct acx565akm_panel *lcd = to_acx565akm_device(panel); 499 + 500 + mutex_lock(&lcd->mutex); 501 + acx565akm_power_on(lcd); 502 + mutex_unlock(&lcd->mutex); 503 + 504 + return 0; 505 + } 506 + 507 + static const struct drm_display_mode acx565akm_mode = { 508 + .clock = 24000, 509 + .hdisplay = 800, 510 + .hsync_start = 800 + 28, 511 + .hsync_end = 800 + 28 + 4, 512 + .htotal = 800 + 28 + 4 + 24, 513 + .vdisplay = 480, 514 + .vsync_start = 480 + 3, 515 + .vsync_end = 480 + 3 + 3, 516 + .vtotal = 480 + 3 + 3 + 4, 517 + .vrefresh = 57, 518 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 519 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 520 + .width_mm = 77, 521 + .height_mm = 46, 522 + }; 523 + 524 + static int acx565akm_get_modes(struct drm_panel *panel) 525 + { 526 + struct drm_connector *connector = panel->connector; 527 + struct drm_display_mode *mode; 528 + 529 + mode = drm_mode_duplicate(panel->drm, &acx565akm_mode); 530 + if (!mode) 531 + return -ENOMEM; 532 + 533 + drm_mode_set_name(mode); 534 + drm_mode_probed_add(connector, mode); 535 + 536 + connector->display_info.width_mm = acx565akm_mode.width_mm; 537 + connector->display_info.height_mm = acx565akm_mode.height_mm; 538 + connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH 539 + | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE 540 + | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE; 541 + 542 + return 1; 543 + } 544 + 545 + static const struct drm_panel_funcs acx565akm_funcs = { 546 + .disable = 
acx565akm_disable, 547 + .enable = acx565akm_enable, 548 + .get_modes = acx565akm_get_modes, 549 + }; 550 + 551 + /* ----------------------------------------------------------------------------- 552 + * Probe, Detect and Remove 553 + */ 554 + 555 + static int acx565akm_detect(struct acx565akm_panel *lcd) 556 + { 557 + __be32 value; 558 + u32 status; 559 + int ret = 0; 560 + 561 + /* 562 + * After being taken out of reset the panel needs 5ms before the first 563 + * command can be sent. 564 + */ 565 + gpiod_set_value(lcd->reset_gpio, 1); 566 + usleep_range(5000, 10000); 567 + 568 + acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_STATUS, (u8 *)&value, 4); 569 + status = __be32_to_cpu(value); 570 + lcd->enabled = (status & (1 << 17)) && (status & (1 << 10)); 571 + 572 + dev_dbg(&lcd->spi->dev, 573 + "LCD panel %s by bootloader (status 0x%04x)\n", 574 + lcd->enabled ? "enabled" : "disabled ", status); 575 + 576 + acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_ID, lcd->display_id, 3); 577 + dev_dbg(&lcd->spi->dev, "MIPI display ID: %02x%02x%02x\n", 578 + lcd->display_id[0], lcd->display_id[1], lcd->display_id[2]); 579 + 580 + switch (lcd->display_id[0]) { 581 + case 0x10: 582 + lcd->model = MIPID_VER_ACX565AKM; 583 + lcd->name = "acx565akm"; 584 + lcd->has_bc = 1; 585 + lcd->has_cabc = 1; 586 + break; 587 + case 0x29: 588 + lcd->model = MIPID_VER_L4F00311; 589 + lcd->name = "l4f00311"; 590 + break; 591 + case 0x45: 592 + lcd->model = MIPID_VER_LPH8923; 593 + lcd->name = "lph8923"; 594 + break; 595 + case 0x83: 596 + lcd->model = MIPID_VER_LS041Y3; 597 + lcd->name = "ls041y3"; 598 + break; 599 + default: 600 + lcd->name = "unknown"; 601 + dev_err(&lcd->spi->dev, "unknown display ID\n"); 602 + ret = -ENODEV; 603 + goto done; 604 + } 605 + 606 + lcd->revision = lcd->display_id[1]; 607 + 608 + dev_info(&lcd->spi->dev, "%s rev %02x panel detected\n", 609 + lcd->name, lcd->revision); 610 + 611 + done: 612 + if (!lcd->enabled) 613 + gpiod_set_value(lcd->reset_gpio, 0); 614 + 615 + return 
ret; 616 + } 617 + 618 + static int acx565akm_probe(struct spi_device *spi) 619 + { 620 + struct acx565akm_panel *lcd; 621 + int ret; 622 + 623 + lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL); 624 + if (!lcd) 625 + return -ENOMEM; 626 + 627 + spi_set_drvdata(spi, lcd); 628 + spi->mode = SPI_MODE_3; 629 + 630 + lcd->spi = spi; 631 + mutex_init(&lcd->mutex); 632 + 633 + lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); 634 + if (IS_ERR(lcd->reset_gpio)) { 635 + dev_err(&spi->dev, "failed to get reset GPIO\n"); 636 + return PTR_ERR(lcd->reset_gpio); 637 + } 638 + 639 + ret = acx565akm_detect(lcd); 640 + if (ret < 0) { 641 + dev_err(&spi->dev, "panel detection failed\n"); 642 + return ret; 643 + } 644 + 645 + if (lcd->has_bc) { 646 + ret = acx565akm_backlight_init(lcd); 647 + if (ret < 0) 648 + return ret; 649 + } 650 + 651 + drm_panel_init(&lcd->panel); 652 + lcd->panel.dev = &lcd->spi->dev; 653 + lcd->panel.funcs = &acx565akm_funcs; 654 + 655 + ret = drm_panel_add(&lcd->panel); 656 + if (ret < 0) { 657 + if (lcd->has_bc) 658 + acx565akm_backlight_cleanup(lcd); 659 + return ret; 660 + } 661 + 662 + return 0; 663 + } 664 + 665 + static int acx565akm_remove(struct spi_device *spi) 666 + { 667 + struct acx565akm_panel *lcd = spi_get_drvdata(spi); 668 + 669 + drm_panel_remove(&lcd->panel); 670 + 671 + if (lcd->has_bc) 672 + acx565akm_backlight_cleanup(lcd); 673 + 674 + drm_panel_disable(&lcd->panel); 675 + drm_panel_unprepare(&lcd->panel); 676 + 677 + return 0; 678 + } 679 + 680 + static const struct of_device_id acx565akm_of_match[] = { 681 + { .compatible = "sony,acx565akm", }, 682 + { /* sentinel */ }, 683 + }; 684 + 685 + MODULE_DEVICE_TABLE(of, acx565akm_of_match); 686 + 687 + static struct spi_driver acx565akm_driver = { 688 + .probe = acx565akm_probe, 689 + .remove = acx565akm_remove, 690 + .driver = { 691 + .name = "panel-sony-acx565akm", 692 + .of_match_table = acx565akm_of_match, 693 + }, 694 + }; 695 + 696 + 
module_spi_driver(acx565akm_driver); 697 + 698 + MODULE_ALIAS("spi:sony,acx565akm"); 699 + MODULE_AUTHOR("Nokia Corporation"); 700 + MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver"); 701 + MODULE_LICENSE("GPL");
+399
drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Toppoly TD028TTEC1 Panel Driver 4 + * 5 + * Copyright (C) 2019 Texas Instruments Incorporated 6 + * 7 + * Based on the omapdrm-specific panel-tpo-td028ttec1 driver 8 + * 9 + * Copyright (C) 2008 Nokia Corporation 10 + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> 11 + * 12 + * Neo 1973 code (jbt6k74.c): 13 + * Copyright (C) 2006-2007 OpenMoko, Inc. 14 + * Author: Harald Welte <laforge@openmoko.org> 15 + * 16 + * Ported and adapted from Neo 1973 U-Boot by: 17 + * H. Nikolaus Schaller <hns@goldelico.com> 18 + */ 19 + 20 + #include <linux/backlight.h> 21 + #include <linux/delay.h> 22 + #include <linux/module.h> 23 + #include <linux/spi/spi.h> 24 + 25 + #include <drm/drm_connector.h> 26 + #include <drm/drm_modes.h> 27 + #include <drm/drm_panel.h> 28 + 29 + #define JBT_COMMAND 0x000 30 + #define JBT_DATA 0x100 31 + 32 + #define JBT_REG_SLEEP_IN 0x10 33 + #define JBT_REG_SLEEP_OUT 0x11 34 + 35 + #define JBT_REG_DISPLAY_OFF 0x28 36 + #define JBT_REG_DISPLAY_ON 0x29 37 + 38 + #define JBT_REG_RGB_FORMAT 0x3a 39 + #define JBT_REG_QUAD_RATE 0x3b 40 + 41 + #define JBT_REG_POWER_ON_OFF 0xb0 42 + #define JBT_REG_BOOSTER_OP 0xb1 43 + #define JBT_REG_BOOSTER_MODE 0xb2 44 + #define JBT_REG_BOOSTER_FREQ 0xb3 45 + #define JBT_REG_OPAMP_SYSCLK 0xb4 46 + #define JBT_REG_VSC_VOLTAGE 0xb5 47 + #define JBT_REG_VCOM_VOLTAGE 0xb6 48 + #define JBT_REG_EXT_DISPL 0xb7 49 + #define JBT_REG_OUTPUT_CONTROL 0xb8 50 + #define JBT_REG_DCCLK_DCEV 0xb9 51 + #define JBT_REG_DISPLAY_MODE1 0xba 52 + #define JBT_REG_DISPLAY_MODE2 0xbb 53 + #define JBT_REG_DISPLAY_MODE 0xbc 54 + #define JBT_REG_ASW_SLEW 0xbd 55 + #define JBT_REG_DUMMY_DISPLAY 0xbe 56 + #define JBT_REG_DRIVE_SYSTEM 0xbf 57 + 58 + #define JBT_REG_SLEEP_OUT_FR_A 0xc0 59 + #define JBT_REG_SLEEP_OUT_FR_B 0xc1 60 + #define JBT_REG_SLEEP_OUT_FR_C 0xc2 61 + #define JBT_REG_SLEEP_IN_LCCNT_D 0xc3 62 + #define JBT_REG_SLEEP_IN_LCCNT_E 0xc4 63 + #define JBT_REG_SLEEP_IN_LCCNT_F 0xc5 64 + 
#define JBT_REG_SLEEP_IN_LCCNT_G 0xc6 65 + 66 + #define JBT_REG_GAMMA1_FINE_1 0xc7 67 + #define JBT_REG_GAMMA1_FINE_2 0xc8 68 + #define JBT_REG_GAMMA1_INCLINATION 0xc9 69 + #define JBT_REG_GAMMA1_BLUE_OFFSET 0xca 70 + 71 + #define JBT_REG_BLANK_CONTROL 0xcf 72 + #define JBT_REG_BLANK_TH_TV 0xd0 73 + #define JBT_REG_CKV_ON_OFF 0xd1 74 + #define JBT_REG_CKV_1_2 0xd2 75 + #define JBT_REG_OEV_TIMING 0xd3 76 + #define JBT_REG_ASW_TIMING_1 0xd4 77 + #define JBT_REG_ASW_TIMING_2 0xd5 78 + 79 + #define JBT_REG_HCLOCK_VGA 0xec 80 + #define JBT_REG_HCLOCK_QVGA 0xed 81 + 82 + struct td028ttec1_panel { 83 + struct drm_panel panel; 84 + 85 + struct spi_device *spi; 86 + struct backlight_device *backlight; 87 + }; 88 + 89 + #define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel) 90 + 91 + static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err) 92 + { 93 + struct spi_device *spi = lcd->spi; 94 + u16 tx_buf = JBT_COMMAND | reg; 95 + int ret; 96 + 97 + if (err && *err) 98 + return *err; 99 + 100 + ret = spi_write(spi, (u8 *)&tx_buf, sizeof(tx_buf)); 101 + if (ret < 0) { 102 + dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret); 103 + if (err) 104 + *err = ret; 105 + } 106 + 107 + return ret; 108 + } 109 + 110 + static int jbt_reg_write_1(struct td028ttec1_panel *lcd, 111 + u8 reg, u8 data, int *err) 112 + { 113 + struct spi_device *spi = lcd->spi; 114 + u16 tx_buf[2]; 115 + int ret; 116 + 117 + if (err && *err) 118 + return *err; 119 + 120 + tx_buf[0] = JBT_COMMAND | reg; 121 + tx_buf[1] = JBT_DATA | data; 122 + 123 + ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf)); 124 + if (ret < 0) { 125 + dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret); 126 + if (err) 127 + *err = ret; 128 + } 129 + 130 + return ret; 131 + } 132 + 133 + static int jbt_reg_write_2(struct td028ttec1_panel *lcd, 134 + u8 reg, u16 data, int *err) 135 + { 136 + struct spi_device *spi = lcd->spi; 137 + u16 tx_buf[3]; 138 + int ret; 139 + 140 + 
if (err && *err) 141 + return *err; 142 + 143 + tx_buf[0] = JBT_COMMAND | reg; 144 + tx_buf[1] = JBT_DATA | (data >> 8); 145 + tx_buf[2] = JBT_DATA | (data & 0xff); 146 + 147 + ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf)); 148 + if (ret < 0) { 149 + dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret); 150 + if (err) 151 + *err = ret; 152 + } 153 + 154 + return ret; 155 + } 156 + 157 + static int td028ttec1_prepare(struct drm_panel *panel) 158 + { 159 + struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); 160 + unsigned int i; 161 + int ret = 0; 162 + 163 + /* Three times command zero */ 164 + for (i = 0; i < 3; ++i) { 165 + jbt_ret_write_0(lcd, 0x00, &ret); 166 + usleep_range(1000, 2000); 167 + } 168 + 169 + /* deep standby out */ 170 + jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x17, &ret); 171 + 172 + /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */ 173 + jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE, 0x80, &ret); 174 + 175 + /* Quad mode off */ 176 + jbt_reg_write_1(lcd, JBT_REG_QUAD_RATE, 0x00, &ret); 177 + 178 + /* AVDD on, XVDD on */ 179 + jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x16, &ret); 180 + 181 + /* Output control */ 182 + jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0xfff9, &ret); 183 + 184 + /* Sleep mode off */ 185 + jbt_ret_write_0(lcd, JBT_REG_SLEEP_OUT, &ret); 186 + 187 + /* at this point we have like 50% grey */ 188 + 189 + /* initialize register set */ 190 + jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE1, 0x01, &ret); 191 + jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE2, 0x00, &ret); 192 + jbt_reg_write_1(lcd, JBT_REG_RGB_FORMAT, 0x60, &ret); 193 + jbt_reg_write_1(lcd, JBT_REG_DRIVE_SYSTEM, 0x10, &ret); 194 + jbt_reg_write_1(lcd, JBT_REG_BOOSTER_OP, 0x56, &ret); 195 + jbt_reg_write_1(lcd, JBT_REG_BOOSTER_MODE, 0x33, &ret); 196 + jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret); 197 + jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret); 198 + jbt_reg_write_1(lcd, JBT_REG_OPAMP_SYSCLK, 0x02, &ret); 199 + 
jbt_reg_write_1(lcd, JBT_REG_VSC_VOLTAGE, 0x2b, &ret); 200 + jbt_reg_write_1(lcd, JBT_REG_VCOM_VOLTAGE, 0x40, &ret); 201 + jbt_reg_write_1(lcd, JBT_REG_EXT_DISPL, 0x03, &ret); 202 + jbt_reg_write_1(lcd, JBT_REG_DCCLK_DCEV, 0x04, &ret); 203 + /* 204 + * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement 205 + * to avoid red / blue flicker 206 + */ 207 + jbt_reg_write_1(lcd, JBT_REG_ASW_SLEW, 0x04, &ret); 208 + jbt_reg_write_1(lcd, JBT_REG_DUMMY_DISPLAY, 0x00, &ret); 209 + 210 + jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_A, 0x11, &ret); 211 + jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_B, 0x11, &ret); 212 + jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_C, 0x11, &ret); 213 + jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040, &ret); 214 + jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0, &ret); 215 + jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020, &ret); 216 + jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0, &ret); 217 + 218 + jbt_reg_write_2(lcd, JBT_REG_GAMMA1_FINE_1, 0x5533, &ret); 219 + jbt_reg_write_1(lcd, JBT_REG_GAMMA1_FINE_2, 0x00, &ret); 220 + jbt_reg_write_1(lcd, JBT_REG_GAMMA1_INCLINATION, 0x00, &ret); 221 + jbt_reg_write_1(lcd, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00, &ret); 222 + 223 + jbt_reg_write_2(lcd, JBT_REG_HCLOCK_VGA, 0x1f0, &ret); 224 + jbt_reg_write_1(lcd, JBT_REG_BLANK_CONTROL, 0x02, &ret); 225 + jbt_reg_write_2(lcd, JBT_REG_BLANK_TH_TV, 0x0804, &ret); 226 + 227 + jbt_reg_write_1(lcd, JBT_REG_CKV_ON_OFF, 0x01, &ret); 228 + jbt_reg_write_2(lcd, JBT_REG_CKV_1_2, 0x0000, &ret); 229 + 230 + jbt_reg_write_2(lcd, JBT_REG_OEV_TIMING, 0x0d0e, &ret); 231 + jbt_reg_write_2(lcd, JBT_REG_ASW_TIMING_1, 0x11a4, &ret); 232 + jbt_reg_write_1(lcd, JBT_REG_ASW_TIMING_2, 0x0e, &ret); 233 + 234 + return ret; 235 + } 236 + 237 + static int td028ttec1_enable(struct drm_panel *panel) 238 + { 239 + struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); 240 + int ret; 241 + 242 + ret = jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL); 243 + 
if (ret) 244 + return ret; 245 + 246 + backlight_enable(lcd->backlight); 247 + 248 + return 0; 249 + } 250 + 251 + static int td028ttec1_disable(struct drm_panel *panel) 252 + { 253 + struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); 254 + 255 + backlight_disable(lcd->backlight); 256 + 257 + jbt_ret_write_0(lcd, JBT_REG_DISPLAY_OFF, NULL); 258 + 259 + return 0; 260 + } 261 + 262 + static int td028ttec1_unprepare(struct drm_panel *panel) 263 + { 264 + struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); 265 + 266 + jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0x8002, NULL); 267 + jbt_ret_write_0(lcd, JBT_REG_SLEEP_IN, NULL); 268 + jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x00, NULL); 269 + 270 + return 0; 271 + } 272 + 273 + static const struct drm_display_mode td028ttec1_mode = { 274 + .clock = 22153, 275 + .hdisplay = 480, 276 + .hsync_start = 480 + 24, 277 + .hsync_end = 480 + 24 + 8, 278 + .htotal = 480 + 24 + 8 + 8, 279 + .vdisplay = 640, 280 + .vsync_start = 640 + 4, 281 + .vsync_end = 640 + 4 + 2, 282 + .vtotal = 640 + 4 + 2 + 2, 283 + .vrefresh = 66, 284 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 285 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 286 + .width_mm = 43, 287 + .height_mm = 58, 288 + }; 289 + 290 + static int td028ttec1_get_modes(struct drm_panel *panel) 291 + { 292 + struct drm_connector *connector = panel->connector; 293 + struct drm_display_mode *mode; 294 + 295 + mode = drm_mode_duplicate(panel->drm, &td028ttec1_mode); 296 + if (!mode) 297 + return -ENOMEM; 298 + 299 + drm_mode_set_name(mode); 300 + drm_mode_probed_add(connector, mode); 301 + 302 + connector->display_info.width_mm = td028ttec1_mode.width_mm; 303 + connector->display_info.height_mm = td028ttec1_mode.height_mm; 304 + /* 305 + * FIXME: According to the datasheet sync signals are sampled on the 306 + * rising edge of the clock, but the code running on the OpenMoko Neo 307 + * FreeRunner and Neo 1973 indicates sampling on the falling 
edge. This 308 + * should be tested on a real device. 309 + */ 310 + connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH 311 + | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE 312 + | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE; 313 + 314 + return 1; 315 + } 316 + 317 + static const struct drm_panel_funcs td028ttec1_funcs = { 318 + .prepare = td028ttec1_prepare, 319 + .enable = td028ttec1_enable, 320 + .disable = td028ttec1_disable, 321 + .unprepare = td028ttec1_unprepare, 322 + .get_modes = td028ttec1_get_modes, 323 + }; 324 + 325 + static int td028ttec1_probe(struct spi_device *spi) 326 + { 327 + struct td028ttec1_panel *lcd; 328 + int ret; 329 + 330 + lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL); 331 + if (!lcd) 332 + return -ENOMEM; 333 + 334 + spi_set_drvdata(spi, lcd); 335 + lcd->spi = spi; 336 + 337 + lcd->backlight = devm_of_find_backlight(&spi->dev); 338 + if (IS_ERR(lcd->backlight)) 339 + return PTR_ERR(lcd->backlight); 340 + 341 + spi->mode = SPI_MODE_3; 342 + spi->bits_per_word = 9; 343 + 344 + ret = spi_setup(spi); 345 + if (ret < 0) { 346 + dev_err(&spi->dev, "failed to setup SPI: %d\n", ret); 347 + return ret; 348 + } 349 + 350 + drm_panel_init(&lcd->panel); 351 + lcd->panel.dev = &lcd->spi->dev; 352 + lcd->panel.funcs = &td028ttec1_funcs; 353 + 354 + return drm_panel_add(&lcd->panel); 355 + } 356 + 357 + static int td028ttec1_remove(struct spi_device *spi) 358 + { 359 + struct td028ttec1_panel *lcd = spi_get_drvdata(spi); 360 + 361 + drm_panel_remove(&lcd->panel); 362 + drm_panel_disable(&lcd->panel); 363 + drm_panel_unprepare(&lcd->panel); 364 + 365 + return 0; 366 + } 367 + 368 + static const struct of_device_id td028ttec1_of_match[] = { 369 + { .compatible = "tpo,td028ttec1", }, 370 + /* DT backward compatibility. 
*/ 371 + { .compatible = "toppoly,td028ttec1", }, 372 + { /* sentinel */ }, 373 + }; 374 + 375 + MODULE_DEVICE_TABLE(of, td028ttec1_of_match); 376 + 377 + static const struct spi_device_id td028ttec1_ids[] = { 378 + { "tpo,td028ttec1", 0}, 379 + { "toppoly,td028ttec1", 0 }, 380 + { /* sentinel */ } 381 + }; 382 + 383 + MODULE_DEVICE_TABLE(spi, td028ttec1_ids); 384 + 385 + static struct spi_driver td028ttec1_driver = { 386 + .probe = td028ttec1_probe, 387 + .remove = td028ttec1_remove, 388 + .id_table = td028ttec1_ids, 389 + .driver = { 390 + .name = "panel-tpo-td028ttec1", 391 + .of_match_table = td028ttec1_of_match, 392 + }, 393 + }; 394 + 395 + module_spi_driver(td028ttec1_driver); 396 + 397 + MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>"); 398 + MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver"); 399 + MODULE_LICENSE("GPL");
+509
drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Toppoly TD043MTEA1 Panel Driver 4 + * 5 + * Copyright (C) 2019 Texas Instruments Incorporated 6 + * 7 + * Based on the omapdrm-specific panel-tpo-td043mtea1 driver 8 + * 9 + * Author: Gražvydas Ignotas <notasas@gmail.com> 10 + */ 11 + 12 + #include <linux/delay.h> 13 + #include <linux/module.h> 14 + #include <linux/regulator/consumer.h> 15 + #include <linux/spi/spi.h> 16 + 17 + #include <drm/drm_connector.h> 18 + #include <drm/drm_modes.h> 19 + #include <drm/drm_panel.h> 20 + 21 + #define TPO_R02_MODE(x) ((x) & 7) 22 + #define TPO_R02_MODE_800x480 7 23 + #define TPO_R02_NCLK_RISING BIT(3) 24 + #define TPO_R02_HSYNC_HIGH BIT(4) 25 + #define TPO_R02_VSYNC_HIGH BIT(5) 26 + 27 + #define TPO_R03_NSTANDBY BIT(0) 28 + #define TPO_R03_EN_CP_CLK BIT(1) 29 + #define TPO_R03_EN_VGL_PUMP BIT(2) 30 + #define TPO_R03_EN_PWM BIT(3) 31 + #define TPO_R03_DRIVING_CAP_100 BIT(4) 32 + #define TPO_R03_EN_PRE_CHARGE BIT(6) 33 + #define TPO_R03_SOFTWARE_CTL BIT(7) 34 + 35 + #define TPO_R04_NFLIP_H BIT(0) 36 + #define TPO_R04_NFLIP_V BIT(1) 37 + #define TPO_R04_CP_CLK_FREQ_1H BIT(2) 38 + #define TPO_R04_VGL_FREQ_1H BIT(4) 39 + 40 + #define TPO_R03_VAL_NORMAL \ 41 + (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | TPO_R03_EN_VGL_PUMP | \ 42 + TPO_R03_EN_PWM | TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \ 43 + TPO_R03_SOFTWARE_CTL) 44 + 45 + #define TPO_R03_VAL_STANDBY \ 46 + (TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \ 47 + TPO_R03_SOFTWARE_CTL) 48 + 49 + static const u16 td043mtea1_def_gamma[12] = { 50 + 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023 51 + }; 52 + 53 + struct td043mtea1_panel { 54 + struct drm_panel panel; 55 + 56 + struct spi_device *spi; 57 + struct regulator *vcc_reg; 58 + struct gpio_desc *reset_gpio; 59 + 60 + unsigned int mode; 61 + u16 gamma[12]; 62 + bool vmirror; 63 + bool powered_on; 64 + bool spi_suspended; 65 + bool power_on_resume; 66 + }; 67 + 68 + #define to_td043mtea1_device(p) 
container_of(p, struct td043mtea1_panel, panel) 69 + 70 + /* ----------------------------------------------------------------------------- 71 + * Hardware Access 72 + */ 73 + 74 + static int td043mtea1_write(struct td043mtea1_panel *lcd, u8 addr, u8 value) 75 + { 76 + struct spi_message msg; 77 + struct spi_transfer xfer; 78 + u16 data; 79 + int ret; 80 + 81 + spi_message_init(&msg); 82 + 83 + memset(&xfer, 0, sizeof(xfer)); 84 + 85 + data = ((u16)addr << 10) | (1 << 8) | value; 86 + xfer.tx_buf = &data; 87 + xfer.bits_per_word = 16; 88 + xfer.len = 2; 89 + spi_message_add_tail(&xfer, &msg); 90 + 91 + ret = spi_sync(lcd->spi, &msg); 92 + if (ret < 0) 93 + dev_warn(&lcd->spi->dev, "failed to write to LCD reg (%d)\n", 94 + ret); 95 + 96 + return ret; 97 + } 98 + 99 + static void td043mtea1_write_gamma(struct td043mtea1_panel *lcd) 100 + { 101 + const u16 *gamma = lcd->gamma; 102 + unsigned int i; 103 + u8 val; 104 + 105 + /* gamma bits [9:8] */ 106 + for (val = i = 0; i < 4; i++) 107 + val |= (gamma[i] & 0x300) >> ((i + 1) * 2); 108 + td043mtea1_write(lcd, 0x11, val); 109 + 110 + for (val = i = 0; i < 4; i++) 111 + val |= (gamma[i + 4] & 0x300) >> ((i + 1) * 2); 112 + td043mtea1_write(lcd, 0x12, val); 113 + 114 + for (val = i = 0; i < 4; i++) 115 + val |= (gamma[i + 8] & 0x300) >> ((i + 1) * 2); 116 + td043mtea1_write(lcd, 0x13, val); 117 + 118 + /* gamma bits [7:0] */ 119 + for (i = 0; i < 12; i++) 120 + td043mtea1_write(lcd, 0x14 + i, gamma[i] & 0xff); 121 + } 122 + 123 + static int td043mtea1_write_mirror(struct td043mtea1_panel *lcd) 124 + { 125 + u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V | 126 + TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H; 127 + if (lcd->vmirror) 128 + reg4 &= ~TPO_R04_NFLIP_V; 129 + 130 + return td043mtea1_write(lcd, 4, reg4); 131 + } 132 + 133 + static int td043mtea1_power_on(struct td043mtea1_panel *lcd) 134 + { 135 + int ret; 136 + 137 + if (lcd->powered_on) 138 + return 0; 139 + 140 + ret = regulator_enable(lcd->vcc_reg); 141 + if (ret < 
0) 142 + return ret; 143 + 144 + /* Wait for the panel to stabilize. */ 145 + msleep(160); 146 + 147 + gpiod_set_value(lcd->reset_gpio, 0); 148 + 149 + td043mtea1_write(lcd, 2, TPO_R02_MODE(lcd->mode) | TPO_R02_NCLK_RISING); 150 + td043mtea1_write(lcd, 3, TPO_R03_VAL_NORMAL); 151 + td043mtea1_write(lcd, 0x20, 0xf0); 152 + td043mtea1_write(lcd, 0x21, 0xf0); 153 + td043mtea1_write_mirror(lcd); 154 + td043mtea1_write_gamma(lcd); 155 + 156 + lcd->powered_on = true; 157 + 158 + return 0; 159 + } 160 + 161 + static void td043mtea1_power_off(struct td043mtea1_panel *lcd) 162 + { 163 + if (!lcd->powered_on) 164 + return; 165 + 166 + td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM); 167 + 168 + gpiod_set_value(lcd->reset_gpio, 1); 169 + 170 + /* wait for at least 2 vsyncs before cutting off power */ 171 + msleep(50); 172 + 173 + td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY); 174 + 175 + regulator_disable(lcd->vcc_reg); 176 + 177 + lcd->powered_on = false; 178 + } 179 + 180 + /* ----------------------------------------------------------------------------- 181 + * sysfs 182 + */ 183 + 184 + static ssize_t vmirror_show(struct device *dev, struct device_attribute *attr, 185 + char *buf) 186 + { 187 + struct td043mtea1_panel *lcd = dev_get_drvdata(dev); 188 + 189 + return snprintf(buf, PAGE_SIZE, "%d\n", lcd->vmirror); 190 + } 191 + 192 + static ssize_t vmirror_store(struct device *dev, struct device_attribute *attr, 193 + const char *buf, size_t count) 194 + { 195 + struct td043mtea1_panel *lcd = dev_get_drvdata(dev); 196 + int val; 197 + int ret; 198 + 199 + ret = kstrtoint(buf, 0, &val); 200 + if (ret < 0) 201 + return ret; 202 + 203 + lcd->vmirror = !!val; 204 + 205 + ret = td043mtea1_write_mirror(lcd); 206 + if (ret < 0) 207 + return ret; 208 + 209 + return count; 210 + } 211 + 212 + static ssize_t mode_show(struct device *dev, struct device_attribute *attr, 213 + char *buf) 214 + { 215 + struct td043mtea1_panel *lcd = dev_get_drvdata(dev); 216 + 217 + return 
snprintf(buf, PAGE_SIZE, "%d\n", lcd->mode); 218 + } 219 + 220 + static ssize_t mode_store(struct device *dev, struct device_attribute *attr, 221 + const char *buf, size_t count) 222 + { 223 + struct td043mtea1_panel *lcd = dev_get_drvdata(dev); 224 + long val; 225 + int ret; 226 + 227 + ret = kstrtol(buf, 0, &val); 228 + if (ret != 0 || val & ~7) 229 + return -EINVAL; 230 + 231 + lcd->mode = val; 232 + 233 + val |= TPO_R02_NCLK_RISING; 234 + td043mtea1_write(lcd, 2, val); 235 + 236 + return count; 237 + } 238 + 239 + static ssize_t gamma_show(struct device *dev, struct device_attribute *attr, 240 + char *buf) 241 + { 242 + struct td043mtea1_panel *lcd = dev_get_drvdata(dev); 243 + ssize_t len = 0; 244 + unsigned int i; 245 + int ret; 246 + 247 + for (i = 0; i < ARRAY_SIZE(lcd->gamma); i++) { 248 + ret = snprintf(buf + len, PAGE_SIZE - len, "%u ", 249 + lcd->gamma[i]); 250 + if (ret < 0) 251 + return ret; 252 + len += ret; 253 + } 254 + buf[len - 1] = '\n'; 255 + 256 + return len; 257 + } 258 + 259 + static ssize_t gamma_store(struct device *dev, struct device_attribute *attr, 260 + const char *buf, size_t count) 261 + { 262 + struct td043mtea1_panel *lcd = dev_get_drvdata(dev); 263 + unsigned int g[12]; 264 + unsigned int i; 265 + int ret; 266 + 267 + ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u", 268 + &g[0], &g[1], &g[2], &g[3], &g[4], &g[5], 269 + &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]); 270 + if (ret != 12) 271 + return -EINVAL; 272 + 273 + for (i = 0; i < 12; i++) 274 + lcd->gamma[i] = g[i]; 275 + 276 + td043mtea1_write_gamma(lcd); 277 + 278 + return count; 279 + } 280 + 281 + static DEVICE_ATTR_RW(vmirror); 282 + static DEVICE_ATTR_RW(mode); 283 + static DEVICE_ATTR_RW(gamma); 284 + 285 + static struct attribute *td043mtea1_attrs[] = { 286 + &dev_attr_vmirror.attr, 287 + &dev_attr_mode.attr, 288 + &dev_attr_gamma.attr, 289 + NULL, 290 + }; 291 + 292 + static const struct attribute_group td043mtea1_attr_group = { 293 + .attrs = td043mtea1_attrs, 
294 + }; 295 + 296 + /* ----------------------------------------------------------------------------- 297 + * Panel Operations 298 + */ 299 + 300 + static int td043mtea1_unprepare(struct drm_panel *panel) 301 + { 302 + struct td043mtea1_panel *lcd = to_td043mtea1_device(panel); 303 + 304 + if (!lcd->spi_suspended) 305 + td043mtea1_power_off(lcd); 306 + 307 + return 0; 308 + } 309 + 310 + static int td043mtea1_prepare(struct drm_panel *panel) 311 + { 312 + struct td043mtea1_panel *lcd = to_td043mtea1_device(panel); 313 + int ret; 314 + 315 + /* 316 + * If we are resuming from system suspend, SPI might not be enabled 317 + * yet, so we'll program the LCD from SPI PM resume callback. 318 + */ 319 + if (lcd->spi_suspended) 320 + return 0; 321 + 322 + ret = td043mtea1_power_on(lcd); 323 + if (ret) { 324 + dev_err(&lcd->spi->dev, "%s: power on failed (%d)\n", 325 + __func__, ret); 326 + return ret; 327 + } 328 + 329 + return 0; 330 + } 331 + 332 + static const struct drm_display_mode td043mtea1_mode = { 333 + .clock = 36000, 334 + .hdisplay = 800, 335 + .hsync_start = 800 + 68, 336 + .hsync_end = 800 + 68 + 1, 337 + .htotal = 800 + 68 + 1 + 214, 338 + .vdisplay = 480, 339 + .vsync_start = 480 + 39, 340 + .vsync_end = 480 + 39 + 1, 341 + .vtotal = 480 + 39 + 1 + 34, 342 + .vrefresh = 60, 343 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 344 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 345 + .width_mm = 94, 346 + .height_mm = 56, 347 + }; 348 + 349 + static int td043mtea1_get_modes(struct drm_panel *panel) 350 + { 351 + struct drm_connector *connector = panel->connector; 352 + struct drm_display_mode *mode; 353 + 354 + mode = drm_mode_duplicate(panel->drm, &td043mtea1_mode); 355 + if (!mode) 356 + return -ENOMEM; 357 + 358 + drm_mode_set_name(mode); 359 + drm_mode_probed_add(connector, mode); 360 + 361 + connector->display_info.width_mm = td043mtea1_mode.width_mm; 362 + connector->display_info.height_mm = td043mtea1_mode.height_mm; 363 + /* 364 + * 
FIXME: According to the datasheet sync signals are sampled on the 365 + * rising edge of the clock, but the code running on the OMAP3 Pandora 366 + * indicates sampling on the falling edge. This should be tested on a 367 + * real device. 368 + */ 369 + connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH 370 + | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE 371 + | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE; 372 + 373 + return 1; 374 + } 375 + 376 + static const struct drm_panel_funcs td043mtea1_funcs = { 377 + .unprepare = td043mtea1_unprepare, 378 + .prepare = td043mtea1_prepare, 379 + .get_modes = td043mtea1_get_modes, 380 + }; 381 + 382 + /* ----------------------------------------------------------------------------- 383 + * Power Management, Probe and Remove 384 + */ 385 + 386 + static int __maybe_unused td043mtea1_suspend(struct device *dev) 387 + { 388 + struct td043mtea1_panel *lcd = dev_get_drvdata(dev); 389 + 390 + if (lcd->powered_on) { 391 + td043mtea1_power_off(lcd); 392 + lcd->powered_on = true; 393 + } 394 + 395 + lcd->spi_suspended = true; 396 + 397 + return 0; 398 + } 399 + 400 + static int __maybe_unused td043mtea1_resume(struct device *dev) 401 + { 402 + struct td043mtea1_panel *lcd = dev_get_drvdata(dev); 403 + int ret; 404 + 405 + lcd->spi_suspended = false; 406 + 407 + if (lcd->powered_on) { 408 + lcd->powered_on = false; 409 + ret = td043mtea1_power_on(lcd); 410 + if (ret) 411 + return ret; 412 + } 413 + 414 + return 0; 415 + } 416 + 417 + static SIMPLE_DEV_PM_OPS(td043mtea1_pm_ops, td043mtea1_suspend, 418 + td043mtea1_resume); 419 + 420 + static int td043mtea1_probe(struct spi_device *spi) 421 + { 422 + struct td043mtea1_panel *lcd; 423 + int ret; 424 + 425 + lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL); 426 + if (lcd == NULL) 427 + return -ENOMEM; 428 + 429 + spi_set_drvdata(spi, lcd); 430 + lcd->spi = spi; 431 + lcd->mode = TPO_R02_MODE_800x480; 432 + memcpy(lcd->gamma, td043mtea1_def_gamma, sizeof(lcd->gamma)); 433 + 434 + lcd->vcc_reg = 
devm_regulator_get(&spi->dev, "vcc"); 435 + if (IS_ERR(lcd->vcc_reg)) { 436 + dev_err(&spi->dev, "failed to get VCC regulator\n"); 437 + return PTR_ERR(lcd->vcc_reg); 438 + } 439 + 440 + lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); 441 + if (IS_ERR(lcd->reset_gpio)) { 442 + dev_err(&spi->dev, "failed to get reset GPIO\n"); 443 + return PTR_ERR(lcd->reset_gpio); 444 + } 445 + 446 + spi->bits_per_word = 16; 447 + spi->mode = SPI_MODE_0; 448 + 449 + ret = spi_setup(spi); 450 + if (ret < 0) { 451 + dev_err(&spi->dev, "failed to setup SPI: %d\n", ret); 452 + return ret; 453 + } 454 + 455 + ret = sysfs_create_group(&spi->dev.kobj, &td043mtea1_attr_group); 456 + if (ret < 0) { 457 + dev_err(&spi->dev, "failed to create sysfs files\n"); 458 + return ret; 459 + } 460 + 461 + drm_panel_init(&lcd->panel); 462 + lcd->panel.dev = &lcd->spi->dev; 463 + lcd->panel.funcs = &td043mtea1_funcs; 464 + 465 + ret = drm_panel_add(&lcd->panel); 466 + if (ret < 0) { 467 + sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group); 468 + return ret; 469 + } 470 + 471 + return 0; 472 + } 473 + 474 + static int td043mtea1_remove(struct spi_device *spi) 475 + { 476 + struct td043mtea1_panel *lcd = spi_get_drvdata(spi); 477 + 478 + drm_panel_remove(&lcd->panel); 479 + drm_panel_disable(&lcd->panel); 480 + drm_panel_unprepare(&lcd->panel); 481 + 482 + sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group); 483 + 484 + return 0; 485 + } 486 + 487 + static const struct of_device_id td043mtea1_of_match[] = { 488 + { .compatible = "tpo,td043mtea1", }, 489 + { /* sentinel */ }, 490 + }; 491 + 492 + MODULE_DEVICE_TABLE(of, td043mtea1_of_match); 493 + 494 + static struct spi_driver td043mtea1_driver = { 495 + .probe = td043mtea1_probe, 496 + .remove = td043mtea1_remove, 497 + .driver = { 498 + .name = "panel-tpo-td043mtea1", 499 + .pm = &td043mtea1_pm_ops, 500 + .of_match_table = td043mtea1_of_match, 501 + }, 502 + }; 503 + 504 + module_spi_driver(td043mtea1_driver); 505 + 
506 + MODULE_ALIAS("spi:tpo,td043mtea1"); 507 + MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>"); 508 + MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver"); 509 + MODULE_LICENSE("GPL");
+1
drivers/gpu/drm/panfrost/Makefile
··· 5 5 panfrost_device.o \ 6 6 panfrost_devfreq.o \ 7 7 panfrost_gem.o \ 8 + panfrost_gem_shrinker.o \ 8 9 panfrost_gpu.o \ 9 10 panfrost_job.o \ 10 11 panfrost_mmu.o \
-11
drivers/gpu/drm/panfrost/TODO
··· 6 6 - Bifrost specific feature and issue handling 7 7 - Coherent DMA support 8 8 9 - - Support for 2MB pages. The io-pgtable code already supports this. Finishing 10 - support involves either copying or adapting the iommu API to handle passing 11 - aligned addresses and sizes to the io-pgtable code. 12 - 13 9 - Per FD address space support. The h/w supports multiple addresses spaces. 14 10 The hard part is handling when more address spaces are needed than what 15 11 the h/w provides. 16 12 17 - - Support pinning pages on demand (GPU page faults). 18 - 19 13 - Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu) 20 - 21 - - Support for madvise and a shrinker. 22 14 23 15 - Compute job support. So called 'compute only' jobs need to be plumbed up to 24 16 userspace. 25 - 26 - - Performance counter support. (Boris) 27 -
+10 -6
drivers/gpu/drm/panfrost/panfrost_device.c
··· 254 254 return "UNKNOWN"; 255 255 } 256 256 257 + void panfrost_device_reset(struct panfrost_device *pfdev) 258 + { 259 + panfrost_gpu_soft_reset(pfdev); 260 + 261 + panfrost_gpu_power_on(pfdev); 262 + panfrost_mmu_reset(pfdev); 263 + panfrost_job_enable_interrupts(pfdev); 264 + } 265 + 257 266 #ifdef CONFIG_PM 258 267 int panfrost_device_resume(struct device *dev) 259 268 { 260 269 struct platform_device *pdev = to_platform_device(dev); 261 270 struct panfrost_device *pfdev = platform_get_drvdata(pdev); 262 271 263 - panfrost_gpu_soft_reset(pfdev); 264 - 265 - /* TODO: Re-enable all other address spaces */ 266 - panfrost_gpu_power_on(pfdev); 267 - panfrost_mmu_enable(pfdev, 0); 268 - panfrost_job_enable_interrupts(pfdev); 272 + panfrost_device_reset(pfdev); 269 273 panfrost_devfreq_resume(pfdev); 270 274 271 275 return 0;
+5
drivers/gpu/drm/panfrost/panfrost_device.h
··· 85 85 struct mutex sched_lock; 86 86 struct mutex reset_lock; 87 87 88 + struct mutex shrinker_lock; 89 + struct list_head shrinker_list; 90 + struct shrinker shrinker; 91 + 88 92 struct { 89 93 struct devfreq *devfreq; 90 94 struct thermal_cooling_device *cooling; ··· 132 128 133 129 int panfrost_device_init(struct panfrost_device *pfdev); 134 130 void panfrost_device_fini(struct panfrost_device *pfdev); 131 + void panfrost_device_reset(struct panfrost_device *pfdev); 135 132 136 133 int panfrost_device_resume(struct device *dev); 137 134 int panfrost_device_suspend(struct device *dev);
+88 -17
drivers/gpu/drm/panfrost/panfrost_drv.c
··· 78 78 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, 79 79 struct drm_file *file) 80 80 { 81 - int ret; 82 - struct drm_gem_shmem_object *shmem; 81 + struct panfrost_gem_object *bo; 83 82 struct drm_panfrost_create_bo *args = data; 84 83 85 - if (!args->size || args->flags || args->pad) 84 + if (!args->size || args->pad || 85 + (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) 86 86 return -EINVAL; 87 87 88 - shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, 89 - &args->handle); 90 - if (IS_ERR(shmem)) 91 - return PTR_ERR(shmem); 88 + /* Heaps should never be executable */ 89 + if ((args->flags & PANFROST_BO_HEAP) && 90 + !(args->flags & PANFROST_BO_NOEXEC)) 91 + return -EINVAL; 92 92 93 - ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base)); 94 - if (ret) 95 - goto err_free; 93 + bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags, 94 + &args->handle); 95 + if (IS_ERR(bo)) 96 + return PTR_ERR(bo); 96 97 97 - args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT; 98 + args->offset = bo->node.start << PAGE_SHIFT; 98 99 99 100 return 0; 100 - 101 - err_free: 102 - drm_gem_handle_delete(file, args->handle); 103 - return ret; 104 101 } 105 102 106 103 /** ··· 274 277 if (!gem_obj) 275 278 return -ENOENT; 276 279 277 - ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true, 280 + ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true, 278 281 true, timeout); 279 282 if (!ret) 280 283 ret = timeout ? -ETIMEDOUT : -EBUSY; ··· 301 304 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 302 305 return -ENOENT; 303 306 } 307 + 308 + /* Don't allow mmapping of heap objects as pages are not pinned. 
*/ 309 + if (to_panfrost_bo(gem_obj)->is_heap) 310 + return -EINVAL; 304 311 305 312 ret = drm_gem_create_mmap_offset(gem_obj); 306 313 if (ret == 0) ··· 334 333 return 0; 335 334 } 336 335 336 + static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, 337 + struct drm_file *file_priv) 338 + { 339 + struct drm_panfrost_madvise *args = data; 340 + struct panfrost_device *pfdev = dev->dev_private; 341 + struct drm_gem_object *gem_obj; 342 + 343 + gem_obj = drm_gem_object_lookup(file_priv, args->handle); 344 + if (!gem_obj) { 345 + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 346 + return -ENOENT; 347 + } 348 + 349 + args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); 350 + 351 + if (args->retained) { 352 + struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj); 353 + 354 + mutex_lock(&pfdev->shrinker_lock); 355 + 356 + if (args->madv == PANFROST_MADV_DONTNEED) 357 + list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list); 358 + else if (args->madv == PANFROST_MADV_WILLNEED) 359 + list_del_init(&bo->base.madv_list); 360 + 361 + mutex_unlock(&pfdev->shrinker_lock); 362 + } 363 + 364 + drm_gem_object_put_unlocked(gem_obj); 365 + return 0; 366 + } 367 + 337 368 int panfrost_unstable_ioctl_check(void) 338 369 { 339 370 if (!unstable_ioctls) 340 371 return -ENOSYS; 341 372 342 373 return 0; 374 + } 375 + 376 + #define PFN_4G (SZ_4G >> PAGE_SHIFT) 377 + #define PFN_4G_MASK (PFN_4G - 1) 378 + #define PFN_16M (SZ_16M >> PAGE_SHIFT) 379 + 380 + static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node, 381 + unsigned long color, 382 + u64 *start, u64 *end) 383 + { 384 + /* Executable buffers can't start or end on a 4GB boundary */ 385 + if (!(color & PANFROST_BO_NOEXEC)) { 386 + u64 next_seg; 387 + 388 + if ((*start & PFN_4G_MASK) == 0) 389 + (*start)++; 390 + 391 + if ((*end & PFN_4G_MASK) == 0) 392 + (*end)--; 393 + 394 + next_seg = ALIGN(*start, PFN_4G); 395 + if (next_seg - *start <= PFN_16M) 396 + *start = next_seg + 1; 
397 + 398 + *end = min(*end, ALIGN(*start, PFN_4G) - 1); 399 + } 343 400 } 344 401 345 402 static int ··· 443 384 PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW), 444 385 PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW), 445 386 PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW), 387 + PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), 446 388 }; 447 389 448 390 DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops); 449 391 392 + /* 393 + * Panfrost driver version: 394 + * - 1.0 - initial interface 395 + * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO 396 + */ 450 397 static struct drm_driver panfrost_drm_driver = { 451 398 .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, 452 399 .open = panfrost_open, ··· 464 399 .desc = "panfrost DRM", 465 400 .date = "20180908", 466 401 .major = 1, 467 - .minor = 0, 402 + .minor = 1, 468 403 469 404 .gem_create_object = panfrost_gem_create_object, 470 405 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, ··· 497 432 pfdev->ddev = ddev; 498 433 499 434 spin_lock_init(&pfdev->mm_lock); 435 + mutex_init(&pfdev->shrinker_lock); 436 + INIT_LIST_HEAD(&pfdev->shrinker_list); 500 437 501 438 /* 4G enough for now. can be 48-bit */ 502 439 drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT); 440 + pfdev->mm.color_adjust = panfrost_drm_mm_color_adjust; 503 441 504 442 pm_runtime_use_autosuspend(pfdev->dev); 505 443 pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */ ··· 530 462 if (err < 0) 531 463 goto err_out1; 532 464 465 + panfrost_gem_shrinker_init(ddev); 466 + 533 467 return 0; 534 468 535 469 err_out1: ··· 548 478 struct drm_device *ddev = pfdev->ddev; 549 479 550 480 drm_dev_unregister(ddev); 481 + panfrost_gem_shrinker_cleanup(ddev); 551 482 pm_runtime_get_sync(pfdev->dev); 552 483 pm_runtime_put_sync_autosuspend(pfdev->dev); 553 484 pm_runtime_disable(pfdev->dev);
+115 -26
drivers/gpu/drm/panfrost/panfrost_gem.c
··· 19 19 struct panfrost_gem_object *bo = to_panfrost_bo(obj); 20 20 struct panfrost_device *pfdev = obj->dev->dev_private; 21 21 22 - if (bo->is_mapped) 23 - panfrost_mmu_unmap(bo); 22 + if (bo->sgts) { 23 + int i; 24 + int n_sgt = bo->base.base.size / SZ_2M; 24 25 25 - spin_lock(&pfdev->mm_lock); 26 - drm_mm_remove_node(&bo->node); 27 - spin_unlock(&pfdev->mm_lock); 26 + for (i = 0; i < n_sgt; i++) { 27 + if (bo->sgts[i].sgl) { 28 + dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl, 29 + bo->sgts[i].nents, DMA_BIDIRECTIONAL); 30 + sg_free_table(&bo->sgts[i]); 31 + } 32 + } 33 + kfree(bo->sgts); 34 + } 35 + 36 + mutex_lock(&pfdev->shrinker_lock); 37 + if (!list_empty(&bo->base.madv_list)) 38 + list_del(&bo->base.madv_list); 39 + mutex_unlock(&pfdev->shrinker_lock); 28 40 29 41 drm_gem_shmem_free_object(obj); 30 42 } 31 43 44 + static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) 45 + { 46 + int ret; 47 + size_t size = obj->size; 48 + u64 align; 49 + struct panfrost_gem_object *bo = to_panfrost_bo(obj); 50 + struct panfrost_device *pfdev = obj->dev->dev_private; 51 + unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0; 52 + 53 + /* 54 + * Executable buffers cannot cross a 16MB boundary as the program 55 + * counter is 24-bits. We assume executable buffers will be less than 56 + * 16MB and aligning executable buffers to their size will avoid 57 + * crossing a 16MB boundary. 58 + */ 59 + if (!bo->noexec) 60 + align = size >> PAGE_SHIFT; 61 + else 62 + align = size >= SZ_2M ? 
SZ_2M >> PAGE_SHIFT : 0; 63 + 64 + spin_lock(&pfdev->mm_lock); 65 + ret = drm_mm_insert_node_generic(&pfdev->mm, &bo->node, 66 + size >> PAGE_SHIFT, align, color, 0); 67 + if (ret) 68 + goto out; 69 + 70 + if (!bo->is_heap) { 71 + ret = panfrost_mmu_map(bo); 72 + if (ret) 73 + drm_mm_remove_node(&bo->node); 74 + } 75 + out: 76 + spin_unlock(&pfdev->mm_lock); 77 + return ret; 78 + } 79 + 80 + static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) 81 + { 82 + struct panfrost_gem_object *bo = to_panfrost_bo(obj); 83 + struct panfrost_device *pfdev = obj->dev->dev_private; 84 + 85 + if (bo->is_mapped) 86 + panfrost_mmu_unmap(bo); 87 + 88 + spin_lock(&pfdev->mm_lock); 89 + if (drm_mm_node_allocated(&bo->node)) 90 + drm_mm_remove_node(&bo->node); 91 + spin_unlock(&pfdev->mm_lock); 92 + } 93 + 94 + static int panfrost_gem_pin(struct drm_gem_object *obj) 95 + { 96 + if (to_panfrost_bo(obj)->is_heap) 97 + return -EINVAL; 98 + 99 + return drm_gem_shmem_pin(obj); 100 + } 101 + 32 102 static const struct drm_gem_object_funcs panfrost_gem_funcs = { 33 103 .free = panfrost_gem_free_object, 104 + .open = panfrost_gem_open, 105 + .close = panfrost_gem_close, 34 106 .print_info = drm_gem_shmem_print_info, 35 - .pin = drm_gem_shmem_pin, 107 + .pin = panfrost_gem_pin, 36 108 .unpin = drm_gem_shmem_unpin, 37 109 .get_sg_table = drm_gem_shmem_get_sg_table, 38 110 .vmap = drm_gem_shmem_vmap, ··· 122 50 */ 123 51 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size) 124 52 { 125 - int ret; 126 - struct panfrost_device *pfdev = dev->dev_private; 127 53 struct panfrost_gem_object *obj; 128 - u64 align; 129 54 130 55 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 131 56 if (!obj) ··· 130 61 131 62 obj->base.base.funcs = &panfrost_gem_funcs; 132 63 133 - size = roundup(size, PAGE_SIZE); 134 - align = size >= SZ_2M ? 
SZ_2M >> PAGE_SHIFT : 0; 135 - 136 - spin_lock(&pfdev->mm_lock); 137 - ret = drm_mm_insert_node_generic(&pfdev->mm, &obj->node, 138 - size >> PAGE_SHIFT, align, 0, 0); 139 - spin_unlock(&pfdev->mm_lock); 140 - if (ret) 141 - goto free_obj; 142 - 143 64 return &obj->base.base; 65 + } 144 66 145 - free_obj: 146 - kfree(obj); 147 - return ERR_PTR(ret); 67 + struct panfrost_gem_object * 68 + panfrost_gem_create_with_handle(struct drm_file *file_priv, 69 + struct drm_device *dev, size_t size, 70 + u32 flags, 71 + uint32_t *handle) 72 + { 73 + int ret; 74 + struct drm_gem_shmem_object *shmem; 75 + struct panfrost_gem_object *bo; 76 + 77 + /* Round up heap allocations to 2MB to keep fault handling simple */ 78 + if (flags & PANFROST_BO_HEAP) 79 + size = roundup(size, SZ_2M); 80 + 81 + shmem = drm_gem_shmem_create(dev, size); 82 + if (IS_ERR(shmem)) 83 + return ERR_CAST(shmem); 84 + 85 + bo = to_panfrost_bo(&shmem->base); 86 + bo->noexec = !!(flags & PANFROST_BO_NOEXEC); 87 + bo->is_heap = !!(flags & PANFROST_BO_HEAP); 88 + 89 + /* 90 + * Allocate an id of idr table where the obj is registered 91 + * and handle has the id what user can see. 92 + */ 93 + ret = drm_gem_handle_create(file_priv, &shmem->base, handle); 94 + /* drop reference from allocate - handle holds it now. */ 95 + drm_gem_object_put_unlocked(&shmem->base); 96 + if (ret) 97 + return ERR_PTR(ret); 98 + 99 + return bo; 148 100 } 149 101 150 102 struct drm_gem_object * ··· 174 84 struct sg_table *sgt) 175 85 { 176 86 struct drm_gem_object *obj; 177 - struct panfrost_gem_object *pobj; 87 + struct panfrost_gem_object *bo; 178 88 179 89 obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); 180 90 if (IS_ERR(obj)) 181 91 return ERR_CAST(obj); 182 92 183 - pobj = to_panfrost_bo(obj); 184 - 185 - panfrost_mmu_map(pobj); 93 + bo = to_panfrost_bo(obj); 94 + bo->noexec = true; 186 95 187 96 return obj; 188 97 }
+19 -1
drivers/gpu/drm/panfrost/panfrost_gem.h
··· 9 9 10 10 struct panfrost_gem_object { 11 11 struct drm_gem_shmem_object base; 12 + struct sg_table *sgts; 12 13 13 14 struct drm_mm_node node; 14 - bool is_mapped; 15 + bool is_mapped :1; 16 + bool noexec :1; 17 + bool is_heap :1; 15 18 }; 16 19 17 20 static inline ··· 23 20 return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base); 24 21 } 25 22 23 + static inline 24 + struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node) 25 + { 26 + return container_of(node, struct panfrost_gem_object, node); 27 + } 28 + 26 29 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size); 27 30 28 31 struct drm_gem_object * 29 32 panfrost_gem_prime_import_sg_table(struct drm_device *dev, 30 33 struct dma_buf_attachment *attach, 31 34 struct sg_table *sgt); 35 + 36 + struct panfrost_gem_object * 37 + panfrost_gem_create_with_handle(struct drm_file *file_priv, 38 + struct drm_device *dev, size_t size, 39 + u32 flags, 40 + uint32_t *handle); 41 + 42 + void panfrost_gem_shrinker_init(struct drm_device *dev); 43 + void panfrost_gem_shrinker_cleanup(struct drm_device *dev); 32 44 33 45 #endif /* __PANFROST_GEM_H__ */
+107
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2019 Arm Ltd. 3 + * 4 + * Based on msm_gem_freedreno.c: 5 + * Copyright (C) 2016 Red Hat 6 + * Author: Rob Clark <robdclark@gmail.com> 7 + */ 8 + 9 + #include <linux/list.h> 10 + 11 + #include <drm/drm_device.h> 12 + #include <drm/drm_gem_shmem_helper.h> 13 + 14 + #include "panfrost_device.h" 15 + #include "panfrost_gem.h" 16 + #include "panfrost_mmu.h" 17 + 18 + static unsigned long 19 + panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) 20 + { 21 + struct panfrost_device *pfdev = 22 + container_of(shrinker, struct panfrost_device, shrinker); 23 + struct drm_gem_shmem_object *shmem; 24 + unsigned long count = 0; 25 + 26 + if (!mutex_trylock(&pfdev->shrinker_lock)) 27 + return 0; 28 + 29 + list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) { 30 + if (drm_gem_shmem_is_purgeable(shmem)) 31 + count += shmem->base.size >> PAGE_SHIFT; 32 + } 33 + 34 + mutex_unlock(&pfdev->shrinker_lock); 35 + 36 + return count; 37 + } 38 + 39 + static void panfrost_gem_purge(struct drm_gem_object *obj) 40 + { 41 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 42 + mutex_lock(&shmem->pages_lock); 43 + 44 + panfrost_mmu_unmap(to_panfrost_bo(obj)); 45 + drm_gem_shmem_purge_locked(obj); 46 + 47 + mutex_unlock(&shmem->pages_lock); 48 + } 49 + 50 + static unsigned long 51 + panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) 52 + { 53 + struct panfrost_device *pfdev = 54 + container_of(shrinker, struct panfrost_device, shrinker); 55 + struct drm_gem_shmem_object *shmem, *tmp; 56 + unsigned long freed = 0; 57 + 58 + if (!mutex_trylock(&pfdev->shrinker_lock)) 59 + return SHRINK_STOP; 60 + 61 + list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) { 62 + if (freed >= sc->nr_to_scan) 63 + break; 64 + if (drm_gem_shmem_is_purgeable(shmem)) { 65 + panfrost_gem_purge(&shmem->base); 66 + freed += shmem->base.size >> PAGE_SHIFT; 67 
+ list_del_init(&shmem->madv_list); 68 + } 69 + } 70 + 71 + mutex_unlock(&pfdev->shrinker_lock); 72 + 73 + if (freed > 0) 74 + pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT); 75 + 76 + return freed; 77 + } 78 + 79 + /** 80 + * panfrost_gem_shrinker_init - Initialize panfrost shrinker 81 + * @dev: DRM device 82 + * 83 + * This function registers and sets up the panfrost shrinker. 84 + */ 85 + void panfrost_gem_shrinker_init(struct drm_device *dev) 86 + { 87 + struct panfrost_device *pfdev = dev->dev_private; 88 + pfdev->shrinker.count_objects = panfrost_gem_shrinker_count; 89 + pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan; 90 + pfdev->shrinker.seeks = DEFAULT_SEEKS; 91 + WARN_ON(register_shrinker(&pfdev->shrinker)); 92 + } 93 + 94 + /** 95 + * panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker 96 + * @dev: DRM device 97 + * 98 + * This function unregisters the panfrost shrinker. 99 + */ 100 + void panfrost_gem_shrinker_cleanup(struct drm_device *dev) 101 + { 102 + struct panfrost_device *pfdev = dev->dev_private; 103 + 104 + if (pfdev->shrinker.nr_deferred) { 105 + unregister_shrinker(&pfdev->shrinker); 106 + } 107 + }
+4 -9
drivers/gpu/drm/panfrost/panfrost_job.c
··· 6 6 #include <linux/io.h> 7 7 #include <linux/platform_device.h> 8 8 #include <linux/pm_runtime.h> 9 - #include <linux/reservation.h> 9 + #include <linux/dma-resv.h> 10 10 #include <drm/gpu_scheduler.h> 11 11 #include <drm/panfrost_drm.h> 12 12 ··· 199 199 int i; 200 200 201 201 for (i = 0; i < bo_count; i++) 202 - implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv); 202 + implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv); 203 203 } 204 204 205 205 static void panfrost_attach_object_fences(struct drm_gem_object **bos, ··· 209 209 int i; 210 210 211 211 for (i = 0; i < bo_count; i++) 212 - reservation_object_add_excl_fence(bos[i]->resv, fence); 212 + dma_resv_add_excl_fence(bos[i]->resv, fence); 213 213 } 214 214 215 215 int panfrost_job_push(struct panfrost_job *job) ··· 395 395 /* panfrost_core_dump(pfdev); */ 396 396 397 397 panfrost_devfreq_record_transition(pfdev, js); 398 - panfrost_gpu_soft_reset(pfdev); 399 - 400 - /* TODO: Re-enable all other address spaces */ 401 - panfrost_mmu_enable(pfdev, 0); 402 - panfrost_gpu_power_on(pfdev); 403 - panfrost_job_enable_interrupts(pfdev); 398 + panfrost_device_reset(pfdev); 404 399 405 400 for (i = 0; i < NUM_JOB_SLOTS; i++) 406 401 drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
+176 -40
drivers/gpu/drm/panfrost/panfrost_mmu.c
··· 2 2 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */ 3 3 #include <linux/bitfield.h> 4 4 #include <linux/delay.h> 5 + #include <linux/dma-mapping.h> 5 6 #include <linux/interrupt.h> 6 7 #include <linux/io.h> 7 8 #include <linux/iopoll.h> ··· 10 9 #include <linux/iommu.h> 11 10 #include <linux/platform_device.h> 12 11 #include <linux/pm_runtime.h> 12 + #include <linux/shmem_fs.h> 13 13 #include <linux/sizes.h> 14 14 15 15 #include "panfrost_device.h" ··· 107 105 return ret; 108 106 } 109 107 110 - void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr) 108 + static void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr) 111 109 { 112 110 struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg; 113 111 u64 transtab = cfg->arm_mali_lpae_cfg.transtab; 114 112 u64 memattr = cfg->arm_mali_lpae_cfg.memattr; 115 - 116 - mmu_write(pfdev, MMU_INT_CLEAR, ~0); 117 - mmu_write(pfdev, MMU_INT_MASK, ~0); 118 113 119 114 mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL); 120 115 mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32); ··· 136 137 write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); 137 138 } 138 139 140 + void panfrost_mmu_reset(struct panfrost_device *pfdev) 141 + { 142 + panfrost_mmu_enable(pfdev, 0); 143 + 144 + mmu_write(pfdev, MMU_INT_CLEAR, ~0); 145 + mmu_write(pfdev, MMU_INT_MASK, ~0); 146 + } 147 + 139 148 static size_t get_pgsize(u64 addr, size_t size) 140 149 { 141 150 if (addr & (SZ_2M - 1) || size < SZ_2M) ··· 152 145 return SZ_2M; 153 146 } 154 147 155 - int panfrost_mmu_map(struct panfrost_gem_object *bo) 148 + static int mmu_map_sg(struct panfrost_device *pfdev, u64 iova, 149 + int prot, struct sg_table *sgt) 156 150 { 157 - struct drm_gem_object *obj = &bo->base.base; 158 - struct panfrost_device *pfdev = to_panfrost_device(obj->dev); 159 - struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops; 160 - u64 iova = bo->node.start << PAGE_SHIFT; 161 151 unsigned int count; 162 152 struct scatterlist *sgl; 163 - 
struct sg_table *sgt; 164 - int ret; 165 - 166 - if (WARN_ON(bo->is_mapped)) 167 - return 0; 168 - 169 - sgt = drm_gem_shmem_get_pages_sgt(obj); 170 - if (WARN_ON(IS_ERR(sgt))) 171 - return PTR_ERR(sgt); 172 - 173 - ret = pm_runtime_get_sync(pfdev->dev); 174 - if (ret < 0) 175 - return ret; 153 + struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops; 154 + u64 start_iova = iova; 176 155 177 156 mutex_lock(&pfdev->mmu->lock); 178 157 ··· 171 178 while (len) { 172 179 size_t pgsize = get_pgsize(iova | paddr, len); 173 180 174 - ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ); 181 + ops->map(ops, iova, paddr, pgsize, prot); 175 182 iova += pgsize; 176 183 paddr += pgsize; 177 184 len -= pgsize; 178 185 } 179 186 } 180 187 181 - mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT, 182 - bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT); 188 + mmu_hw_do_operation(pfdev, 0, start_iova, iova - start_iova, 189 + AS_COMMAND_FLUSH_PT); 183 190 184 191 mutex_unlock(&pfdev->mmu->lock); 192 + 193 + return 0; 194 + } 195 + 196 + int panfrost_mmu_map(struct panfrost_gem_object *bo) 197 + { 198 + struct drm_gem_object *obj = &bo->base.base; 199 + struct panfrost_device *pfdev = to_panfrost_device(obj->dev); 200 + struct sg_table *sgt; 201 + int ret; 202 + int prot = IOMMU_READ | IOMMU_WRITE; 203 + 204 + if (WARN_ON(bo->is_mapped)) 205 + return 0; 206 + 207 + if (bo->noexec) 208 + prot |= IOMMU_NOEXEC; 209 + 210 + sgt = drm_gem_shmem_get_pages_sgt(obj); 211 + if (WARN_ON(IS_ERR(sgt))) 212 + return PTR_ERR(sgt); 213 + 214 + ret = pm_runtime_get_sync(pfdev->dev); 215 + if (ret < 0) 216 + return ret; 217 + 218 + mmu_map_sg(pfdev, bo->node.start << PAGE_SHIFT, prot, sgt); 185 219 186 220 pm_runtime_mark_last_busy(pfdev->dev); 187 221 pm_runtime_put_autosuspend(pfdev->dev); ··· 242 222 size_t unmapped_page; 243 223 size_t pgsize = get_pgsize(iova, len - unmapped_len); 244 224 245 - unmapped_page = ops->unmap(ops, iova, pgsize); 246 - if (!unmapped_page) 247 - break; 248 
- 249 - iova += unmapped_page; 250 - unmapped_len += unmapped_page; 225 + if (ops->iova_to_phys(ops, iova)) { 226 + unmapped_page = ops->unmap(ops, iova, pgsize); 227 + WARN_ON(unmapped_page != pgsize); 228 + } 229 + iova += pgsize; 230 + unmapped_len += pgsize; 251 231 } 252 232 253 233 mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT, ··· 283 263 .tlb_sync = mmu_tlb_sync_context, 284 264 }; 285 265 266 + static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) 267 + { 268 + struct drm_mm_node *node; 269 + u64 offset = addr >> PAGE_SHIFT; 270 + 271 + drm_mm_for_each_node(node, &pfdev->mm) { 272 + if (offset >= node->start && offset < (node->start + node->size)) 273 + return node; 274 + } 275 + return NULL; 276 + } 277 + 278 + #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) 279 + 280 + int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) 281 + { 282 + int ret, i; 283 + struct drm_mm_node *node; 284 + struct panfrost_gem_object *bo; 285 + struct address_space *mapping; 286 + pgoff_t page_offset; 287 + struct sg_table *sgt; 288 + struct page **pages; 289 + 290 + node = addr_to_drm_mm_node(pfdev, as, addr); 291 + if (!node) 292 + return -ENOENT; 293 + 294 + bo = drm_mm_node_to_panfrost_bo(node); 295 + if (!bo->is_heap) { 296 + dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", 297 + node->start << PAGE_SHIFT); 298 + return -EINVAL; 299 + } 300 + /* Assume 2MB alignment and size multiple */ 301 + addr &= ~((u64)SZ_2M - 1); 302 + page_offset = addr >> PAGE_SHIFT; 303 + page_offset -= node->start; 304 + 305 + mutex_lock(&bo->base.pages_lock); 306 + 307 + if (!bo->base.pages) { 308 + bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M, 309 + sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO); 310 + if (!bo->sgts) 311 + return -ENOMEM; 312 + 313 + pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, 314 + sizeof(struct page *), GFP_KERNEL | __GFP_ZERO); 315 + if (!pages) { 316 + 
kfree(bo->sgts); 317 + bo->sgts = NULL; 318 + return -ENOMEM; 319 + } 320 + bo->base.pages = pages; 321 + bo->base.pages_use_count = 1; 322 + } else 323 + pages = bo->base.pages; 324 + 325 + mapping = bo->base.base.filp->f_mapping; 326 + mapping_set_unevictable(mapping); 327 + 328 + for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) { 329 + pages[i] = shmem_read_mapping_page(mapping, i); 330 + if (IS_ERR(pages[i])) { 331 + mutex_unlock(&bo->base.pages_lock); 332 + ret = PTR_ERR(pages[i]); 333 + goto err_pages; 334 + } 335 + } 336 + 337 + mutex_unlock(&bo->base.pages_lock); 338 + 339 + sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; 340 + ret = sg_alloc_table_from_pages(sgt, pages + page_offset, 341 + NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL); 342 + if (ret) 343 + goto err_pages; 344 + 345 + if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) { 346 + ret = -EINVAL; 347 + goto err_map; 348 + } 349 + 350 + mmu_map_sg(pfdev, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); 351 + 352 + bo->is_mapped = true; 353 + 354 + dev_dbg(pfdev->dev, "mapped page fault @ %llx", addr); 355 + 356 + return 0; 357 + 358 + err_map: 359 + sg_free_table(sgt); 360 + err_pages: 361 + drm_gem_shmem_put_pages(&bo->base); 362 + return ret; 363 + } 364 + 286 365 static const char *access_type_name(struct panfrost_device *pfdev, 287 366 u32 fault_status) 288 367 { ··· 406 287 static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data) 407 288 { 408 289 struct panfrost_device *pfdev = data; 409 - u32 status = mmu_read(pfdev, MMU_INT_STAT); 410 - int i; 411 290 412 - if (!status) 291 + if (!mmu_read(pfdev, MMU_INT_STAT)) 413 292 return IRQ_NONE; 414 293 415 - dev_err(pfdev->dev, "mmu irq status=%x\n", status); 294 + mmu_write(pfdev, MMU_INT_MASK, 0); 295 + return IRQ_WAKE_THREAD; 296 + } 297 + 298 + static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) 299 + { 300 + struct panfrost_device *pfdev = data; 301 + u32 status = mmu_read(pfdev, 
MMU_INT_RAWSTAT); 302 + int i, ret; 416 303 417 304 for (i = 0; status; i++) { 418 305 u32 mask = BIT(i) | BIT(i + 16); ··· 439 314 exception_type = fault_status & 0xFF; 440 315 access_type = (fault_status >> 8) & 0x3; 441 316 source_id = (fault_status >> 16); 317 + 318 + /* Page fault only */ 319 + if ((status & mask) == BIT(i)) { 320 + WARN_ON(exception_type < 0xC1 || exception_type > 0xC4); 321 + 322 + ret = panfrost_mmu_map_fault_addr(pfdev, i, addr); 323 + if (!ret) { 324 + mmu_write(pfdev, MMU_INT_CLEAR, BIT(i)); 325 + status &= ~mask; 326 + continue; 327 + } 328 + } 442 329 443 330 /* terminal fault, print info about the fault */ 444 331 dev_err(pfdev->dev, ··· 474 337 status &= ~mask; 475 338 } 476 339 340 + mmu_write(pfdev, MMU_INT_MASK, ~0); 477 341 return IRQ_HANDLED; 478 342 }; 479 343 ··· 493 355 if (irq <= 0) 494 356 return -ENODEV; 495 357 496 - err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler, 497 - IRQF_SHARED, "mmu", pfdev); 358 + err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler, 359 + panfrost_mmu_irq_handler_thread, 360 + IRQF_SHARED, "mmu", pfdev); 498 361 499 362 if (err) { 500 363 dev_err(pfdev->dev, "failed to request mmu irq"); 501 364 return err; 502 365 } 503 - mmu_write(pfdev, MMU_INT_CLEAR, ~0); 504 - mmu_write(pfdev, MMU_INT_MASK, ~0); 505 - 506 366 pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) { 507 367 .pgsize_bitmap = SZ_4K | SZ_2M, 508 368 .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
+1 -2
drivers/gpu/drm/panfrost/panfrost_mmu.h
··· 11 11 12 12 int panfrost_mmu_init(struct panfrost_device *pfdev); 13 13 void panfrost_mmu_fini(struct panfrost_device *pfdev); 14 - 15 - void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr); 14 + void panfrost_mmu_reset(struct panfrost_device *pfdev); 16 15 17 16 #endif
+27 -2
drivers/gpu/drm/pl111/pl111_display.c
··· 128 128 struct drm_framebuffer *fb = plane->state->fb; 129 129 struct drm_connector *connector = priv->connector; 130 130 struct drm_bridge *bridge = priv->bridge; 131 + bool grayscale = false; 131 132 u32 cntl; 132 133 u32 ppl, hsw, hfp, hbp; 133 134 u32 lpp, vsw, vfp, vbp; ··· 188 187 if (connector->display_info.bus_flags & 189 188 DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) 190 189 tim2 |= TIM2_IPC; 190 + 191 + if (connector->display_info.num_bus_formats == 1 && 192 + connector->display_info.bus_formats[0] == 193 + MEDIA_BUS_FMT_Y8_1X8) 194 + grayscale = true; 195 + 196 + /* 197 + * The AC pin bias frequency is set to max count when using 198 + * grayscale so at least once in a while we will reverse 199 + * polarity and get rid of any DC built up that could 200 + * damage the display. 201 + */ 202 + if (grayscale) 203 + tim2 |= TIM2_ACB_MASK; 191 204 } 192 205 193 206 if (bridge) { ··· 233 218 234 219 writel(0, priv->regs + CLCD_TIM3); 235 220 236 - /* Hard-code TFT panel */ 237 - cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1); 221 + /* 222 + * Detect grayscale bus format. We do not support a grayscale mode 223 + * toward userspace, instead we expose an RGB24 buffer and then the 224 + * hardware will activate its grayscaler to convert to the grayscale 225 + * format. 226 + */ 227 + if (grayscale) 228 + cntl = CNTL_LCDEN | CNTL_LCDMONO8; 229 + else 230 + /* Else we assume TFT display */ 231 + cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1); 232 + 238 233 /* On the ST Micro variant, assume all 24 bits are connected */ 239 234 if (priv->variant->st_bitmux_control) 240 235 cntl |= CNTL_ST_CDWID_24;
+1 -1
drivers/gpu/drm/qxl/qxl_debugfs.c
··· 57 57 struct qxl_bo *bo; 58 58 59 59 list_for_each_entry(bo, &qdev->gem.objects, list) { 60 - struct reservation_object_list *fobj; 60 + struct dma_resv_list *fobj; 61 61 int rel; 62 62 63 63 rcu_read_lock();
+3 -3
drivers/gpu/drm/qxl/qxl_release.c
··· 238 238 return ret; 239 239 } 240 240 241 - ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1); 241 + ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1); 242 242 if (ret) 243 243 return ret; 244 244 ··· 458 458 list_for_each_entry(entry, &release->bos, head) { 459 459 bo = entry->bo; 460 460 461 - reservation_object_add_shared_fence(bo->base.resv, &release->base); 461 + dma_resv_add_shared_fence(bo->base.resv, &release->base); 462 462 ttm_bo_add_to_lru(bo); 463 - reservation_object_unlock(bo->base.resv); 463 + dma_resv_unlock(bo->base.resv); 464 464 } 465 465 spin_unlock(&glob->lru_lock); 466 466 ww_acquire_fini(&release->ticket);
+1 -1
drivers/gpu/drm/radeon/cik.c
··· 3659 3659 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, 3660 3660 uint64_t src_offset, uint64_t dst_offset, 3661 3661 unsigned num_gpu_pages, 3662 - struct reservation_object *resv) 3662 + struct dma_resv *resv) 3663 3663 { 3664 3664 struct radeon_fence *fence; 3665 3665 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/cik_sdma.c
··· 579 579 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, 580 580 uint64_t src_offset, uint64_t dst_offset, 581 581 unsigned num_gpu_pages, 582 - struct reservation_object *resv) 582 + struct dma_resv *resv) 583 583 { 584 584 struct radeon_fence *fence; 585 585 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/evergreen_dma.c
··· 108 108 uint64_t src_offset, 109 109 uint64_t dst_offset, 110 110 unsigned num_gpu_pages, 111 - struct reservation_object *resv) 111 + struct dma_resv *resv) 112 112 { 113 113 struct radeon_fence *fence; 114 114 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/r100.c
··· 891 891 uint64_t src_offset, 892 892 uint64_t dst_offset, 893 893 unsigned num_gpu_pages, 894 - struct reservation_object *resv) 894 + struct dma_resv *resv) 895 895 { 896 896 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 897 897 struct radeon_fence *fence;
+1 -1
drivers/gpu/drm/radeon/r200.c
··· 84 84 uint64_t src_offset, 85 85 uint64_t dst_offset, 86 86 unsigned num_gpu_pages, 87 - struct reservation_object *resv) 87 + struct dma_resv *resv) 88 88 { 89 89 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 90 90 struct radeon_fence *fence;
+1 -1
drivers/gpu/drm/radeon/r600.c
··· 2963 2963 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, 2964 2964 uint64_t src_offset, uint64_t dst_offset, 2965 2965 unsigned num_gpu_pages, 2966 - struct reservation_object *resv) 2966 + struct dma_resv *resv) 2967 2967 { 2968 2968 struct radeon_fence *fence; 2969 2969 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/r600_dma.c
··· 444 444 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, 445 445 uint64_t src_offset, uint64_t dst_offset, 446 446 unsigned num_gpu_pages, 447 - struct reservation_object *resv) 447 + struct dma_resv *resv) 448 448 { 449 449 struct radeon_fence *fence; 450 450 struct radeon_sync sync;
+4 -4
drivers/gpu/drm/radeon/radeon.h
··· 619 619 struct radeon_fence *fence); 620 620 int radeon_sync_resv(struct radeon_device *rdev, 621 621 struct radeon_sync *sync, 622 - struct reservation_object *resv, 622 + struct dma_resv *resv, 623 623 bool shared); 624 624 int radeon_sync_rings(struct radeon_device *rdev, 625 625 struct radeon_sync *sync, ··· 1912 1912 uint64_t src_offset, 1913 1913 uint64_t dst_offset, 1914 1914 unsigned num_gpu_pages, 1915 - struct reservation_object *resv); 1915 + struct dma_resv *resv); 1916 1916 u32 blit_ring_index; 1917 1917 struct radeon_fence *(*dma)(struct radeon_device *rdev, 1918 1918 uint64_t src_offset, 1919 1919 uint64_t dst_offset, 1920 1920 unsigned num_gpu_pages, 1921 - struct reservation_object *resv); 1921 + struct dma_resv *resv); 1922 1922 u32 dma_ring_index; 1923 1923 /* method used for bo copy */ 1924 1924 struct radeon_fence *(*copy)(struct radeon_device *rdev, 1925 1925 uint64_t src_offset, 1926 1926 uint64_t dst_offset, 1927 1927 unsigned num_gpu_pages, 1928 - struct reservation_object *resv); 1928 + struct dma_resv *resv); 1929 1929 /* ring used for bo copies */ 1930 1930 u32 copy_ring_index; 1931 1931 } copy;
+9 -9
drivers/gpu/drm/radeon/radeon_asic.h
··· 86 86 uint64_t src_offset, 87 87 uint64_t dst_offset, 88 88 unsigned num_gpu_pages, 89 - struct reservation_object *resv); 89 + struct dma_resv *resv); 90 90 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 91 91 uint32_t tiling_flags, uint32_t pitch, 92 92 uint32_t offset, uint32_t obj_size); ··· 157 157 uint64_t src_offset, 158 158 uint64_t dst_offset, 159 159 unsigned num_gpu_pages, 160 - struct reservation_object *resv); 160 + struct dma_resv *resv); 161 161 void r200_set_safe_registers(struct radeon_device *rdev); 162 162 163 163 /* ··· 347 347 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, 348 348 uint64_t src_offset, uint64_t dst_offset, 349 349 unsigned num_gpu_pages, 350 - struct reservation_object *resv); 350 + struct dma_resv *resv); 351 351 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, 352 352 uint64_t src_offset, uint64_t dst_offset, 353 353 unsigned num_gpu_pages, 354 - struct reservation_object *resv); 354 + struct dma_resv *resv); 355 355 void r600_hpd_init(struct radeon_device *rdev); 356 356 void r600_hpd_fini(struct radeon_device *rdev); 357 357 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); ··· 473 473 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, 474 474 uint64_t src_offset, uint64_t dst_offset, 475 475 unsigned num_gpu_pages, 476 - struct reservation_object *resv); 476 + struct dma_resv *resv); 477 477 u32 rv770_get_xclk(struct radeon_device *rdev); 478 478 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 479 479 int rv770_get_temp(struct radeon_device *rdev); ··· 547 547 struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, 548 548 uint64_t src_offset, uint64_t dst_offset, 549 549 unsigned num_gpu_pages, 550 - struct reservation_object *resv); 550 + struct dma_resv *resv); 551 551 int evergreen_get_temp(struct radeon_device *rdev); 552 552 int evergreen_get_allowed_info_register(struct radeon_device *rdev, 553 
553 u32 reg, u32 *val); ··· 725 725 struct radeon_fence *si_copy_dma(struct radeon_device *rdev, 726 726 uint64_t src_offset, uint64_t dst_offset, 727 727 unsigned num_gpu_pages, 728 - struct reservation_object *resv); 728 + struct dma_resv *resv); 729 729 730 730 void si_dma_vm_copy_pages(struct radeon_device *rdev, 731 731 struct radeon_ib *ib, ··· 796 796 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, 797 797 uint64_t src_offset, uint64_t dst_offset, 798 798 unsigned num_gpu_pages, 799 - struct reservation_object *resv); 799 + struct dma_resv *resv); 800 800 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, 801 801 uint64_t src_offset, uint64_t dst_offset, 802 802 unsigned num_gpu_pages, 803 - struct reservation_object *resv); 803 + struct dma_resv *resv); 804 804 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); 805 805 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 806 806 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+1 -1
drivers/gpu/drm/radeon/radeon_benchmark.c
··· 35 35 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, 36 36 uint64_t saddr, uint64_t daddr, 37 37 int flag, int n, 38 - struct reservation_object *resv) 38 + struct dma_resv *resv) 39 39 { 40 40 unsigned long start_jiffies; 41 41 unsigned long end_jiffies;
+1 -1
drivers/gpu/drm/radeon/radeon_cs.c
··· 255 255 int r; 256 256 257 257 list_for_each_entry(reloc, &p->validated, tv.head) { 258 - struct reservation_object *resv; 258 + struct dma_resv *resv; 259 259 260 260 resv = reloc->robj->tbo.base.resv; 261 261 r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
+1 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 533 533 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 534 534 goto cleanup; 535 535 } 536 - work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv)); 536 + work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv)); 537 537 radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); 538 538 radeon_bo_unreserve(new_rbo); 539 539
+3 -3
drivers/gpu/drm/radeon/radeon_gem.c
··· 114 114 } 115 115 if (domain == RADEON_GEM_DOMAIN_CPU) { 116 116 /* Asking for cpu access wait for object idle */ 117 - r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 117 + r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 118 118 if (!r) 119 119 r = -EBUSY; 120 120 ··· 449 449 } 450 450 robj = gem_to_radeon_bo(gobj); 451 451 452 - r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true); 452 + r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true); 453 453 if (r == 0) 454 454 r = -EBUSY; 455 455 else ··· 478 478 } 479 479 robj = gem_to_radeon_bo(gobj); 480 480 481 - ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 481 + ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 482 482 if (ret == 0) 483 483 r = -EBUSY; 484 484 else if (ret < 0)
+1 -1
drivers/gpu/drm/radeon/radeon_mn.c
··· 163 163 continue; 164 164 } 165 165 166 - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 166 + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, 167 167 true, false, MAX_SCHEDULE_TIMEOUT); 168 168 if (r <= 0) 169 169 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+7 -7
drivers/gpu/drm/radeon/radeon_object.c
··· 183 183 int radeon_bo_create(struct radeon_device *rdev, 184 184 unsigned long size, int byte_align, bool kernel, 185 185 u32 domain, u32 flags, struct sg_table *sg, 186 - struct reservation_object *resv, 186 + struct dma_resv *resv, 187 187 struct radeon_bo **bo_ptr) 188 188 { 189 189 struct radeon_bo *bo; ··· 610 610 int steal; 611 611 int i; 612 612 613 - reservation_object_assert_held(bo->tbo.base.resv); 613 + dma_resv_assert_held(bo->tbo.base.resv); 614 614 615 615 if (!bo->tiling_flags) 616 616 return 0; ··· 736 736 uint32_t *tiling_flags, 737 737 uint32_t *pitch) 738 738 { 739 - reservation_object_assert_held(bo->tbo.base.resv); 739 + dma_resv_assert_held(bo->tbo.base.resv); 740 740 741 741 if (tiling_flags) 742 742 *tiling_flags = bo->tiling_flags; ··· 748 748 bool force_drop) 749 749 { 750 750 if (!force_drop) 751 - reservation_object_assert_held(bo->tbo.base.resv); 751 + dma_resv_assert_held(bo->tbo.base.resv); 752 752 753 753 if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) 754 754 return 0; ··· 870 870 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, 871 871 bool shared) 872 872 { 873 - struct reservation_object *resv = bo->tbo.base.resv; 873 + struct dma_resv *resv = bo->tbo.base.resv; 874 874 875 875 if (shared) 876 - reservation_object_add_shared_fence(resv, &fence->base); 876 + dma_resv_add_shared_fence(resv, &fence->base); 877 877 else 878 - reservation_object_add_excl_fence(resv, &fence->base); 878 + dma_resv_add_excl_fence(resv, &fence->base); 879 879 }
+1 -1
drivers/gpu/drm/radeon/radeon_object.h
··· 126 126 unsigned long size, int byte_align, 127 127 bool kernel, u32 domain, u32 flags, 128 128 struct sg_table *sg, 129 - struct reservation_object *resv, 129 + struct dma_resv *resv, 130 130 struct radeon_bo **bo_ptr); 131 131 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); 132 132 extern void radeon_bo_kunmap(struct radeon_bo *bo);
+3 -3
drivers/gpu/drm/radeon/radeon_prime.c
··· 63 63 struct dma_buf_attachment *attach, 64 64 struct sg_table *sg) 65 65 { 66 - struct reservation_object *resv = attach->dmabuf->resv; 66 + struct dma_resv *resv = attach->dmabuf->resv; 67 67 struct radeon_device *rdev = dev->dev_private; 68 68 struct radeon_bo *bo; 69 69 int ret; 70 70 71 - reservation_object_lock(resv, NULL); 71 + dma_resv_lock(resv, NULL); 72 72 ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false, 73 73 RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); 74 - reservation_object_unlock(resv); 74 + dma_resv_unlock(resv); 75 75 if (ret) 76 76 return ERR_PTR(ret); 77 77
+5 -5
drivers/gpu/drm/radeon/radeon_sync.c
··· 87 87 */ 88 88 int radeon_sync_resv(struct radeon_device *rdev, 89 89 struct radeon_sync *sync, 90 - struct reservation_object *resv, 90 + struct dma_resv *resv, 91 91 bool shared) 92 92 { 93 - struct reservation_object_list *flist; 93 + struct dma_resv_list *flist; 94 94 struct dma_fence *f; 95 95 struct radeon_fence *fence; 96 96 unsigned i; 97 97 int r = 0; 98 98 99 99 /* always sync to the exclusive fence */ 100 - f = reservation_object_get_excl(resv); 100 + f = dma_resv_get_excl(resv); 101 101 fence = f ? to_radeon_fence(f) : NULL; 102 102 if (fence && fence->rdev == rdev) 103 103 radeon_sync_fence(sync, fence); 104 104 else if (f) 105 105 r = dma_fence_wait(f, true); 106 106 107 - flist = reservation_object_get_list(resv); 107 + flist = dma_resv_get_list(resv); 108 108 if (shared || !flist || r) 109 109 return r; 110 110 111 111 for (i = 0; i < flist->shared_count; ++i) { 112 112 f = rcu_dereference_protected(flist->shared[i], 113 - reservation_object_held(resv)); 113 + dma_resv_held(resv)); 114 114 fence = to_radeon_fence(f); 115 115 if (fence && fence->rdev == rdev) 116 116 radeon_sync_fence(sync, fence);
+1 -1
drivers/gpu/drm/radeon/radeon_uvd.c
··· 477 477 return -EINVAL; 478 478 } 479 479 480 - f = reservation_object_get_excl(bo->tbo.base.resv); 480 + f = dma_resv_get_excl(bo->tbo.base.resv); 481 481 if (f) { 482 482 r = radeon_fence_wait((struct radeon_fence *)f, false); 483 483 if (r) {
+1 -1
drivers/gpu/drm/radeon/radeon_vm.c
··· 831 831 int r; 832 832 833 833 radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true); 834 - r = reservation_object_reserve_shared(pt->tbo.base.resv, 1); 834 + r = dma_resv_reserve_shared(pt->tbo.base.resv, 1); 835 835 if (r) 836 836 return r; 837 837
+1 -1
drivers/gpu/drm/radeon/rv770_dma.c
··· 42 42 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, 43 43 uint64_t src_offset, uint64_t dst_offset, 44 44 unsigned num_gpu_pages, 45 - struct reservation_object *resv) 45 + struct dma_resv *resv) 46 46 { 47 47 struct radeon_fence *fence; 48 48 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/si_dma.c
··· 231 231 struct radeon_fence *si_copy_dma(struct radeon_device *rdev, 232 232 uint64_t src_offset, uint64_t dst_offset, 233 233 unsigned num_gpu_pages, 234 - struct reservation_object *resv) 234 + struct dma_resv *resv) 235 235 { 236 236 struct radeon_fence *fence; 237 237 struct radeon_sync sync;
+50 -4
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
··· 97 97 return crtcs; 98 98 } 99 99 100 + static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev, 101 + struct platform_device **pdev_out) 102 + { 103 + struct platform_device *pdev; 104 + struct device_node *remote; 105 + 106 + remote = of_graph_get_remote_node(dev->of_node, 1, -1); 107 + if (!remote) 108 + return -ENODEV; 109 + 110 + if (!of_device_is_compatible(remote, "hdmi-connector")) { 111 + of_node_put(remote); 112 + return -ENODEV; 113 + } 114 + 115 + pdev = of_find_device_by_node(remote); 116 + of_node_put(remote); 117 + if (!pdev) 118 + return -ENODEV; 119 + 120 + *pdev_out = pdev; 121 + return 0; 122 + } 123 + 100 124 static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, 101 125 void *data) 102 126 { 103 - struct platform_device *pdev = to_platform_device(dev); 127 + struct platform_device *pdev = to_platform_device(dev), *connector_pdev; 104 128 struct dw_hdmi_plat_data *plat_data; 105 129 struct drm_device *drm = data; 106 130 struct device_node *phy_node; ··· 174 150 return PTR_ERR(hdmi->regulator); 175 151 } 176 152 153 + ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev); 154 + if (!ret) { 155 + hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev, 156 + "ddc-en", GPIOD_OUT_HIGH); 157 + platform_device_put(connector_pdev); 158 + 159 + if (IS_ERR(hdmi->ddc_en)) { 160 + dev_err(dev, "Couldn't get ddc-en gpio\n"); 161 + return PTR_ERR(hdmi->ddc_en); 162 + } 163 + } 164 + 177 165 ret = regulator_enable(hdmi->regulator); 178 166 if (ret) { 179 167 dev_err(dev, "Failed to enable regulator\n"); 180 - return ret; 168 + goto err_unref_ddc_en; 181 169 } 170 + 171 + gpiod_set_value(hdmi->ddc_en, 1); 182 172 183 173 ret = reset_control_deassert(hdmi->rst_ctrl); 184 174 if (ret) { 185 175 dev_err(dev, "Could not deassert ctrl reset control\n"); 186 - goto err_disable_regulator; 176 + goto err_disable_ddc_en; 187 177 } 188 178 189 179 ret = clk_prepare_enable(hdmi->clk_tmds); ··· 250 212 
clk_disable_unprepare(hdmi->clk_tmds); 251 213 err_assert_ctrl_reset: 252 214 reset_control_assert(hdmi->rst_ctrl); 253 - err_disable_regulator: 215 + err_disable_ddc_en: 216 + gpiod_set_value(hdmi->ddc_en, 0); 254 217 regulator_disable(hdmi->regulator); 218 + err_unref_ddc_en: 219 + if (hdmi->ddc_en) 220 + gpiod_put(hdmi->ddc_en); 255 221 256 222 return ret; 257 223 } ··· 269 227 sun8i_hdmi_phy_remove(hdmi); 270 228 clk_disable_unprepare(hdmi->clk_tmds); 271 229 reset_control_assert(hdmi->rst_ctrl); 230 + gpiod_set_value(hdmi->ddc_en, 0); 272 231 regulator_disable(hdmi->regulator); 232 + 233 + if (hdmi->ddc_en) 234 + gpiod_put(hdmi->ddc_en); 273 235 } 274 236 275 237 static const struct component_ops sun8i_dw_hdmi_ops = {
+2
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
··· 9 9 #include <drm/bridge/dw_hdmi.h> 10 10 #include <drm/drm_encoder.h> 11 11 #include <linux/clk.h> 12 + #include <linux/gpio/consumer.h> 12 13 #include <linux/regmap.h> 13 14 #include <linux/regulator/consumer.h> 14 15 #include <linux/reset.h> ··· 191 190 struct regulator *regulator; 192 191 const struct sun8i_dw_hdmi_quirks *quirks; 193 192 struct reset_control *rst_ctrl; 193 + struct gpio_desc *ddc_en; 194 194 }; 195 195 196 196 static inline struct sun8i_dw_hdmi *
+9 -4
drivers/gpu/drm/tegra/dc.c
··· 6 6 7 7 #include <linux/clk.h> 8 8 #include <linux/debugfs.h> 9 + #include <linux/delay.h> 9 10 #include <linux/iommu.h> 11 + #include <linux/module.h> 10 12 #include <linux/of_device.h> 11 13 #include <linux/pm_runtime.h> 12 14 #include <linux/reset.h> 13 15 14 16 #include <soc/tegra/pmc.h> 15 17 18 + #include <drm/drm_atomic.h> 19 + #include <drm/drm_atomic_helper.h> 20 + #include <drm/drm_debugfs.h> 21 + #include <drm/drm_fourcc.h> 22 + #include <drm/drm_plane_helper.h> 23 + #include <drm/drm_vblank.h> 24 + 16 25 #include "dc.h" 17 26 #include "drm.h" 18 27 #include "gem.h" 19 28 #include "hub.h" 20 29 #include "plane.h" 21 - 22 - #include <drm/drm_atomic.h> 23 - #include <drm/drm_atomic_helper.h> 24 - #include <drm/drm_plane_helper.h> 25 30 26 31 static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc, 27 32 struct drm_crtc_state *state);
+3 -2
drivers/gpu/drm/tegra/dpaux.c
··· 8 8 #include <linux/gpio.h> 9 9 #include <linux/interrupt.h> 10 10 #include <linux/io.h> 11 + #include <linux/module.h> 11 12 #include <linux/of_gpio.h> 12 13 #include <linux/pinctrl/pinconf-generic.h> 13 14 #include <linux/pinctrl/pinctrl.h> 14 15 #include <linux/pinctrl/pinmux.h> 15 - #include <linux/pm_runtime.h> 16 16 #include <linux/platform_device.h> 17 - #include <linux/reset.h> 17 + #include <linux/pm_runtime.h> 18 18 #include <linux/regulator/consumer.h> 19 + #include <linux/reset.h> 19 20 #include <linux/workqueue.h> 20 21 21 22 #include <drm/drm_dp_helper.h>
+8
drivers/gpu/drm/tegra/drm.c
··· 8 8 #include <linux/host1x.h> 9 9 #include <linux/idr.h> 10 10 #include <linux/iommu.h> 11 + #include <linux/module.h> 12 + #include <linux/platform_device.h> 11 13 12 14 #include <drm/drm_atomic.h> 13 15 #include <drm/drm_atomic_helper.h> 16 + #include <drm/drm_debugfs.h> 17 + #include <drm/drm_drv.h> 18 + #include <drm/drm_fourcc.h> 19 + #include <drm/drm_ioctl.h> 20 + #include <drm/drm_prime.h> 21 + #include <drm/drm_vblank.h> 14 22 15 23 #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) 16 24 #include <asm/dma-iommu.h>
+1 -2
drivers/gpu/drm/tegra/drm.h
··· 7 7 #ifndef HOST1X_DRM_H 8 8 #define HOST1X_DRM_H 1 9 9 10 - #include <uapi/drm/tegra_drm.h> 11 10 #include <linux/host1x.h> 12 11 #include <linux/iova.h> 13 12 #include <linux/of_gpio.h> 14 13 15 - #include <drm/drmP.h> 16 14 #include <drm/drm_atomic.h> 17 15 #include <drm/drm_edid.h> 18 16 #include <drm/drm_encoder.h> 19 17 #include <drm/drm_fb_helper.h> 20 18 #include <drm/drm_fixed.h> 21 19 #include <drm/drm_probe_helper.h> 20 + #include <uapi/drm/tegra_drm.h> 22 21 23 22 #include "gem.h" 24 23 #include "hub.h"
+5 -3
drivers/gpu/drm/tegra/dsi.c
··· 5 5 6 6 #include <linux/clk.h> 7 7 #include <linux/debugfs.h> 8 + #include <linux/delay.h> 8 9 #include <linux/host1x.h> 9 10 #include <linux/module.h> 10 11 #include <linux/of.h> 11 12 #include <linux/of_platform.h> 12 13 #include <linux/platform_device.h> 13 14 #include <linux/pm_runtime.h> 15 + #include <linux/regulator/consumer.h> 14 16 #include <linux/reset.h> 15 17 16 - #include <linux/regulator/consumer.h> 18 + #include <video/mipi_display.h> 17 19 18 20 #include <drm/drm_atomic_helper.h> 21 + #include <drm/drm_debugfs.h> 22 + #include <drm/drm_file.h> 19 23 #include <drm/drm_mipi_dsi.h> 20 24 #include <drm/drm_panel.h> 21 - 22 - #include <video/mipi_display.h> 23 25 24 26 #include "dc.h" 25 27 #include "drm.h"
+4 -2
drivers/gpu/drm/tegra/fb.c
··· 9 9 10 10 #include <linux/console.h> 11 11 12 - #include "drm.h" 13 - #include "gem.h" 12 + #include <drm/drm_fourcc.h> 14 13 #include <drm/drm_gem_framebuffer_helper.h> 15 14 #include <drm/drm_modeset_helper.h> 15 + 16 + #include "drm.h" 17 + #include "gem.h" 16 18 17 19 #ifdef CONFIG_DRM_FBDEV_EMULATION 18 20 static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
+3
drivers/gpu/drm/tegra/gem.c
··· 12 12 13 13 #include <linux/dma-buf.h> 14 14 #include <linux/iommu.h> 15 + 16 + #include <drm/drm_drv.h> 17 + #include <drm/drm_prime.h> 15 18 #include <drm/tegra_drm.h> 16 19 17 20 #include "drm.h"
-1
drivers/gpu/drm/tegra/gem.h
··· 11 11 #include <linux/host1x.h> 12 12 13 13 #include <drm/drm.h> 14 - #include <drm/drmP.h> 15 14 #include <drm/drm_gem.h> 16 15 17 16 #define TEGRA_BO_BOTTOM_UP (1 << 0)
+1
drivers/gpu/drm/tegra/gr2d.c
··· 5 5 6 6 #include <linux/clk.h> 7 7 #include <linux/iommu.h> 8 + #include <linux/module.h> 8 9 #include <linux/of_device.h> 9 10 10 11 #include "drm.h"
+5
drivers/gpu/drm/tegra/hdmi.c
··· 6 6 7 7 #include <linux/clk.h> 8 8 #include <linux/debugfs.h> 9 + #include <linux/delay.h> 9 10 #include <linux/gpio.h> 10 11 #include <linux/hdmi.h> 11 12 #include <linux/math64.h> 13 + #include <linux/module.h> 12 14 #include <linux/of_device.h> 13 15 #include <linux/pm_runtime.h> 14 16 #include <linux/regulator/consumer.h> ··· 18 16 19 17 #include <drm/drm_atomic_helper.h> 20 18 #include <drm/drm_crtc.h> 19 + #include <drm/drm_debugfs.h> 20 + #include <drm/drm_file.h> 21 + #include <drm/drm_fourcc.h> 21 22 #include <drm/drm_probe_helper.h> 22 23 23 24 #include "hda.h"
+2 -1
drivers/gpu/drm/tegra/hub.c
··· 4 4 */ 5 5 6 6 #include <linux/clk.h> 7 + #include <linux/delay.h> 7 8 #include <linux/host1x.h> 8 9 #include <linux/module.h> 9 10 #include <linux/of.h> ··· 14 13 #include <linux/pm_runtime.h> 15 14 #include <linux/reset.h> 16 15 17 - #include <drm/drmP.h> 18 16 #include <drm/drm_atomic.h> 19 17 #include <drm/drm_atomic_helper.h> 18 + #include <drm/drm_fourcc.h> 20 19 #include <drm/drm_probe_helper.h> 21 20 22 21 #include "drm.h"
-1
drivers/gpu/drm/tegra/hub.h
··· 6 6 #ifndef TEGRA_HUB_H 7 7 #define TEGRA_HUB_H 1 8 8 9 - #include <drm/drmP.h> 10 9 #include <drm/drm_plane.h> 11 10 12 11 #include "plane.h"
+1
drivers/gpu/drm/tegra/plane.c
··· 5 5 6 6 #include <drm/drm_atomic.h> 7 7 #include <drm/drm_atomic_helper.h> 8 + #include <drm/drm_fourcc.h> 8 9 #include <drm/drm_plane_helper.h> 9 10 10 11 #include "dc.h"
+3
drivers/gpu/drm/tegra/sor.c
··· 8 8 #include <linux/debugfs.h> 9 9 #include <linux/gpio.h> 10 10 #include <linux/io.h> 11 + #include <linux/module.h> 11 12 #include <linux/of_device.h> 12 13 #include <linux/platform_device.h> 13 14 #include <linux/pm_runtime.h> ··· 18 17 #include <soc/tegra/pmc.h> 19 18 20 19 #include <drm/drm_atomic_helper.h> 20 + #include <drm/drm_debugfs.h> 21 21 #include <drm/drm_dp_helper.h> 22 + #include <drm/drm_file.h> 22 23 #include <drm/drm_panel.h> 23 24 #include <drm/drm_scdc_helper.h> 24 25
+1
drivers/gpu/drm/tegra/vic.c
··· 4 4 */ 5 5 6 6 #include <linux/clk.h> 7 + #include <linux/delay.h> 7 8 #include <linux/host1x.h> 8 9 #include <linux/iommu.h> 9 10 #include <linux/module.h>
+17 -27
drivers/gpu/drm/tiny/gm12u320.c
··· 33 33 #define DRIVER_DATE "2019" 34 34 #define DRIVER_MAJOR 1 35 35 #define DRIVER_MINOR 0 36 - #define DRIVER_PATCHLEVEL 1 37 36 38 37 /* 39 38 * The DLP has an actual width of 854 pixels, but that is not a multiple ··· 43 44 #define GM12U320_HEIGHT 480 44 45 45 46 #define GM12U320_BLOCK_COUNT 20 47 + 48 + #define GM12U320_ERR(fmt, ...) \ 49 + DRM_DEV_ERROR(&gm12u320->udev->dev, fmt, ##__VA_ARGS__) 46 50 47 51 #define MISC_RCV_EPT 1 48 52 #define DATA_RCV_EPT 2 ··· 222 220 usb_sndbulkpipe(gm12u320->udev, MISC_SND_EPT), 223 221 gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); 224 222 if (ret || len != CMD_SIZE) { 225 - dev_err(&gm12u320->udev->dev, "Misc. req. error %d\n", ret); 223 + GM12U320_ERR("Misc. req. error %d\n", ret); 226 224 return -EIO; 227 225 } 228 226 ··· 232 230 gm12u320->cmd_buf, MISC_VALUE_SIZE, &len, 233 231 DATA_TIMEOUT); 234 232 if (ret || len != MISC_VALUE_SIZE) { 235 - dev_err(&gm12u320->udev->dev, "Misc. value error %d\n", ret); 233 + GM12U320_ERR("Misc. value error %d\n", ret); 236 234 return -EIO; 237 235 } 238 236 /* cmd_buf[0] now contains the read value, which we don't use */ ··· 243 241 gm12u320->cmd_buf, READ_STATUS_SIZE, &len, 244 242 CMD_TIMEOUT); 245 243 if (ret || len != READ_STATUS_SIZE) { 246 - dev_err(&gm12u320->udev->dev, "Misc. status error %d\n", ret); 244 + GM12U320_ERR("Misc. 
status error %d\n", ret); 247 245 return -EIO; 248 246 } 249 247 ··· 280 278 281 279 vaddr = drm_gem_shmem_vmap(fb->obj[0]); 282 280 if (IS_ERR(vaddr)) { 283 - DRM_ERROR("failed to vmap fb: %ld\n", PTR_ERR(vaddr)); 281 + GM12U320_ERR("failed to vmap fb: %ld\n", PTR_ERR(vaddr)); 284 282 goto put_fb; 285 283 } 286 284 ··· 288 286 ret = dma_buf_begin_cpu_access( 289 287 fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE); 290 288 if (ret) { 291 - DRM_ERROR("dma_buf_begin_cpu_access err: %d\n", ret); 289 + GM12U320_ERR("dma_buf_begin_cpu_access err: %d\n", ret); 292 290 goto vunmap; 293 291 } 294 292 } ··· 331 329 ret = dma_buf_end_cpu_access(fb->obj[0]->import_attach->dmabuf, 332 330 DMA_FROM_DEVICE); 333 331 if (ret) 334 - DRM_ERROR("dma_buf_end_cpu_access err: %d\n", ret); 332 + GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret); 335 333 } 336 334 vunmap: 337 335 drm_gem_shmem_vunmap(fb->obj[0], vaddr); ··· 340 338 gm12u320->fb_update.fb = NULL; 341 339 unlock: 342 340 mutex_unlock(&gm12u320->fb_update.lock); 343 - } 344 - 345 - static int gm12u320_fb_update_ready(struct gm12u320_device *gm12u320) 346 - { 347 - int ret; 348 - 349 - mutex_lock(&gm12u320->fb_update.lock); 350 - ret = !gm12u320->fb_update.run || gm12u320->fb_update.fb != NULL; 351 - mutex_unlock(&gm12u320->fb_update.lock); 352 - 353 - return ret; 354 341 } 355 342 356 343 static void gm12u320_fb_update_work(struct work_struct *work) ··· 415 424 * switches back to showing its logo. 
416 425 */ 417 426 wait_event_timeout(gm12u320->fb_update.waitq, 418 - gm12u320_fb_update_ready(gm12u320), 427 + !gm12u320->fb_update.run || 428 + gm12u320->fb_update.fb != NULL, 419 429 IDLE_TIMEOUT); 420 430 } 421 431 return; 422 432 err: 423 433 /* Do not log errors caused by module unload or device unplug */ 424 - if (ret != -ECONNRESET && ret != -ESHUTDOWN) 425 - dev_err(&gm12u320->udev->dev, "Frame update error: %d\n", ret); 434 + if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN) 435 + GM12U320_ERR("Frame update error: %d\n", ret); 426 436 } 427 437 428 438 static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, ··· 738 746 if (ret) 739 747 goto err_put; 740 748 741 - drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); 749 + drm_fbdev_generic_setup(dev, 0); 742 750 743 751 return 0; 744 752 ··· 757 765 drm_dev_put(dev); 758 766 } 759 767 760 - #ifdef CONFIG_PM 761 - static int gm12u320_suspend(struct usb_interface *interface, 762 - pm_message_t message) 768 + static __maybe_unused int gm12u320_suspend(struct usb_interface *interface, 769 + pm_message_t message) 763 770 { 764 771 struct drm_device *dev = usb_get_intfdata(interface); 765 772 struct gm12u320_device *gm12u320 = dev->dev_private; ··· 769 778 return 0; 770 779 } 771 780 772 - static int gm12u320_resume(struct usb_interface *interface) 781 + static __maybe_unused int gm12u320_resume(struct usb_interface *interface) 773 782 { 774 783 struct drm_device *dev = usb_get_intfdata(interface); 775 784 struct gm12u320_device *gm12u320 = dev->dev_private; ··· 780 789 781 790 return 0; 782 791 } 783 - #endif 784 792 785 793 static const struct usb_device_id id_table[] = { 786 794 { USB_DEVICE(0x1de1, 0xc102) },
+59 -59
drivers/gpu/drm/ttm/ttm_bo.c
··· 41 41 #include <linux/file.h> 42 42 #include <linux/module.h> 43 43 #include <linux/atomic.h> 44 - #include <linux/reservation.h> 44 + #include <linux/dma-resv.h> 45 45 46 46 static void ttm_bo_global_kobj_release(struct kobject *kobj); 47 47 ··· 161 161 atomic_dec(&bo->bdev->glob->bo_count); 162 162 dma_fence_put(bo->moving); 163 163 if (!ttm_bo_uses_embedded_gem_object(bo)) 164 - reservation_object_fini(&bo->base._resv); 164 + dma_resv_fini(&bo->base._resv); 165 165 mutex_destroy(&bo->wu_mutex); 166 166 bo->destroy(bo); 167 167 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); ··· 173 173 struct ttm_bo_device *bdev = bo->bdev; 174 174 struct ttm_mem_type_manager *man; 175 175 176 - reservation_object_assert_held(bo->base.resv); 176 + dma_resv_assert_held(bo->base.resv); 177 177 178 178 if (!list_empty(&bo->lru)) 179 179 return; ··· 244 244 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, 245 245 struct ttm_lru_bulk_move *bulk) 246 246 { 247 - reservation_object_assert_held(bo->base.resv); 247 + dma_resv_assert_held(bo->base.resv); 248 248 249 249 ttm_bo_del_from_lru(bo); 250 250 ttm_bo_add_to_lru(bo); ··· 277 277 if (!pos->first) 278 278 continue; 279 279 280 - reservation_object_assert_held(pos->first->base.resv); 281 - reservation_object_assert_held(pos->last->base.resv); 280 + dma_resv_assert_held(pos->first->base.resv); 281 + dma_resv_assert_held(pos->last->base.resv); 282 282 283 283 man = &pos->first->bdev->man[TTM_PL_TT]; 284 284 list_bulk_move_tail(&man->lru[i], &pos->first->lru, ··· 292 292 if (!pos->first) 293 293 continue; 294 294 295 - reservation_object_assert_held(pos->first->base.resv); 296 - reservation_object_assert_held(pos->last->base.resv); 295 + dma_resv_assert_held(pos->first->base.resv); 296 + dma_resv_assert_held(pos->last->base.resv); 297 297 298 298 man = &pos->first->bdev->man[TTM_PL_VRAM]; 299 299 list_bulk_move_tail(&man->lru[i], &pos->first->lru, ··· 307 307 if (!pos->first) 308 308 continue; 309 309 310 - 
reservation_object_assert_held(pos->first->base.resv); 311 - reservation_object_assert_held(pos->last->base.resv); 310 + dma_resv_assert_held(pos->first->base.resv); 311 + dma_resv_assert_held(pos->last->base.resv); 312 312 313 313 lru = &pos->first->bdev->glob->swap_lru[i]; 314 314 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); ··· 442 442 if (bo->base.resv == &bo->base._resv) 443 443 return 0; 444 444 445 - BUG_ON(!reservation_object_trylock(&bo->base._resv)); 445 + BUG_ON(!dma_resv_trylock(&bo->base._resv)); 446 446 447 - r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv); 447 + r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv); 448 448 if (r) 449 - reservation_object_unlock(&bo->base._resv); 449 + dma_resv_unlock(&bo->base._resv); 450 450 451 451 return r; 452 452 } 453 453 454 454 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) 455 455 { 456 - struct reservation_object_list *fobj; 456 + struct dma_resv_list *fobj; 457 457 struct dma_fence *fence; 458 458 int i; 459 459 460 - fobj = reservation_object_get_list(&bo->base._resv); 461 - fence = reservation_object_get_excl(&bo->base._resv); 460 + fobj = dma_resv_get_list(&bo->base._resv); 461 + fence = dma_resv_get_excl(&bo->base._resv); 462 462 if (fence && !fence->ops->signaled) 463 463 dma_fence_enable_sw_signaling(fence); 464 464 465 465 for (i = 0; fobj && i < fobj->shared_count; ++i) { 466 466 fence = rcu_dereference_protected(fobj->shared[i], 467 - reservation_object_held(bo->base.resv)); 467 + dma_resv_held(bo->base.resv)); 468 468 469 469 if (!fence->ops->signaled) 470 470 dma_fence_enable_sw_signaling(fence); ··· 482 482 /* Last resort, if we fail to allocate memory for the 483 483 * fences block for the BO to become idle 484 484 */ 485 - reservation_object_wait_timeout_rcu(bo->base.resv, true, false, 485 + dma_resv_wait_timeout_rcu(bo->base.resv, true, false, 486 486 30 * HZ); 487 487 spin_lock(&glob->lru_lock); 488 488 goto error; 489 489 } 490 
490 491 491 spin_lock(&glob->lru_lock); 492 - ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY; 492 + ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY; 493 493 if (!ret) { 494 - if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) { 494 + if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) { 495 495 ttm_bo_del_from_lru(bo); 496 496 spin_unlock(&glob->lru_lock); 497 497 if (bo->base.resv != &bo->base._resv) 498 - reservation_object_unlock(&bo->base._resv); 498 + dma_resv_unlock(&bo->base._resv); 499 499 500 500 ttm_bo_cleanup_memtype_use(bo); 501 - reservation_object_unlock(bo->base.resv); 501 + dma_resv_unlock(bo->base.resv); 502 502 return; 503 503 } 504 504 ··· 514 514 ttm_bo_add_to_lru(bo); 515 515 } 516 516 517 - reservation_object_unlock(bo->base.resv); 517 + dma_resv_unlock(bo->base.resv); 518 518 } 519 519 if (bo->base.resv != &bo->base._resv) 520 - reservation_object_unlock(&bo->base._resv); 520 + dma_resv_unlock(&bo->base._resv); 521 521 522 522 error: 523 523 kref_get(&bo->list_kref); ··· 546 546 bool unlock_resv) 547 547 { 548 548 struct ttm_bo_global *glob = bo->bdev->glob; 549 - struct reservation_object *resv; 549 + struct dma_resv *resv; 550 550 int ret; 551 551 552 552 if (unlikely(list_empty(&bo->ddestroy))) ··· 554 554 else 555 555 resv = &bo->base._resv; 556 556 557 - if (reservation_object_test_signaled_rcu(resv, true)) 557 + if (dma_resv_test_signaled_rcu(resv, true)) 558 558 ret = 0; 559 559 else 560 560 ret = -EBUSY; ··· 563 563 long lret; 564 564 565 565 if (unlock_resv) 566 - reservation_object_unlock(bo->base.resv); 566 + dma_resv_unlock(bo->base.resv); 567 567 spin_unlock(&glob->lru_lock); 568 568 569 - lret = reservation_object_wait_timeout_rcu(resv, true, 569 + lret = dma_resv_wait_timeout_rcu(resv, true, 570 570 interruptible, 571 571 30 * HZ); 572 572 ··· 576 576 return -EBUSY; 577 577 578 578 spin_lock(&glob->lru_lock); 579 - if (unlock_resv && !reservation_object_trylock(bo->base.resv)) { 579 + if 
(unlock_resv && !dma_resv_trylock(bo->base.resv)) { 580 580 /* 581 581 * We raced, and lost, someone else holds the reservation now, 582 582 * and is probably busy in ttm_bo_cleanup_memtype_use. ··· 593 593 594 594 if (ret || unlikely(list_empty(&bo->ddestroy))) { 595 595 if (unlock_resv) 596 - reservation_object_unlock(bo->base.resv); 596 + dma_resv_unlock(bo->base.resv); 597 597 spin_unlock(&glob->lru_lock); 598 598 return ret; 599 599 } ··· 606 606 ttm_bo_cleanup_memtype_use(bo); 607 607 608 608 if (unlock_resv) 609 - reservation_object_unlock(bo->base.resv); 609 + dma_resv_unlock(bo->base.resv); 610 610 611 611 return 0; 612 612 } ··· 634 634 635 635 if (remove_all || bo->base.resv != &bo->base._resv) { 636 636 spin_unlock(&glob->lru_lock); 637 - reservation_object_lock(bo->base.resv, NULL); 637 + dma_resv_lock(bo->base.resv, NULL); 638 638 639 639 spin_lock(&glob->lru_lock); 640 640 ttm_bo_cleanup_refs(bo, false, !remove_all, true); 641 641 642 - } else if (reservation_object_trylock(bo->base.resv)) { 642 + } else if (dma_resv_trylock(bo->base.resv)) { 643 643 ttm_bo_cleanup_refs(bo, false, !remove_all, true); 644 644 } else { 645 645 spin_unlock(&glob->lru_lock); ··· 711 711 struct ttm_placement placement; 712 712 int ret = 0; 713 713 714 - reservation_object_assert_held(bo->base.resv); 714 + dma_resv_assert_held(bo->base.resv); 715 715 716 716 placement.num_placement = 0; 717 717 placement.num_busy_placement = 0; ··· 782 782 bool ret = false; 783 783 784 784 if (bo->base.resv == ctx->resv) { 785 - reservation_object_assert_held(bo->base.resv); 785 + dma_resv_assert_held(bo->base.resv); 786 786 if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT 787 787 || !list_empty(&bo->ddestroy)) 788 788 ret = true; ··· 790 790 if (busy) 791 791 *busy = false; 792 792 } else { 793 - ret = reservation_object_trylock(bo->base.resv); 793 + ret = dma_resv_trylock(bo->base.resv); 794 794 *locked = ret; 795 795 if (busy) 796 796 *busy = !ret; ··· 818 818 return -EBUSY; 819 819 820 
820 if (ctx->interruptible) 821 - r = reservation_object_lock_interruptible(busy_bo->base.resv, 821 + r = dma_resv_lock_interruptible(busy_bo->base.resv, 822 822 ticket); 823 823 else 824 - r = reservation_object_lock(busy_bo->base.resv, ticket); 824 + r = dma_resv_lock(busy_bo->base.resv, ticket); 825 825 826 826 /* 827 827 * TODO: It would be better to keep the BO locked until allocation is at ··· 829 829 * of TTM. 830 830 */ 831 831 if (!r) 832 - reservation_object_unlock(busy_bo->base.resv); 832 + dma_resv_unlock(busy_bo->base.resv); 833 833 834 834 return r == -EDEADLK ? -EBUSY : r; 835 835 } ··· 855 855 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, 856 856 &busy)) { 857 857 if (busy && !busy_bo && ticket != 858 - reservation_object_locking_ctx(bo->base.resv)) 858 + dma_resv_locking_ctx(bo->base.resv)) 859 859 busy_bo = bo; 860 860 continue; 861 861 } ··· 863 863 if (place && !bdev->driver->eviction_valuable(bo, 864 864 place)) { 865 865 if (locked) 866 - reservation_object_unlock(bo->base.resv); 866 + dma_resv_unlock(bo->base.resv); 867 867 continue; 868 868 } 869 869 break; ··· 935 935 spin_unlock(&man->move_lock); 936 936 937 937 if (fence) { 938 - reservation_object_add_shared_fence(bo->base.resv, fence); 938 + dma_resv_add_shared_fence(bo->base.resv, fence); 939 939 940 - ret = reservation_object_reserve_shared(bo->base.resv, 1); 940 + ret = dma_resv_reserve_shared(bo->base.resv, 1); 941 941 if (unlikely(ret)) { 942 942 dma_fence_put(fence); 943 943 return ret; ··· 964 964 struct ww_acquire_ctx *ticket; 965 965 int ret; 966 966 967 - ticket = reservation_object_locking_ctx(bo->base.resv); 967 + ticket = dma_resv_locking_ctx(bo->base.resv); 968 968 do { 969 969 ret = (*man->func->get_node)(man, bo, place, mem); 970 970 if (unlikely(ret != 0)) ··· 1094 1094 bool type_found = false; 1095 1095 int i, ret; 1096 1096 1097 - ret = reservation_object_reserve_shared(bo->base.resv, 1); 1097 + ret = dma_resv_reserve_shared(bo->base.resv, 1); 1098 1098 if 
(unlikely(ret)) 1099 1099 return ret; 1100 1100 ··· 1175 1175 int ret = 0; 1176 1176 struct ttm_mem_reg mem; 1177 1177 1178 - reservation_object_assert_held(bo->base.resv); 1178 + dma_resv_assert_held(bo->base.resv); 1179 1179 1180 1180 mem.num_pages = bo->num_pages; 1181 1181 mem.size = mem.num_pages << PAGE_SHIFT; ··· 1245 1245 int ret; 1246 1246 uint32_t new_flags; 1247 1247 1248 - reservation_object_assert_held(bo->base.resv); 1248 + dma_resv_assert_held(bo->base.resv); 1249 1249 /* 1250 1250 * Check whether we need to move buffer. 1251 1251 */ ··· 1282 1282 struct ttm_operation_ctx *ctx, 1283 1283 size_t acc_size, 1284 1284 struct sg_table *sg, 1285 - struct reservation_object *resv, 1285 + struct dma_resv *resv, 1286 1286 void (*destroy) (struct ttm_buffer_object *)) 1287 1287 { 1288 1288 int ret = 0; ··· 1336 1336 bo->sg = sg; 1337 1337 if (resv) { 1338 1338 bo->base.resv = resv; 1339 - reservation_object_assert_held(bo->base.resv); 1339 + dma_resv_assert_held(bo->base.resv); 1340 1340 } else { 1341 1341 bo->base.resv = &bo->base._resv; 1342 1342 } ··· 1345 1345 * bo.gem is not initialized, so we have to setup the 1346 1346 * struct elements we want use regardless. 1347 1347 */ 1348 - reservation_object_init(&bo->base._resv); 1348 + dma_resv_init(&bo->base._resv); 1349 1349 drm_vma_node_reset(&bo->base.vma_node); 1350 1350 } 1351 1351 atomic_inc(&bo->bdev->glob->bo_count); ··· 1363 1363 * since otherwise lockdep will be angered in radeon. 
1364 1364 */ 1365 1365 if (!resv) { 1366 - locked = reservation_object_trylock(bo->base.resv); 1366 + locked = dma_resv_trylock(bo->base.resv); 1367 1367 WARN_ON(!locked); 1368 1368 } 1369 1369 ··· 1397 1397 bool interruptible, 1398 1398 size_t acc_size, 1399 1399 struct sg_table *sg, 1400 - struct reservation_object *resv, 1400 + struct dma_resv *resv, 1401 1401 void (*destroy) (struct ttm_buffer_object *)) 1402 1402 { 1403 1403 struct ttm_operation_ctx ctx = { interruptible, false }; ··· 1807 1807 long timeout = 15 * HZ; 1808 1808 1809 1809 if (no_wait) { 1810 - if (reservation_object_test_signaled_rcu(bo->base.resv, true)) 1810 + if (dma_resv_test_signaled_rcu(bo->base.resv, true)) 1811 1811 return 0; 1812 1812 else 1813 1813 return -EBUSY; 1814 1814 } 1815 1815 1816 - timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true, 1816 + timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, 1817 1817 interruptible, timeout); 1818 1818 if (timeout < 0) 1819 1819 return timeout; ··· 1821 1821 if (timeout == 0) 1822 1822 return -EBUSY; 1823 1823 1824 - reservation_object_add_excl_fence(bo->base.resv, NULL); 1824 + dma_resv_add_excl_fence(bo->base.resv, NULL); 1825 1825 return 0; 1826 1826 } 1827 1827 EXPORT_SYMBOL(ttm_bo_wait); ··· 1937 1937 * already swapped buffer. 
1938 1938 */ 1939 1939 if (locked) 1940 - reservation_object_unlock(bo->base.resv); 1940 + dma_resv_unlock(bo->base.resv); 1941 1941 kref_put(&bo->list_kref, ttm_bo_release_list); 1942 1942 return ret; 1943 1943 } ··· 1975 1975 ret = mutex_lock_interruptible(&bo->wu_mutex); 1976 1976 if (unlikely(ret != 0)) 1977 1977 return -ERESTARTSYS; 1978 - if (!reservation_object_is_locked(bo->base.resv)) 1978 + if (!dma_resv_is_locked(bo->base.resv)) 1979 1979 goto out_unlock; 1980 - ret = reservation_object_lock_interruptible(bo->base.resv, NULL); 1980 + ret = dma_resv_lock_interruptible(bo->base.resv, NULL); 1981 1981 if (ret == -EINTR) 1982 1982 ret = -ERESTARTSYS; 1983 1983 if (unlikely(ret != 0)) 1984 1984 goto out_unlock; 1985 - reservation_object_unlock(bo->base.resv); 1985 + dma_resv_unlock(bo->base.resv); 1986 1986 1987 1987 out_unlock: 1988 1988 mutex_unlock(&bo->wu_mutex);
+8 -8
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 38 38 #include <linux/slab.h> 39 39 #include <linux/vmalloc.h> 40 40 #include <linux/module.h> 41 - #include <linux/reservation.h> 41 + #include <linux/dma-resv.h> 42 42 43 43 struct ttm_transfer_obj { 44 44 struct ttm_buffer_object base; ··· 518 518 fbo->base.destroy = &ttm_transfered_destroy; 519 519 fbo->base.acc_size = 0; 520 520 fbo->base.base.resv = &fbo->base.base._resv; 521 - reservation_object_init(fbo->base.base.resv); 522 - ret = reservation_object_trylock(fbo->base.base.resv); 521 + dma_resv_init(fbo->base.base.resv); 522 + ret = dma_resv_trylock(fbo->base.base.resv); 523 523 WARN_ON(!ret); 524 524 525 525 *new_obj = &fbo->base; ··· 689 689 int ret; 690 690 struct ttm_buffer_object *ghost_obj; 691 691 692 - reservation_object_add_excl_fence(bo->base.resv, fence); 692 + dma_resv_add_excl_fence(bo->base.resv, fence); 693 693 if (evict) { 694 694 ret = ttm_bo_wait(bo, false, false); 695 695 if (ret) ··· 716 716 if (ret) 717 717 return ret; 718 718 719 - reservation_object_add_excl_fence(ghost_obj->base.resv, fence); 719 + dma_resv_add_excl_fence(ghost_obj->base.resv, fence); 720 720 721 721 /** 722 722 * If we're not moving to fixed memory, the TTM object ··· 752 752 753 753 int ret; 754 754 755 - reservation_object_add_excl_fence(bo->base.resv, fence); 755 + dma_resv_add_excl_fence(bo->base.resv, fence); 756 756 757 757 if (!evict) { 758 758 struct ttm_buffer_object *ghost_obj; ··· 772 772 if (ret) 773 773 return ret; 774 774 775 - reservation_object_add_excl_fence(ghost_obj->base.resv, fence); 775 + dma_resv_add_excl_fence(ghost_obj->base.resv, fence); 776 776 777 777 /** 778 778 * If we're not moving to fixed memory, the TTM object ··· 841 841 if (ret) 842 842 return ret; 843 843 844 - ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv); 844 + ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv); 845 845 /* Last resort, wait for the BO to be idle when we are OOM */ 846 846 if (ret) 847 847 ttm_bo_wait(bo, false, false);
+3 -3
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 71 71 ttm_bo_get(bo); 72 72 up_read(&vmf->vma->vm_mm->mmap_sem); 73 73 (void) dma_fence_wait(bo->moving, true); 74 - reservation_object_unlock(bo->base.resv); 74 + dma_resv_unlock(bo->base.resv); 75 75 ttm_bo_put(bo); 76 76 goto out_unlock; 77 77 } ··· 131 131 * for reserve, and if it fails, retry the fault after waiting 132 132 * for the buffer to become unreserved. 133 133 */ 134 - if (unlikely(!reservation_object_trylock(bo->base.resv))) { 134 + if (unlikely(!dma_resv_trylock(bo->base.resv))) { 135 135 if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { 136 136 if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 137 137 ttm_bo_get(bo); ··· 296 296 out_io_unlock: 297 297 ttm_mem_io_unlock(man); 298 298 out_unlock: 299 - reservation_object_unlock(bo->base.resv); 299 + dma_resv_unlock(bo->base.resv); 300 300 return ret; 301 301 } 302 302
+10 -10
drivers/gpu/drm/ttm/ttm_execbuf_util.c
··· 39 39 list_for_each_entry_continue_reverse(entry, list, head) { 40 40 struct ttm_buffer_object *bo = entry->bo; 41 41 42 - reservation_object_unlock(bo->base.resv); 42 + dma_resv_unlock(bo->base.resv); 43 43 } 44 44 } 45 45 ··· 71 71 72 72 if (list_empty(&bo->lru)) 73 73 ttm_bo_add_to_lru(bo); 74 - reservation_object_unlock(bo->base.resv); 74 + dma_resv_unlock(bo->base.resv); 75 75 } 76 76 spin_unlock(&glob->lru_lock); 77 77 ··· 114 114 115 115 ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); 116 116 if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { 117 - reservation_object_unlock(bo->base.resv); 117 + dma_resv_unlock(bo->base.resv); 118 118 119 119 ret = -EBUSY; 120 120 ··· 130 130 if (!entry->num_shared) 131 131 continue; 132 132 133 - ret = reservation_object_reserve_shared(bo->base.resv, 133 + ret = dma_resv_reserve_shared(bo->base.resv, 134 134 entry->num_shared); 135 135 if (!ret) 136 136 continue; ··· 144 144 145 145 if (ret == -EDEADLK) { 146 146 if (intr) { 147 - ret = reservation_object_lock_slow_interruptible(bo->base.resv, 147 + ret = dma_resv_lock_slow_interruptible(bo->base.resv, 148 148 ticket); 149 149 } else { 150 - reservation_object_lock_slow(bo->base.resv, ticket); 150 + dma_resv_lock_slow(bo->base.resv, ticket); 151 151 ret = 0; 152 152 } 153 153 } 154 154 155 155 if (!ret && entry->num_shared) 156 - ret = reservation_object_reserve_shared(bo->base.resv, 156 + ret = dma_resv_reserve_shared(bo->base.resv, 157 157 entry->num_shared); 158 158 159 159 if (unlikely(ret != 0)) { ··· 201 201 list_for_each_entry(entry, list, head) { 202 202 bo = entry->bo; 203 203 if (entry->num_shared) 204 - reservation_object_add_shared_fence(bo->base.resv, fence); 204 + dma_resv_add_shared_fence(bo->base.resv, fence); 205 205 else 206 - reservation_object_add_excl_fence(bo->base.resv, fence); 206 + dma_resv_add_excl_fence(bo->base.resv, fence); 207 207 if (list_empty(&bo->lru)) 208 208 ttm_bo_add_to_lru(bo); 209 209 else 210 210 
ttm_bo_move_to_lru_tail(bo, NULL); 211 - reservation_object_unlock(bo->base.resv); 211 + dma_resv_unlock(bo->base.resv); 212 212 } 213 213 spin_unlock(&glob->lru_lock); 214 214 if (ticket)
+1 -1
drivers/gpu/drm/ttm/ttm_tt.c
··· 48 48 struct ttm_bo_device *bdev = bo->bdev; 49 49 uint32_t page_flags = 0; 50 50 51 - reservation_object_assert_held(bo->base.resv); 51 + dma_resv_assert_held(bo->base.resv); 52 52 53 53 if (bdev->need_dma32) 54 54 page_flags |= TTM_PAGE_FLAG_DMA32;
+2 -2
drivers/gpu/drm/v3d/v3d_gem.c
··· 409 409 if (args->pad != 0) 410 410 return -EINVAL; 411 411 412 - ret = drm_gem_reservation_object_wait(file_priv, args->handle, 412 + ret = drm_gem_dma_resv_wait(file_priv, args->handle, 413 413 true, timeout_jiffies); 414 414 415 415 /* Decrement the user's timeout, in case we got interrupted ··· 495 495 496 496 for (i = 0; i < job->bo_count; i++) { 497 497 /* XXX: Use shared fences for read-only objects. */ 498 - reservation_object_add_excl_fence(job->bo[i]->resv, 498 + dma_resv_add_excl_fence(job->bo[i]->resv, 499 499 job->done_fence); 500 500 } 501 501
+1 -1
drivers/gpu/drm/vboxvideo/vbox_drv.c
··· 32 32 }; 33 33 MODULE_DEVICE_TABLE(pci, pciidlist); 34 34 35 - static struct drm_fb_helper_funcs vbox_fb_helper_funcs = { 35 + static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = { 36 36 .fb_probe = vboxfb_create, 37 37 }; 38 38
+3 -3
drivers/gpu/drm/vc4/vc4_gem.c
··· 543 543 bo = to_vc4_bo(&exec->bo[i]->base); 544 544 bo->seqno = seqno; 545 545 546 - reservation_object_add_shared_fence(bo->base.base.resv, exec->fence); 546 + dma_resv_add_shared_fence(bo->base.base.resv, exec->fence); 547 547 } 548 548 549 549 list_for_each_entry(bo, &exec->unref_list, unref_head) { ··· 554 554 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); 555 555 bo->write_seqno = seqno; 556 556 557 - reservation_object_add_excl_fence(bo->base.base.resv, exec->fence); 557 + dma_resv_add_excl_fence(bo->base.base.resv, exec->fence); 558 558 } 559 559 } 560 560 ··· 642 642 for (i = 0; i < exec->bo_count; i++) { 643 643 bo = &exec->bo[i]->base; 644 644 645 - ret = reservation_object_reserve_shared(bo->resv, 1); 645 + ret = dma_resv_reserve_shared(bo->resv, 1); 646 646 if (ret) { 647 647 vc4_unlock_bo_reservations(dev, exec, acquire_ctx); 648 648 return ret;
+8 -8
drivers/gpu/drm/vgem/vgem_fence.c
··· 21 21 */ 22 22 23 23 #include <linux/dma-buf.h> 24 - #include <linux/reservation.h> 24 + #include <linux/dma-resv.h> 25 25 26 26 #include <drm/drm_file.h> 27 27 ··· 128 128 { 129 129 struct drm_vgem_fence_attach *arg = data; 130 130 struct vgem_file *vfile = file->driver_priv; 131 - struct reservation_object *resv; 131 + struct dma_resv *resv; 132 132 struct drm_gem_object *obj; 133 133 struct dma_fence *fence; 134 134 int ret; ··· 151 151 152 152 /* Check for a conflicting fence */ 153 153 resv = obj->resv; 154 - if (!reservation_object_test_signaled_rcu(resv, 154 + if (!dma_resv_test_signaled_rcu(resv, 155 155 arg->flags & VGEM_FENCE_WRITE)) { 156 156 ret = -EBUSY; 157 157 goto err_fence; ··· 159 159 160 160 /* Expose the fence via the dma-buf */ 161 161 ret = 0; 162 - reservation_object_lock(resv, NULL); 162 + dma_resv_lock(resv, NULL); 163 163 if (arg->flags & VGEM_FENCE_WRITE) 164 - reservation_object_add_excl_fence(resv, fence); 165 - else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0) 166 - reservation_object_add_shared_fence(resv, fence); 167 - reservation_object_unlock(resv); 164 + dma_resv_add_excl_fence(resv, fence); 165 + else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0) 166 + dma_resv_add_shared_fence(resv, fence); 167 + dma_resv_unlock(resv); 168 168 169 169 /* Record the fence in our idr for later signaling */ 170 170 if (ret == 0) {
+2 -2
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 396 396 (vgdev, qobj->hw_res_handle, 397 397 vfpriv->ctx_id, offset, args->level, 398 398 &box, fence); 399 - reservation_object_add_excl_fence(qobj->tbo.base.resv, 399 + dma_resv_add_excl_fence(qobj->tbo.base.resv, 400 400 &fence->f); 401 401 402 402 dma_fence_put(&fence->f); ··· 450 450 (vgdev, qobj, 451 451 vfpriv ? vfpriv->ctx_id : 0, offset, 452 452 args->level, &box, fence); 453 - reservation_object_add_excl_fence(qobj->tbo.base.resv, 453 + dma_resv_add_excl_fence(qobj->tbo.base.resv, 454 454 &fence->f); 455 455 dma_fence_put(&fence->f); 456 456 }
+1 -1
drivers/gpu/drm/virtio/virtgpu_plane.c
··· 212 212 0, 0, vgfb->fence); 213 213 ret = virtio_gpu_object_reserve(bo, false); 214 214 if (!ret) { 215 - reservation_object_add_excl_fence(bo->tbo.base.resv, 215 + dma_resv_add_excl_fence(bo->tbo.base.resv, 216 216 &vgfb->fence->f); 217 217 dma_fence_put(&vgfb->fence->f); 218 218 vgfb->fence = NULL;
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
··· 459 459 460 460 /* Buffer objects need to be either pinned or reserved: */ 461 461 if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT)) 462 - reservation_object_assert_held(dst->base.resv); 462 + dma_resv_assert_held(dst->base.resv); 463 463 if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT)) 464 - reservation_object_assert_held(src->base.resv); 464 + dma_resv_assert_held(src->base.resv); 465 465 466 466 if (dst->ttm->state == tt_unpopulated) { 467 467 ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 341 341 uint32_t old_mem_type = bo->mem.mem_type; 342 342 int ret; 343 343 344 - reservation_object_assert_held(bo->base.resv); 344 + dma_resv_assert_held(bo->base.resv); 345 345 346 346 if (pin) { 347 347 if (vbo->pin_count++ > 0) ··· 690 690 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); 691 691 long lret; 692 692 693 - lret = reservation_object_wait_timeout_rcu 693 + lret = dma_resv_wait_timeout_rcu 694 694 (bo->base.resv, true, true, 695 695 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); 696 696 if (!lret) ··· 1008 1008 1009 1009 if (fence == NULL) { 1010 1010 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 1011 - reservation_object_add_excl_fence(bo->base.resv, &fence->base); 1011 + dma_resv_add_excl_fence(bo->base.resv, &fence->base); 1012 1012 dma_fence_put(&fence->base); 1013 1013 } else 1014 - reservation_object_add_excl_fence(bo->base.resv, &fence->base); 1014 + dma_resv_add_excl_fence(bo->base.resv, &fence->base); 1015 1015 } 1016 1016 1017 1017
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 171 171 } *cmd; 172 172 173 173 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 174 - reservation_object_assert_held(bo->base.resv); 174 + dma_resv_assert_held(bo->base.resv); 175 175 176 176 cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 177 177 if (!cmd) ··· 313 313 return 0; 314 314 315 315 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 316 - reservation_object_assert_held(bo->base.resv); 316 + dma_resv_assert_held(bo->base.resv); 317 317 318 318 mutex_lock(&dev_priv->binding_mutex); 319 319 if (!vcotbl->scrubbed)
+3
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 185 185 186 186 spin_lock(f->lock); 187 187 188 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) 189 + goto out; 190 + 188 191 if (intr && signal_pending(current)) { 189 192 ret = -ERESTARTSYS; 190 193 goto out;
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 41 41 { 42 42 struct vmw_buffer_object *backup = res->backup; 43 43 44 - reservation_object_assert_held(backup->base.base.resv); 44 + dma_resv_assert_held(res->backup->base.base.resv); 45 45 res->used_prio = (res->res_dirty) ? res->func->dirty_prio : 46 46 res->func->prio; 47 47 list_add_tail(&res->mob_head, &backup->res_list); ··· 56 56 { 57 57 struct vmw_buffer_object *backup = res->backup; 58 58 59 - reservation_object_assert_held(backup->base.base.resv); 59 + dma_resv_assert_held(backup->base.base.resv); 60 60 if (vmw_resource_mob_attached(res)) { 61 61 list_del_init(&res->mob_head); 62 62 vmw_bo_prio_del(backup, res->used_prio); ··· 719 719 .num_shared = 0 720 720 }; 721 721 722 - reservation_object_assert_held(vbo->base.base.resv); 722 + dma_resv_assert_held(vbo->base.base.resv); 723 723 list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { 724 724 if (!res->func->unbind) 725 725 continue;
+1 -1
drivers/gpu/drm/xen/xen_drm_front_kms.c
··· 46 46 drm_gem_fb_destroy(fb); 47 47 } 48 48 49 - static struct drm_framebuffer_funcs fb_funcs = { 49 + static const struct drm_framebuffer_funcs fb_funcs = { 50 50 .destroy = fb_destroy, 51 51 }; 52 52
+5
drivers/video/fbdev/omap2/omapfb/displays/Kconfig
··· 49 49 config FB_OMAP2_PANEL_SONY_ACX565AKM 50 50 tristate "ACX565AKM Panel" 51 51 depends on SPI && BACKLIGHT_CLASS_DEVICE 52 + depends on DRM_PANEL_SONY_ACX565AKM = n 52 53 help 53 54 This is the LCD panel used on Nokia N900 54 55 ··· 62 61 config FB_OMAP2_PANEL_SHARP_LS037V7DW01 63 62 tristate "Sharp LS037V7DW01 LCD Panel" 64 63 depends on BACKLIGHT_CLASS_DEVICE 64 + depends on DRM_PANEL_SHARP_LS037V7DW01 = n 65 65 help 66 66 LCD Panel used in TI's SDP3430 and EVM boards 67 67 68 68 config FB_OMAP2_PANEL_TPO_TD028TTEC1 69 69 tristate "TPO TD028TTEC1 LCD Panel" 70 70 depends on SPI 71 + depends on DRM_PANEL_TPO_TD028TTEC1 = n 71 72 help 72 73 LCD panel used in Openmoko. 73 74 74 75 config FB_OMAP2_PANEL_TPO_TD043MTEA1 75 76 tristate "TPO TD043MTEA1 LCD Panel" 76 77 depends on SPI 78 + depends on DRM_PANEL_TPO_TD043MTEA1 = n 77 79 help 78 80 LCD Panel used in OMAP3 Pandora 79 81 ··· 84 80 tristate "NEC NL8048HL11 Panel" 85 81 depends on SPI 86 82 depends on BACKLIGHT_CLASS_DEVICE 83 + depends on DRM_PANEL_NEC_NL8048HL11 = n 87 84 help 88 85 This NEC NL8048HL11 panel is TFT LCD used in the 89 86 Zoom2/3/3630 sdp boards.
+2
include/drm/bridge/dw_hdmi.h
··· 155 155 void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); 156 156 157 157 void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); 158 + void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); 159 + void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca); 158 160 void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); 159 161 void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); 160 162 void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi);
+1 -1
include/drm/drmP.h
··· 87 87 88 88 struct device_node; 89 89 struct videomode; 90 - struct reservation_object; 90 + struct dma_resv; 91 91 struct dma_buf_attachment; 92 92 93 93 struct pci_dev;
+2 -2
include/drm/drm_connector.h
··· 543 543 * 544 544 * This is also used in the atomic helpers to map encoders to their 545 545 * current and previous connectors, see 546 - * &drm_atomic_get_old_connector_for_encoder() and 547 - * &drm_atomic_get_new_connector_for_encoder(). 546 + * drm_atomic_get_old_connector_for_encoder() and 547 + * drm_atomic_get_new_connector_for_encoder(). 548 548 * 549 549 * NOTE: Atomic drivers must fill this out (either themselves or through 550 550 * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will
+4 -4
include/drm/drm_gem.h
··· 35 35 */ 36 36 37 37 #include <linux/kref.h> 38 - #include <linux/reservation.h> 38 + #include <linux/dma-resv.h> 39 39 40 40 #include <drm/drm_vma_manager.h> 41 41 ··· 276 276 * 277 277 * Normally (@resv == &@_resv) except for imported GEM objects. 278 278 */ 279 - struct reservation_object *resv; 279 + struct dma_resv *resv; 280 280 281 281 /** 282 282 * @_resv: ··· 285 285 * 286 286 * This is unused for imported GEM objects. 287 287 */ 288 - struct reservation_object _resv; 288 + struct dma_resv _resv; 289 289 290 290 /** 291 291 * @funcs: ··· 390 390 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, 391 391 int count, struct drm_gem_object ***objs_out); 392 392 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); 393 - long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, 393 + long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, 394 394 bool wait_all, unsigned long timeout); 395 395 int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, 396 396 struct ww_acquire_ctx *acquire_ctx);
+15
include/drm/drm_gem_shmem_helper.h
··· 44 44 */ 45 45 unsigned int pages_use_count; 46 46 47 + int madv; 48 + struct list_head madv_list; 49 + 47 50 /** 48 51 * @pages_mark_dirty_on_put: 49 52 * ··· 123 120 void drm_gem_shmem_unpin(struct drm_gem_object *obj); 124 121 void *drm_gem_shmem_vmap(struct drm_gem_object *obj); 125 122 void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr); 123 + 124 + int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv); 125 + 126 + static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) 127 + { 128 + return (shmem->madv > 0) && 129 + !shmem->vmap_use_count && shmem->sgt && 130 + !shmem->base.dma_buf && !shmem->base.import_attach; 131 + } 132 + 133 + void drm_gem_shmem_purge_locked(struct drm_gem_object *obj); 134 + void drm_gem_shmem_purge(struct drm_gem_object *obj); 126 135 127 136 struct drm_gem_shmem_object * 128 137 drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
+75 -108
include/drm/drm_panel.h
··· 36 36 37 37 /** 38 38 * struct drm_panel_funcs - perform operations on a given panel 39 - * @disable: disable panel (turn off back light, etc.) 40 - * @unprepare: turn off panel 41 - * @prepare: turn on panel and perform set up 42 - * @enable: enable panel (turn on back light, etc.) 43 - * @get_modes: add modes to the connector that the panel is attached to and 44 - * return the number of modes added 45 - * @get_timings: copy display timings into the provided array and return 46 - * the number of display timings available 47 39 * 48 40 * The .prepare() function is typically called before the display controller 49 41 * starts to transmit video data. Panel drivers can use this to turn the panel ··· 61 69 * the panel. This is the job of the .unprepare() function. 62 70 */ 63 71 struct drm_panel_funcs { 64 - int (*disable)(struct drm_panel *panel); 65 - int (*unprepare)(struct drm_panel *panel); 72 + /** 73 + * @prepare: 74 + * 75 + * Turn on panel and perform set up. 76 + */ 66 77 int (*prepare)(struct drm_panel *panel); 78 + 79 + /** 80 + * @enable: 81 + * 82 + * Enable panel (turn on back light, etc.). 83 + */ 67 84 int (*enable)(struct drm_panel *panel); 85 + 86 + /** 87 + * @disable: 88 + * 89 + * Disable panel (turn off back light, etc.). 90 + */ 91 + int (*disable)(struct drm_panel *panel); 92 + 93 + /** 94 + * @unprepare: 95 + * 96 + * Turn off panel. 97 + */ 98 + int (*unprepare)(struct drm_panel *panel); 99 + 100 + /** 101 + * @get_modes: 102 + * 103 + * Add modes to the connector that the panel is attached to and 104 + * return the number of modes added. 105 + */ 68 106 int (*get_modes)(struct drm_panel *panel); 107 + 108 + /** 109 + * @get_timings: 110 + * 111 + * Copy display timings into the provided array and return 112 + * the number of display timings available. 
113 + */ 69 114 int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, 70 115 struct display_timing *timings); 71 116 }; 72 117 73 118 /** 74 119 * struct drm_panel - DRM panel object 75 - * @drm: DRM device owning the panel 76 - * @connector: DRM connector that the panel is attached to 77 - * @dev: parent device of the panel 78 - * @link: link from panel device (supplier) to DRM device (consumer) 79 - * @funcs: operations that can be performed on the panel 80 - * @list: panel entry in registry 81 120 */ 82 121 struct drm_panel { 122 + /** 123 + * @drm: 124 + * 125 + * DRM device owning the panel. 126 + */ 83 127 struct drm_device *drm; 128 + 129 + /** 130 + * @connector: 131 + * 132 + * DRM connector that the panel is attached to. 133 + */ 84 134 struct drm_connector *connector; 135 + 136 + /** 137 + * @dev: 138 + * 139 + * Parent device of the panel. 140 + */ 85 141 struct device *dev; 86 142 143 + /** 144 + * @funcs: 145 + * 146 + * Operations that can be performed on the panel. 147 + */ 87 148 const struct drm_panel_funcs *funcs; 88 149 150 + /** 151 + * @list: 152 + * 153 + * Panel entry in registry. 154 + */ 89 155 struct list_head list; 90 156 }; 91 - 92 - /** 93 - * drm_disable_unprepare - power off a panel 94 - * @panel: DRM panel 95 - * 96 - * Calling this function will completely power off a panel (assert the panel's 97 - * reset, turn off power supplies, ...). After this function has completed, it 98 - * is usually no longer possible to communicate with the panel until another 99 - * call to drm_panel_prepare(). 100 - * 101 - * Return: 0 on success or a negative error code on failure. 102 - */ 103 - static inline int drm_panel_unprepare(struct drm_panel *panel) 104 - { 105 - if (panel && panel->funcs && panel->funcs->unprepare) 106 - return panel->funcs->unprepare(panel); 107 - 108 - return panel ? 
-ENOSYS : -EINVAL; 109 - } 110 - 111 - /** 112 - * drm_panel_disable - disable a panel 113 - * @panel: DRM panel 114 - * 115 - * This will typically turn off the panel's backlight or disable the display 116 - * drivers. For smart panels it should still be possible to communicate with 117 - * the integrated circuitry via any command bus after this call. 118 - * 119 - * Return: 0 on success or a negative error code on failure. 120 - */ 121 - static inline int drm_panel_disable(struct drm_panel *panel) 122 - { 123 - if (panel && panel->funcs && panel->funcs->disable) 124 - return panel->funcs->disable(panel); 125 - 126 - return panel ? -ENOSYS : -EINVAL; 127 - } 128 - 129 - /** 130 - * drm_panel_prepare - power on a panel 131 - * @panel: DRM panel 132 - * 133 - * Calling this function will enable power and deassert any reset signals to 134 - * the panel. After this has completed it is possible to communicate with any 135 - * integrated circuitry via a command bus. 136 - * 137 - * Return: 0 on success or a negative error code on failure. 138 - */ 139 - static inline int drm_panel_prepare(struct drm_panel *panel) 140 - { 141 - if (panel && panel->funcs && panel->funcs->prepare) 142 - return panel->funcs->prepare(panel); 143 - 144 - return panel ? -ENOSYS : -EINVAL; 145 - } 146 - 147 - /** 148 - * drm_panel_enable - enable a panel 149 - * @panel: DRM panel 150 - * 151 - * Calling this function will cause the panel display drivers to be turned on 152 - * and the backlight to be enabled. Content will be visible on screen after 153 - * this call completes. 154 - * 155 - * Return: 0 on success or a negative error code on failure. 156 - */ 157 - static inline int drm_panel_enable(struct drm_panel *panel) 158 - { 159 - if (panel && panel->funcs && panel->funcs->enable) 160 - return panel->funcs->enable(panel); 161 - 162 - return panel ? 
-ENOSYS : -EINVAL; 163 - } 164 - 165 - /** 166 - * drm_panel_get_modes - probe the available display modes of a panel 167 - * @panel: DRM panel 168 - * 169 - * The modes probed from the panel are automatically added to the connector 170 - * that the panel is attached to. 171 - * 172 - * Return: The number of modes available from the panel on success or a 173 - * negative error code on failure. 174 - */ 175 - static inline int drm_panel_get_modes(struct drm_panel *panel) 176 - { 177 - if (panel && panel->funcs && panel->funcs->get_modes) 178 - return panel->funcs->get_modes(panel); 179 - 180 - return panel ? -ENOSYS : -EINVAL; 181 - } 182 157 183 158 void drm_panel_init(struct drm_panel *panel); 184 159 ··· 153 194 void drm_panel_remove(struct drm_panel *panel); 154 195 155 196 int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); 156 - int drm_panel_detach(struct drm_panel *panel); 197 + void drm_panel_detach(struct drm_panel *panel); 198 + 199 + int drm_panel_prepare(struct drm_panel *panel); 200 + int drm_panel_unprepare(struct drm_panel *panel); 201 + 202 + int drm_panel_enable(struct drm_panel *panel); 203 + int drm_panel_disable(struct drm_panel *panel); 204 + 205 + int drm_panel_get_modes(struct drm_panel *panel); 157 206 158 207 #if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) 159 208 struct drm_panel *of_drm_find_panel(const struct device_node *np);
+6 -6
include/drm/ttm/ttm_bo_api.h
··· 40 40 #include <linux/mutex.h> 41 41 #include <linux/mm.h> 42 42 #include <linux/bitmap.h> 43 - #include <linux/reservation.h> 43 + #include <linux/dma-resv.h> 44 44 45 45 struct ttm_bo_global; 46 46 ··· 273 273 struct ttm_operation_ctx { 274 274 bool interruptible; 275 275 bool no_wait_gpu; 276 - struct reservation_object *resv; 276 + struct dma_resv *resv; 277 277 uint64_t bytes_moved; 278 278 uint32_t flags; 279 279 }; ··· 493 493 * @page_alignment: Data alignment in pages. 494 494 * @ctx: TTM operation context for memory allocation. 495 495 * @acc_size: Accounted size for this object. 496 - * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. 496 + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. 497 497 * @destroy: Destroy function. Use NULL for kfree(). 498 498 * 499 499 * This function initializes a pre-allocated struct ttm_buffer_object. ··· 526 526 struct ttm_operation_ctx *ctx, 527 527 size_t acc_size, 528 528 struct sg_table *sg, 529 - struct reservation_object *resv, 529 + struct dma_resv *resv, 530 530 void (*destroy) (struct ttm_buffer_object *)); 531 531 532 532 /** ··· 545 545 * point to the shmem object backing a GEM object if TTM is used to back a 546 546 * GEM user interface. 547 547 * @acc_size: Accounted size for this object. 548 - * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. 548 + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. 549 549 * @destroy: Destroy function. Use NULL for kfree(). 550 550 * 551 551 * This function initializes a pre-allocated struct ttm_buffer_object. ··· 570 570 unsigned long size, enum ttm_bo_type type, 571 571 struct ttm_placement *placement, 572 572 uint32_t page_alignment, bool interrubtible, size_t acc_size, 573 - struct sg_table *sg, struct reservation_object *resv, 573 + struct sg_table *sg, struct dma_resv *resv, 574 574 void (*destroy) (struct ttm_buffer_object *)); 575 575 576 576 /**
+7 -7
include/drm/ttm/ttm_bo_driver.h
··· 35 35 #include <linux/workqueue.h> 36 36 #include <linux/fs.h> 37 37 #include <linux/spinlock.h> 38 - #include <linux/reservation.h> 38 + #include <linux/dma-resv.h> 39 39 40 40 #include "ttm_bo_api.h" 41 41 #include "ttm_memory.h" ··· 664 664 if (WARN_ON(ticket)) 665 665 return -EBUSY; 666 666 667 - success = reservation_object_trylock(bo->base.resv); 667 + success = dma_resv_trylock(bo->base.resv); 668 668 return success ? 0 : -EBUSY; 669 669 } 670 670 671 671 if (interruptible) 672 - ret = reservation_object_lock_interruptible(bo->base.resv, ticket); 672 + ret = dma_resv_lock_interruptible(bo->base.resv, ticket); 673 673 else 674 - ret = reservation_object_lock(bo->base.resv, ticket); 674 + ret = dma_resv_lock(bo->base.resv, ticket); 675 675 if (ret == -EINTR) 676 676 return -ERESTARTSYS; 677 677 return ret; ··· 755 755 WARN_ON(!kref_read(&bo->kref)); 756 756 757 757 if (interruptible) 758 - ret = reservation_object_lock_slow_interruptible(bo->base.resv, 758 + ret = dma_resv_lock_slow_interruptible(bo->base.resv, 759 759 ticket); 760 760 else 761 - reservation_object_lock_slow(bo->base.resv, ticket); 761 + dma_resv_lock_slow(bo->base.resv, ticket); 762 762 763 763 if (likely(ret == 0)) 764 764 ttm_bo_del_sub_from_lru(bo); ··· 783 783 else 784 784 ttm_bo_move_to_lru_tail(bo, NULL); 785 785 spin_unlock(&bo->bdev->glob->lru_lock); 786 - reservation_object_unlock(bo->base.resv); 786 + dma_resv_unlock(bo->base.resv); 787 787 } 788 788 789 789 /*
+1
include/linux/amba/clcd-regs.h
··· 42 42 #define TIM2_PCD_LO_MASK GENMASK(4, 0) 43 43 #define TIM2_PCD_LO_BITS 5 44 44 #define TIM2_CLKSEL (1 << 5) 45 + #define TIM2_ACB_MASK GENMASK(10, 6) 45 46 #define TIM2_IVS (1 << 11) 46 47 #define TIM2_IHS (1 << 12) 47 48 #define TIM2_IPC (1 << 13)
+2 -2
include/linux/dma-buf.h
··· 306 306 struct module *owner; 307 307 struct list_head list_node; 308 308 void *priv; 309 - struct reservation_object *resv; 309 + struct dma_resv *resv; 310 310 311 311 /* poll support */ 312 312 wait_queue_head_t poll; ··· 365 365 const struct dma_buf_ops *ops; 366 366 size_t size; 367 367 int flags; 368 - struct reservation_object *resv; 368 + struct dma_resv *resv; 369 369 void *priv; 370 370 }; 371 371
+27 -7
include/linux/dma-fence.h
··· 63 63 * been completed, or never called at all. 64 64 */ 65 65 struct dma_fence { 66 - struct kref refcount; 67 - const struct dma_fence_ops *ops; 68 - struct rcu_head rcu; 69 - struct list_head cb_list; 70 66 spinlock_t *lock; 67 + const struct dma_fence_ops *ops; 68 + /* 69 + * We clear the callback list on kref_put so that by the time we 70 + * release the fence it is unused. No one should be adding to the 71 + * cb_list that they don't themselves hold a reference for. 72 + * 73 + * The lifetime of the timestamp is similarly tied to both the 74 + * rcu freelist and the cb_list. The timestamp is only set upon 75 + * signaling while simultaneously notifying the cb_list. Ergo, we 76 + * only use either the cb_list of timestamp. Upon destruction, 77 + * neither are accessible, and so we can use the rcu. This means 78 + * that the cb_list is *only* valid until the signal bit is set, 79 + * and to read either you *must* hold a reference to the fence, 80 + * and not just the rcu_read_lock. 81 + * 82 + * Listed in chronological order. 83 + */ 84 + union { 85 + struct list_head cb_list; 86 + /* @cb_list replaced by @timestamp on dma_fence_signal() */ 87 + ktime_t timestamp; 88 + /* @timestamp replaced by @rcu on dma_fence_release() */ 89 + struct rcu_head rcu; 90 + }; 71 91 u64 context; 72 92 u64 seqno; 73 93 unsigned long flags; 74 - ktime_t timestamp; 94 + struct kref refcount; 75 95 int error; 76 96 }; 77 97 ··· 293 273 } 294 274 295 275 /** 296 - * dma_fence_get_rcu - get a fence from a reservation_object_list with 276 + * dma_fence_get_rcu - get a fence from a dma_resv_list with 297 277 * rcu read lock 298 278 * @fence: fence to increase refcount of 299 279 * ··· 317 297 * so long as the caller is using RCU on the pointer to the fence. 318 298 * 319 299 * An alternative mechanism is to employ a seqlock to protect a bunch of 320 - * fences, such as used by struct reservation_object. When using a seqlock, 300 + * fences, such as used by struct dma_resv. 
When using a seqlock, 321 301 * the seqlock must be taken before and checked after a reference to the 322 302 * fence is acquired (as shown here). 323 303 *
+51 -68
include/linux/reservation.h include/linux/dma-resv.h
··· 50 50 extern const char reservation_seqcount_string[]; 51 51 52 52 /** 53 - * struct reservation_object_list - a list of shared fences 53 + * struct dma_resv_list - a list of shared fences 54 54 * @rcu: for internal use 55 55 * @shared_count: table of shared fences 56 56 * @shared_max: for growing shared fence table 57 57 * @shared: shared fence table 58 58 */ 59 - struct reservation_object_list { 59 + struct dma_resv_list { 60 60 struct rcu_head rcu; 61 61 u32 shared_count, shared_max; 62 62 struct dma_fence __rcu *shared[]; 63 63 }; 64 64 65 65 /** 66 - * struct reservation_object - a reservation object manages fences for a buffer 66 + * struct dma_resv - a reservation object manages fences for a buffer 67 67 * @lock: update side lock 68 68 * @seq: sequence count for managing RCU read-side synchronization 69 69 * @fence_excl: the exclusive fence, if there is one currently 70 70 * @fence: list of current shared fences 71 71 */ 72 - struct reservation_object { 72 + struct dma_resv { 73 73 struct ww_mutex lock; 74 74 seqcount_t seq; 75 75 76 76 struct dma_fence __rcu *fence_excl; 77 - struct reservation_object_list __rcu *fence; 77 + struct dma_resv_list __rcu *fence; 78 78 }; 79 79 80 - #define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) 81 - #define reservation_object_assert_held(obj) \ 82 - lockdep_assert_held(&(obj)->lock.base) 80 + #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) 81 + #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) 83 82 84 83 /** 85 - * reservation_object_get_list - get the reservation object's 84 + * dma_resv_get_list - get the reservation object's 86 85 * shared fence list, with update-side lock held 87 86 * @obj: the reservation object 88 87 * 89 88 * Returns the shared fence list. Does NOT take references to 90 89 * the fence. The obj->lock must be held. 
91 90 */ 92 - static inline struct reservation_object_list * 93 - reservation_object_get_list(struct reservation_object *obj) 91 + static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) 94 92 { 95 93 return rcu_dereference_protected(obj->fence, 96 - reservation_object_held(obj)); 94 + dma_resv_held(obj)); 97 95 } 98 96 99 97 /** 100 - * reservation_object_lock - lock the reservation object 98 + * dma_resv_lock - lock the reservation object 101 99 * @obj: the reservation object 102 100 * @ctx: the locking context 103 101 * ··· 109 111 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation 110 112 * object may be locked by itself by passing NULL as @ctx. 111 113 */ 112 - static inline int 113 - reservation_object_lock(struct reservation_object *obj, 114 - struct ww_acquire_ctx *ctx) 114 + static inline int dma_resv_lock(struct dma_resv *obj, 115 + struct ww_acquire_ctx *ctx) 115 116 { 116 117 return ww_mutex_lock(&obj->lock, ctx); 117 118 } 118 119 119 120 /** 120 - * reservation_object_lock_interruptible - lock the reservation object 121 + * dma_resv_lock_interruptible - lock the reservation object 121 122 * @obj: the reservation object 122 123 * @ctx: the locking context 123 124 * ··· 130 133 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation 131 134 * object may be locked by itself by passing NULL as @ctx. 
132 135 */ 133 - static inline int 134 - reservation_object_lock_interruptible(struct reservation_object *obj, 135 - struct ww_acquire_ctx *ctx) 136 + static inline int dma_resv_lock_interruptible(struct dma_resv *obj, 137 + struct ww_acquire_ctx *ctx) 136 138 { 137 139 return ww_mutex_lock_interruptible(&obj->lock, ctx); 138 140 } 139 141 140 142 /** 141 - * reservation_object_lock_slow - slowpath lock the reservation object 143 + * dma_resv_lock_slow - slowpath lock the reservation object 142 144 * @obj: the reservation object 143 145 * @ctx: the locking context 144 146 * 145 147 * Acquires the reservation object after a die case. This function 146 - * will sleep until the lock becomes available. See reservation_object_lock() as 148 + * will sleep until the lock becomes available. See dma_resv_lock() as 147 149 * well. 148 150 */ 149 - static inline void 150 - reservation_object_lock_slow(struct reservation_object *obj, 151 - struct ww_acquire_ctx *ctx) 151 + static inline void dma_resv_lock_slow(struct dma_resv *obj, 152 + struct ww_acquire_ctx *ctx) 152 153 { 153 154 ww_mutex_lock_slow(&obj->lock, ctx); 154 155 } 155 156 156 157 /** 157 - * reservation_object_lock_slow_interruptible - slowpath lock the reservation 158 + * dma_resv_lock_slow_interruptible - slowpath lock the reservation 158 159 * object, interruptible 159 160 * @obj: the reservation object 160 161 * @ctx: the locking context 161 162 * 162 163 * Acquires the reservation object interruptible after a die case. This function 163 164 * will sleep until the lock becomes available. See 164 - * reservation_object_lock_interruptible() as well. 165 + * dma_resv_lock_interruptible() as well. 
165 166 */ 166 - static inline int 167 - reservation_object_lock_slow_interruptible(struct reservation_object *obj, 168 - struct ww_acquire_ctx *ctx) 167 + static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, 168 + struct ww_acquire_ctx *ctx) 169 169 { 170 170 return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); 171 171 } 172 172 173 173 /** 174 - * reservation_object_trylock - trylock the reservation object 174 + * dma_resv_trylock - trylock the reservation object 175 175 * @obj: the reservation object 176 176 * 177 177 * Tries to lock the reservation object for exclusive access and modification. ··· 181 187 * 182 188 * Returns true if the lock was acquired, false otherwise. 183 189 */ 184 - static inline bool __must_check 185 - reservation_object_trylock(struct reservation_object *obj) 190 + static inline bool __must_check dma_resv_trylock(struct dma_resv *obj) 186 191 { 187 192 return ww_mutex_trylock(&obj->lock); 188 193 } 189 194 190 195 /** 191 - * reservation_object_is_locked - is the reservation object locked 196 + * dma_resv_is_locked - is the reservation object locked 192 197 * @obj: the reservation object 193 198 * 194 199 * Returns true if the mutex is locked, false if unlocked. 195 200 */ 196 - static inline bool 197 - reservation_object_is_locked(struct reservation_object *obj) 201 + static inline bool dma_resv_is_locked(struct dma_resv *obj) 198 202 { 199 203 return ww_mutex_is_locked(&obj->lock); 200 204 } 201 205 202 206 /** 203 - * reservation_object_locking_ctx - returns the context used to lock the object 207 + * dma_resv_locking_ctx - returns the context used to lock the object 204 208 * @obj: the reservation object 205 209 * 206 210 * Returns the context used to lock a reservation object or NULL if no context 207 211 * was used or the object is not locked at all. 
208 212 */ 209 - static inline struct ww_acquire_ctx * 210 - reservation_object_locking_ctx(struct reservation_object *obj) 213 + static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) 211 214 { 212 215 return READ_ONCE(obj->lock.ctx); 213 216 } 214 217 215 218 /** 216 - * reservation_object_unlock - unlock the reservation object 219 + * dma_resv_unlock - unlock the reservation object 217 220 * @obj: the reservation object 218 221 * 219 222 * Unlocks the reservation object following exclusive access. 220 223 */ 221 - static inline void 222 - reservation_object_unlock(struct reservation_object *obj) 224 + static inline void dma_resv_unlock(struct dma_resv *obj) 223 225 { 224 226 #ifdef CONFIG_DEBUG_MUTEXES 225 227 /* Test shared fence slot reservation */ 226 228 if (rcu_access_pointer(obj->fence)) { 227 - struct reservation_object_list *fence = 228 - reservation_object_get_list(obj); 229 + struct dma_resv_list *fence = dma_resv_get_list(obj); 229 230 230 231 fence->shared_max = fence->shared_count; 231 232 } ··· 229 240 } 230 241 231 242 /** 232 - * reservation_object_get_excl - get the reservation object's 243 + * dma_resv_get_excl - get the reservation object's 233 244 * exclusive fence, with update-side lock held 234 245 * @obj: the reservation object 235 246 * ··· 241 252 * The exclusive fence or NULL 242 253 */ 243 254 static inline struct dma_fence * 244 - reservation_object_get_excl(struct reservation_object *obj) 255 + dma_resv_get_excl(struct dma_resv *obj) 245 256 { 246 257 return rcu_dereference_protected(obj->fence_excl, 247 - reservation_object_held(obj)); 258 + dma_resv_held(obj)); 248 259 } 249 260 250 261 /** 251 - * reservation_object_get_excl_rcu - get the reservation object's 262 + * dma_resv_get_excl_rcu - get the reservation object's 252 263 * exclusive fence, without lock held. 
253 264 * @obj: the reservation object 254 265 * ··· 259 270 * The exclusive fence or NULL if none 260 271 */ 261 272 static inline struct dma_fence * 262 - reservation_object_get_excl_rcu(struct reservation_object *obj) 273 + dma_resv_get_excl_rcu(struct dma_resv *obj) 263 274 { 264 275 struct dma_fence *fence; 265 276 ··· 273 284 return fence; 274 285 } 275 286 276 - void reservation_object_init(struct reservation_object *obj); 277 - void reservation_object_fini(struct reservation_object *obj); 278 - int reservation_object_reserve_shared(struct reservation_object *obj, 279 - unsigned int num_fences); 280 - void reservation_object_add_shared_fence(struct reservation_object *obj, 281 - struct dma_fence *fence); 287 + void dma_resv_init(struct dma_resv *obj); 288 + void dma_resv_fini(struct dma_resv *obj); 289 + int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); 290 + void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); 282 291 283 - void reservation_object_add_excl_fence(struct reservation_object *obj, 284 - struct dma_fence *fence); 292 + void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); 285 293 286 - int reservation_object_get_fences_rcu(struct reservation_object *obj, 287 - struct dma_fence **pfence_excl, 288 - unsigned *pshared_count, 289 - struct dma_fence ***pshared); 294 + int dma_resv_get_fences_rcu(struct dma_resv *obj, 295 + struct dma_fence **pfence_excl, 296 + unsigned *pshared_count, 297 + struct dma_fence ***pshared); 290 298 291 - int reservation_object_copy_fences(struct reservation_object *dst, 292 - struct reservation_object *src); 299 + int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); 293 300 294 - long reservation_object_wait_timeout_rcu(struct reservation_object *obj, 295 - bool wait_all, bool intr, 296 - unsigned long timeout); 301 + long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, 302 + unsigned long timeout); 297 
303 298 - bool reservation_object_test_signaled_rcu(struct reservation_object *obj, 299 - bool test_all); 304 + bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); 300 305 301 306 #endif /* _LINUX_RESERVATION_H */
+25
include/uapi/drm/panfrost_drm.h
··· 20 20 #define DRM_PANFROST_GET_BO_OFFSET 0x05 21 21 #define DRM_PANFROST_PERFCNT_ENABLE 0x06 22 22 #define DRM_PANFROST_PERFCNT_DUMP 0x07 23 + #define DRM_PANFROST_MADVISE 0x08 23 24 24 25 #define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit) 25 26 #define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo) ··· 28 27 #define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo) 29 28 #define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param) 30 29 #define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset) 30 + #define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise) 31 31 32 32 /* 33 33 * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module ··· 83 81 __u32 pad; 84 82 __s64 timeout_ns; /* absolute */ 85 83 }; 84 + 85 + #define PANFROST_BO_NOEXEC 1 86 + #define PANFROST_BO_HEAP 2 86 87 87 88 /** 88 89 * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs. ··· 201 196 202 197 struct drm_panfrost_perfcnt_dump { 203 198 __u64 buf_ptr; 199 + }; 200 + 201 + /* madvise provides a way to tell the kernel in case a buffers contents 202 + * can be discarded under memory pressure, which is useful for userspace 203 + * bo cache where we want to optimistically hold on to buffer allocate 204 + * and potential mmap, but allow the pages to be discarded under memory 205 + * pressure. 206 + * 207 + * Typical usage would involve madvise(DONTNEED) when buffer enters BO 208 + * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache. 209 + * In the WILLNEED case, 'retained' indicates to userspace whether the 210 + * backing pages still exist. 
211 + */ 212 + #define PANFROST_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */ 213 + #define PANFROST_MADV_DONTNEED 1 /* backing pages not needed */ 214 + 215 + struct drm_panfrost_madvise { 216 + __u32 handle; /* in, GEM handle */ 217 + __u32 madv; /* in, PANFROST_MADV_x */ 218 + __u32 retained; /* out, whether backing store still exists */ 204 219 }; 205 220 206 221 #if defined(__cplusplus)