Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2020-04-14' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.8:

UAPI Changes:

- drm: error out with EBUSY when device has existing master
- drm: rework SET_MASTER and DROP_MASTER perm handling

Cross-subsystem Changes:

- mm: export two symbols from slub/slob
- fbdev: savage: fix -Wextra build warning
- video: omap2: Use scnprintf() for avoiding potential buffer overflow

Core Changes:

- Remove drm_pci.h
- drm_pci_{alloc/free}() are now legacy
- Introduce managed DRM resources
- Allow drivers to subclass struct drm_framebuffer
- Introduce struct drm_afbc_framebuffer and helpers
- fbdev: remove return value from generic fbdev setup
- Introduce simple-encoder helper
- vram-helpers: set fence on plane
- dp_mst: ACT timeout improvements
- dp_mst: Remove drm_dp_mst_has_audio()
- TTM: ttm_trace_dma_{map/unmap}() cleanups
- dma-buf: add flag for PCI P2P support
- EDID: Various improvements
- Encoder: cleanup semantics of possible_clones and possible_crtcs
- VBLANK documentation updates
- Writeback documentation updates

Driver Changes:

- Convert several drivers to i2c_new_client_device()
- Drop explicit drm_mode_config_cleanup() calls from drivers
- Auto-release device structures with drmm_add_final_kfree()
- Init fbdev console after registering DRM device
- Make various .debugfs functions return 0 unconditionally; ignore errors
- video: Use scnprintf() to avoid buffer overflows
- Convert drivers to simple encoders

- drm/amdgpu: note that we can handle peer2peer DMA-buf
- drm/amdgpu: add support for exporting VRAM using DMA-buf v3
- drm/kirin: Revert change to register connectors
- drm/lima: Add optional devfreq and cooling device support
- drm/lima: Various improvements wrt. task handling
- drm/panel: nt39016: Support multiple modes and 50Hz
- drm/panel: Support Leadtek LTK050H3146W
- drm/rockchip: Add support for afbc
- drm/virtio: Various cleanups
- drm/hisilicon/hibmc: Enforce 128-byte stride alignment
- drm/qxl: Fix notify port address of cursor ring buffer
- drm/sun4i: Improvements to format handling
- drm/bridge: dw-hdmi: Various improvements

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20200414090738.GA16827@linux-uq9g

+7064 -2689
+226
Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/bridge/nwl-dsi.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Northwest Logic MIPI-DSI controller on i.MX SoCs 8 + 9 + maintainers: 10 + - Guido Gúnther <agx@sigxcpu.org> 11 + - Robert Chiras <robert.chiras@nxp.com> 12 + 13 + description: | 14 + NWL MIPI-DSI host controller found on i.MX8 platforms. This is a dsi bridge for 15 + the SOCs NWL MIPI-DSI host controller. 16 + 17 + properties: 18 + compatible: 19 + const: fsl,imx8mq-nwl-dsi 20 + 21 + reg: 22 + maxItems: 1 23 + 24 + interrupts: 25 + maxItems: 1 26 + 27 + '#address-cells': 28 + const: 1 29 + 30 + '#size-cells': 31 + const: 0 32 + 33 + clocks: 34 + items: 35 + - description: DSI core clock 36 + - description: RX_ESC clock (used in escape mode) 37 + - description: TX_ESC clock (used in escape mode) 38 + - description: PHY_REF clock 39 + - description: LCDIF clock 40 + 41 + clock-names: 42 + items: 43 + - const: core 44 + - const: rx_esc 45 + - const: tx_esc 46 + - const: phy_ref 47 + - const: lcdif 48 + 49 + mux-controls: 50 + description: 51 + mux controller node to use for operating the input mux 52 + 53 + phys: 54 + maxItems: 1 55 + description: 56 + A phandle to the phy module representing the DPHY 57 + 58 + phy-names: 59 + items: 60 + - const: dphy 61 + 62 + power-domains: 63 + maxItems: 1 64 + 65 + resets: 66 + items: 67 + - description: dsi byte reset line 68 + - description: dsi dpi reset line 69 + - description: dsi esc reset line 70 + - description: dsi pclk reset line 71 + 72 + reset-names: 73 + items: 74 + - const: byte 75 + - const: dpi 76 + - const: esc 77 + - const: pclk 78 + 79 + ports: 80 + type: object 81 + description: 82 + A node containing DSI input & output port nodes with endpoint 83 + definitions as documented in 84 + Documentation/devicetree/bindings/graph.txt. 
85 + properties: 86 + port@0: 87 + type: object 88 + description: 89 + Input port node to receive pixel data from the 90 + display controller. Exactly one endpoint must be 91 + specified. 92 + properties: 93 + '#address-cells': 94 + const: 1 95 + 96 + '#size-cells': 97 + const: 0 98 + 99 + endpoint@0: 100 + description: sub-node describing the input from LCDIF 101 + type: object 102 + 103 + endpoint@1: 104 + description: sub-node describing the input from DCSS 105 + type: object 106 + 107 + reg: 108 + const: 0 109 + 110 + required: 111 + - '#address-cells' 112 + - '#size-cells' 113 + - reg 114 + 115 + oneOf: 116 + - required: 117 + - endpoint@0 118 + - required: 119 + - endpoint@1 120 + 121 + additionalProperties: false 122 + 123 + port@1: 124 + type: object 125 + description: 126 + DSI output port node to the panel or the next bridge 127 + in the chain 128 + 129 + '#address-cells': 130 + const: 1 131 + 132 + '#size-cells': 133 + const: 0 134 + 135 + required: 136 + - '#address-cells' 137 + - '#size-cells' 138 + - port@0 139 + - port@1 140 + 141 + additionalProperties: false 142 + 143 + patternProperties: 144 + "^panel@[0-9]+$": 145 + type: object 146 + 147 + required: 148 + - '#address-cells' 149 + - '#size-cells' 150 + - clock-names 151 + - clocks 152 + - compatible 153 + - interrupts 154 + - mux-controls 155 + - phy-names 156 + - phys 157 + - ports 158 + - reg 159 + - reset-names 160 + - resets 161 + 162 + additionalProperties: false 163 + 164 + examples: 165 + - | 166 + 167 + #include <dt-bindings/clock/imx8mq-clock.h> 168 + #include <dt-bindings/interrupt-controller/arm-gic.h> 169 + #include <dt-bindings/reset/imx8mq-reset.h> 170 + 171 + mipi_dsi: mipi_dsi@30a00000 { 172 + #address-cells = <1>; 173 + #size-cells = <0>; 174 + compatible = "fsl,imx8mq-nwl-dsi"; 175 + reg = <0x30A00000 0x300>; 176 + clocks = <&clk IMX8MQ_CLK_DSI_CORE>, 177 + <&clk IMX8MQ_CLK_DSI_AHB>, 178 + <&clk IMX8MQ_CLK_DSI_IPG_DIV>, 179 + <&clk IMX8MQ_CLK_DSI_PHY_REF>, 180 + <&clk 
IMX8MQ_CLK_LCDIF_PIXEL>; 181 + clock-names = "core", "rx_esc", "tx_esc", "phy_ref", "lcdif"; 182 + interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; 183 + mux-controls = <&mux 0>; 184 + power-domains = <&pgc_mipi>; 185 + resets = <&src IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N>, 186 + <&src IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N>, 187 + <&src IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N>, 188 + <&src IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N>; 189 + reset-names = "byte", "dpi", "esc", "pclk"; 190 + phys = <&dphy>; 191 + phy-names = "dphy"; 192 + 193 + panel@0 { 194 + #address-cells = <1>; 195 + #size-cells = <0>; 196 + compatible = "rocktech,jh057n00900"; 197 + reg = <0>; 198 + port@0 { 199 + reg = <0>; 200 + panel_in: endpoint { 201 + remote-endpoint = <&mipi_dsi_out>; 202 + }; 203 + }; 204 + }; 205 + 206 + ports { 207 + #address-cells = <1>; 208 + #size-cells = <0>; 209 + 210 + port@0 { 211 + #size-cells = <0>; 212 + #address-cells = <1>; 213 + reg = <0>; 214 + mipi_dsi_in: endpoint@0 { 215 + reg = <0>; 216 + remote-endpoint = <&lcdif_mipi_dsi>; 217 + }; 218 + }; 219 + port@1 { 220 + reg = <1>; 221 + mipi_dsi_out: endpoint { 222 + remote-endpoint = <&panel_in>; 223 + }; 224 + }; 225 + }; 226 + };
+2
Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
··· 24 24 - boe,tv101wum-n53 25 25 # AUO B101UAN08.3 10.1" WUXGA TFT LCD panel 26 26 - auo,b101uan08.3 27 + # BOE TV105WUM-NW0 10.5" WUXGA TFT LCD panel 28 + - boe,tv105wum-nw0 27 29 28 30 reg: 29 31 description: the virtual channel number of a DSI peripheral
+4 -4
Documentation/devicetree/bindings/display/panel/display-timings.yaml
··· 4 4 $id: http://devicetree.org/schemas/display/panel/display-timings.yaml# 5 5 $schema: http://devicetree.org/meta-schemas/core.yaml# 6 6 7 - title: display timing bindings 7 + title: display timings bindings 8 8 9 9 maintainers: 10 10 - Thierry Reding <thierry.reding@gmail.com> ··· 14 14 description: | 15 15 A display panel may be able to handle several display timings, 16 16 with different resolutions. 17 - The display-timings node makes it possible to specify the timing 17 + The display-timings node makes it possible to specify the timings 18 18 and to specify the timing that is native for the display. 19 19 20 20 properties: ··· 25 25 $ref: /schemas/types.yaml#/definitions/phandle 26 26 description: | 27 27 The default display timing is the one specified as native-mode. 28 - If no native-mode is specified then the first node is assumed the 29 - native mode. 28 + If no native-mode is specified then the first node is assumed 29 + to be the native mode. 30 30 31 31 patternProperties: 32 32 "^timing":
-20
Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
··· 1 - Feiyang FY07024DI26A30-D 7" MIPI-DSI LCD Panel 2 - 3 - Required properties: 4 - - compatible: must be "feiyang,fy07024di26a30d" 5 - - reg: DSI virtual channel used by that screen 6 - - avdd-supply: analog regulator dc1 switch 7 - - dvdd-supply: 3v3 digital regulator 8 - - reset-gpios: a GPIO phandle for the reset pin 9 - 10 - Optional properties: 11 - - backlight: phandle for the backlight control. 12 - 13 - panel@0 { 14 - compatible = "feiyang,fy07024di26a30d"; 15 - reg = <0>; 16 - avdd-supply = <&reg_dc1sw>; 17 - dvdd-supply = <&reg_dldo2>; 18 - reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */ 19 - backlight = <&backlight>; 20 - };
+58
Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/feiyang,fy07024di26a30d.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Feiyang FY07024DI26A30-D 7" MIPI-DSI LCD Panel 8 + 9 + maintainers: 10 + - Jagan Teki <jagan@amarulasolutions.com> 11 + 12 + allOf: 13 + - $ref: panel-common.yaml# 14 + 15 + properties: 16 + compatible: 17 + const: feiyang,fy07024di26a30d 18 + 19 + reg: 20 + description: DSI virtual channel used by that screen 21 + maxItems: 1 22 + 23 + avdd-supply: 24 + description: analog regulator dc1 switch 25 + 26 + dvdd-supply: 27 + description: 3v3 digital regulator 28 + 29 + reset-gpios: true 30 + 31 + backlight: true 32 + 33 + required: 34 + - compatible 35 + - reg 36 + - avdd-supply 37 + - dvdd-supply 38 + - reset-gpios 39 + 40 + additionalProperties: false 41 + 42 + examples: 43 + - | 44 + #include <dt-bindings/gpio/gpio.h> 45 + 46 + dsi { 47 + #address-cells = <1>; 48 + #size-cells = <0>; 49 + 50 + panel@0 { 51 + compatible = "feiyang,fy07024di26a30d"; 52 + reg = <0>; 53 + avdd-supply = <&reg_dc1sw>; 54 + dvdd-supply = <&reg_dldo2>; 55 + reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */ 56 + backlight = <&backlight>; 57 + }; 58 + };
+51
Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/leadtek,ltk050h3146w.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Leadtek LTK050H3146W 5.0in 720x1280 DSI panel 8 + 9 + maintainers: 10 + - Heiko Stuebner <heiko.stuebner@theobroma-systems.com> 11 + 12 + allOf: 13 + - $ref: panel-common.yaml# 14 + 15 + properties: 16 + compatible: 17 + enum: 18 + - leadtek,ltk050h3146w 19 + - leadtek,ltk050h3146w-a2 20 + reg: true 21 + backlight: true 22 + reset-gpios: true 23 + iovcc-supply: 24 + description: regulator that supplies the iovcc voltage 25 + vci-supply: 26 + description: regulator that supplies the vci voltage 27 + 28 + required: 29 + - compatible 30 + - reg 31 + - backlight 32 + - iovcc-supply 33 + - vci-supply 34 + 35 + additionalProperties: false 36 + 37 + examples: 38 + - | 39 + dsi { 40 + #address-cells = <1>; 41 + #size-cells = <0>; 42 + panel@0 { 43 + compatible = "leadtek,ltk050h3146w"; 44 + reg = <0>; 45 + backlight = <&backlight>; 46 + iovcc-supply = <&vcc_1v8>; 47 + vci-supply = <&vcc3v3_lcd>; 48 + }; 49 + }; 50 + 51 + ...
-1
Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
··· 37 37 dsi { 38 38 #address-cells = <1>; 39 39 #size-cells = <0>; 40 - reg = <0xff450000 0x1000>; 41 40 42 41 panel@0 { 43 42 compatible = "leadtek,ltk500hd1829";
+2 -2
Documentation/devicetree/bindings/display/panel/panel-common.yaml
··· 63 63 64 64 display-timings: 65 65 description: 66 - Some display panels supports several resolutions with different timing. 66 + Some display panels support several resolutions with different timings. 67 67 The display-timings bindings supports specifying several timings and 68 - optional specify which is the native mode. 68 + optionally specifying which is the native mode. 69 69 allOf: 70 70 - $ref: display-timings.yaml# 71 71
+2
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
··· 227 227 - sharp,ls020b1dd01d 228 228 # Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel 229 229 - shelly,sca07010-bfn-lnn 230 + # Starry KR070PE2T 7" WVGA TFT LCD panel 231 + - starry,kr070pe2t 230 232 # Starry 12.2" (1920x1200 pixels) TFT LCD panel 231 233 - starry,kr122ea0sra 232 234 # Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
-30
Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
··· 1 - Sitronix ST7701 based LCD panels 2 - 3 - ST7701 designed for small and medium sizes of TFT LCD display, is 4 - capable of supporting up to 480RGBX864 in resolution. It provides 5 - several system interfaces like MIPI/RGB/SPI. 6 - 7 - Techstar TS8550B is 480x854, 2-lane MIPI DSI LCD panel which has 8 - inbuilt ST7701 chip. 9 - 10 - Required properties: 11 - - compatible: must be "sitronix,st7701" and one of 12 - * "techstar,ts8550b" 13 - - reset-gpios: a GPIO phandle for the reset pin 14 - 15 - Required properties for techstar,ts8550b: 16 - - reg: DSI virtual channel used by that screen 17 - - VCC-supply: analog regulator for MIPI circuit 18 - - IOVCC-supply: I/O system regulator 19 - 20 - Optional properties: 21 - - backlight: phandle for the backlight control. 22 - 23 - panel@0 { 24 - compatible = "techstar,ts8550b", "sitronix,st7701"; 25 - reg = <0>; 26 - VCC-supply = <&reg_dldo2>; 27 - IOVCC-supply = <&reg_dldo2>; 28 - reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */ 29 - backlight = <&backlight>; 30 - };
+69
Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/sitronix,st7701.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Sitronix ST7701 based LCD panels 8 + 9 + maintainers: 10 + - Jagan Teki <jagan@amarulasolutions.com> 11 + 12 + description: | 13 + ST7701 designed for small and medium sizes of TFT LCD display, is 14 + capable of supporting up to 480RGBX864 in resolution. It provides 15 + several system interfaces like MIPI/RGB/SPI. 16 + 17 + Techstar TS8550B is 480x854, 2-lane MIPI DSI LCD panel which has 18 + inbuilt ST7701 chip. 19 + 20 + allOf: 21 + - $ref: panel-common.yaml# 22 + 23 + properties: 24 + compatible: 25 + items: 26 + - enum: 27 + - techstar,ts8550b 28 + - const: sitronix,st7701 29 + 30 + reg: 31 + description: DSI virtual channel used by that screen 32 + maxItems: 1 33 + 34 + VCC-supply: 35 + description: analog regulator for MIPI circuit 36 + 37 + IOVCC-supply: 38 + description: I/O system regulator 39 + 40 + reset-gpios: true 41 + 42 + backlight: true 43 + 44 + required: 45 + - compatible 46 + - reg 47 + - VCC-supply 48 + - IOVCC-supply 49 + - reset-gpios 50 + 51 + additionalProperties: false 52 + 53 + examples: 54 + - | 55 + #include <dt-bindings/gpio/gpio.h> 56 + 57 + dsi { 58 + #address-cells = <1>; 59 + #size-cells = <0>; 60 + 61 + panel@0 { 62 + compatible = "techstar,ts8550b", "sitronix,st7701"; 63 + reg = <0>; 64 + VCC-supply = <&reg_dldo2>; 65 + IOVCC-supply = <&reg_dldo2>; 66 + reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */ 67 + backlight = <&backlight>; 68 + }; 69 + };
+57
Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/visionox,rm69299.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Visionox model RM69299 Panels Device Tree Bindings. 8 + 9 + maintainers: 10 + - Harigovindan P <harigovi@codeaurora.org> 11 + 12 + description: | 13 + This binding is for display panels using a Visionox RM692999 panel. 14 + 15 + allOf: 16 + - $ref: panel-common.yaml# 17 + 18 + properties: 19 + compatible: 20 + const: visionox,rm69299-1080p-display 21 + 22 + vdda-supply: 23 + description: | 24 + Phandle of the regulator that provides the vdda supply voltage. 25 + 26 + vdd3p3-supply: 27 + description: | 28 + Phandle of the regulator that provides the vdd3p3 supply voltage. 29 + 30 + port: true 31 + reset-gpios: true 32 + 33 + additionalProperties: false 34 + 35 + required: 36 + - compatible 37 + - vdda-supply 38 + - vdd3p3-supply 39 + - reset-gpios 40 + - port 41 + 42 + examples: 43 + - | 44 + panel { 45 + compatible = "visionox,rm69299-1080p-display"; 46 + 47 + vdda-supply = <&src_pp1800_l8c>; 48 + vdd3p3-supply = <&src_pp2800_l18a>; 49 + 50 + reset-gpios = <&pm6150l_gpio 3 0>; 51 + port { 52 + panel0_in: endpoint { 53 + remote-endpoint = <&dsi0_out>; 54 + }; 55 + }; 56 + }; 57 + ...
-1
Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
··· 37 37 dsi { 38 38 #address-cells = <1>; 39 39 #size-cells = <0>; 40 - reg = <0xff450000 0x1000>; 41 40 42 41 panel@0 { 43 42 compatible = "xinpeng,xpp055c272";
-74
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
··· 1 - device-tree bindings for rockchip soc display controller (vop) 2 - 3 - VOP (Visual Output Processor) is the Display Controller for the Rockchip 4 - series of SoCs which transfers the image data from a video memory 5 - buffer to an external LCD interface. 6 - 7 - Required properties: 8 - - compatible: value should be one of the following 9 - "rockchip,rk3036-vop"; 10 - "rockchip,rk3126-vop"; 11 - "rockchip,px30-vop-lit"; 12 - "rockchip,px30-vop-big"; 13 - "rockchip,rk3066-vop"; 14 - "rockchip,rk3188-vop"; 15 - "rockchip,rk3288-vop"; 16 - "rockchip,rk3368-vop"; 17 - "rockchip,rk3366-vop"; 18 - "rockchip,rk3399-vop-big"; 19 - "rockchip,rk3399-vop-lit"; 20 - "rockchip,rk3228-vop"; 21 - "rockchip,rk3328-vop"; 22 - 23 - - reg: Must contain one entry corresponding to the base address and length 24 - of the register space. Can optionally contain a second entry 25 - corresponding to the CRTC gamma LUT address. 26 - 27 - - interrupts: should contain a list of all VOP IP block interrupts in the 28 - order: VSYNC, LCD_SYSTEM. The interrupt specifier 29 - format depends on the interrupt controller used. 30 - 31 - - clocks: must include clock specifiers corresponding to entries in the 32 - clock-names property. 33 - 34 - - clock-names: Must contain 35 - aclk_vop: for ddr buffer transfer. 36 - hclk_vop: for ahb bus to R/W the phy regs. 37 - dclk_vop: pixel clock. 38 - 39 - - resets: Must contain an entry for each entry in reset-names. 40 - See ../reset/reset.txt for details. 41 - - reset-names: Must include the following entries: 42 - - axi 43 - - ahb 44 - - dclk 45 - 46 - - iommus: required a iommu node 47 - 48 - - port: A port node with endpoint definitions as defined in 49 - Documentation/devicetree/bindings/media/video-interfaces.txt. 
50 - 51 - Example: 52 - SoC specific DT entry: 53 - vopb: vopb@ff930000 { 54 - compatible = "rockchip,rk3288-vop"; 55 - reg = <0x0 0xff930000 0x0 0x19c>, <0x0 0xff931000 0x0 0x1000>; 56 - interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; 57 - clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>; 58 - clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; 59 - resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>; 60 - reset-names = "axi", "ahb", "dclk"; 61 - iommus = <&vopb_mmu>; 62 - vopb_out: port { 63 - #address-cells = <1>; 64 - #size-cells = <0>; 65 - vopb_out_edp: endpoint@0 { 66 - reg = <0>; 67 - remote-endpoint=<&edp_in_vopb>; 68 - }; 69 - vopb_out_hdmi: endpoint@1 { 70 - reg = <1>; 71 - remote-endpoint=<&hdmi_in_vopb>; 72 - }; 73 - }; 74 - };
+134
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/rockchip/rockchip-vop.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Rockchip SoC display controller (VOP) 8 + 9 + description: 10 + VOP (Video Output Processor) is the display controller for the Rockchip 11 + series of SoCs which transfers the image data from a video memory 12 + buffer to an external LCD interface. 13 + 14 + maintainers: 15 + - Sandy Huang <hjc@rock-chips.com> 16 + - Heiko Stuebner <heiko@sntech.de> 17 + 18 + properties: 19 + compatible: 20 + enum: 21 + - rockchip,px30-vop-big 22 + - rockchip,px30-vop-lit 23 + - rockchip,rk3036-vop 24 + - rockchip,rk3066-vop 25 + - rockchip,rk3126-vop 26 + - rockchip,rk3188-vop 27 + - rockchip,rk3228-vop 28 + - rockchip,rk3288-vop 29 + - rockchip,rk3328-vop 30 + - rockchip,rk3366-vop 31 + - rockchip,rk3368-vop 32 + - rockchip,rk3399-vop-big 33 + - rockchip,rk3399-vop-lit 34 + 35 + reg: 36 + minItems: 1 37 + items: 38 + - description: 39 + Must contain one entry corresponding to the base address and length 40 + of the register space. 41 + - description: 42 + Can optionally contain a second entry corresponding to 43 + the CRTC gamma LUT address. 44 + 45 + interrupts: 46 + maxItems: 1 47 + description: 48 + The VOP interrupt is shared by several interrupt sources, such as 49 + frame start (VSYNC), line flag and other status interrupts. 50 + 51 + clocks: 52 + items: 53 + - description: Clock for ddr buffer transfer. 54 + - description: Pixel clock. 55 + - description: Clock for the ahb bus to R/W the phy regs. 
56 + 57 + clock-names: 58 + items: 59 + - const: aclk_vop 60 + - const: dclk_vop 61 + - const: hclk_vop 62 + 63 + resets: 64 + maxItems: 3 65 + 66 + reset-names: 67 + items: 68 + - const: axi 69 + - const: ahb 70 + - const: dclk 71 + 72 + port: 73 + type: object 74 + description: 75 + A port node with endpoint definitions as defined in 76 + Documentation/devicetree/bindings/media/video-interfaces.txt. 77 + 78 + assigned-clocks: 79 + maxItems: 2 80 + 81 + assigned-clock-rates: 82 + maxItems: 2 83 + 84 + iommus: 85 + maxItems: 1 86 + 87 + power-domains: 88 + maxItems: 1 89 + 90 + required: 91 + - compatible 92 + - reg 93 + - interrupts 94 + - clocks 95 + - clock-names 96 + - resets 97 + - reset-names 98 + - port 99 + 100 + additionalProperties: false 101 + 102 + examples: 103 + - | 104 + #include <dt-bindings/clock/rk3288-cru.h> 105 + #include <dt-bindings/interrupt-controller/arm-gic.h> 106 + #include <dt-bindings/power/rk3288-power.h> 107 + vopb: vopb@ff930000 { 108 + compatible = "rockchip,rk3288-vop"; 109 + reg = <0x0 0xff930000 0x0 0x19c>, 110 + <0x0 0xff931000 0x0 0x1000>; 111 + interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; 112 + clocks = <&cru ACLK_VOP0>, 113 + <&cru DCLK_VOP0>, 114 + <&cru HCLK_VOP0>; 115 + clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; 116 + power-domains = <&power RK3288_PD_VIO>; 117 + resets = <&cru SRST_LCDC1_AXI>, 118 + <&cru SRST_LCDC1_AHB>, 119 + <&cru SRST_LCDC1_DCLK>; 120 + reset-names = "axi", "ahb", "dclk"; 121 + iommus = <&vopb_mmu>; 122 + vopb_out: port { 123 + #address-cells = <1>; 124 + #size-cells = <0>; 125 + vopb_out_edp: endpoint@0 { 126 + reg = <0>; 127 + remote-endpoint=<&edp_in_vopb>; 128 + }; 129 + vopb_out_hdmi: endpoint@1 { 130 + reg = <1>; 131 + remote-endpoint=<&hdmi_in_vopb>; 132 + }; 133 + }; 134 + };
+12
Documentation/gpu/drm-internals.rst
··· 132 132 other BARs, so leaving it mapped could cause undesired behaviour like 133 133 hangs or memory corruption. 134 134 135 + Managed Resources 136 + ----------------- 137 + 138 + .. kernel-doc:: drivers/gpu/drm/drm_managed.c 139 + :doc: managed resources 140 + 141 + .. kernel-doc:: drivers/gpu/drm/drm_managed.c 142 + :export: 143 + 144 + .. kernel-doc:: include/drm/drm_managed.h 145 + :internal: 146 + 135 147 Bus-specific Device Registration and PCI Support 136 148 ------------------------------------------------ 137 149
+4 -1
Documentation/gpu/drm-kms.rst
··· 3 3 ========================= 4 4 5 5 Drivers must initialize the mode setting core by calling 6 - drm_mode_config_init() on the DRM device. The function 6 + drmm_mode_config_init() on the DRM device. The function 7 7 initializes the :c:type:`struct drm_device <drm_device>` 8 8 mode_config field and never fails. Once done, mode configuration must 9 9 be setup by initializing the following fields. ··· 396 396 397 397 Writeback Connectors 398 398 -------------------- 399 + 400 + .. kernel-doc:: include/drm/drm_writeback.h 401 + :internal: 399 402 400 403 .. kernel-doc:: drivers/gpu/drm/drm_writeback.c 401 404 :doc: overview
-9
Documentation/gpu/drm-mm.rst
··· 373 373 .. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c 374 374 :export: 375 375 376 - VRAM Helper Function Reference 377 - ============================== 378 - 379 - .. kernel-doc:: drivers/gpu/drm/drm_vram_helper_common.c 380 - :doc: overview 381 - 382 - .. kernel-doc:: include/drm/drm_gem_vram_helper.h 383 - :internal: 384 - 385 376 GEM VRAM Helper Functions Reference 386 377 ----------------------------------- 387 378
+3 -3
MAINTAINERS
··· 5045 5045 F: include/linux/*fence.h 5046 5046 F: include/linux/dma-buf* 5047 5047 F: include/linux/dma-resv.h 5048 - K: dma_(buf|fence|resv) 5048 + K: \bdma_(?:buf|fence|resv)\b 5049 5049 5050 5050 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM 5051 5051 M: Vinod Koul <vkoul@kernel.org> ··· 5300 5300 DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS 5301 5301 M: Jagan Teki <jagan@amarulasolutions.com> 5302 5302 S: Maintained 5303 - F: Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt 5303 + F: Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml 5304 5304 F: drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c 5305 5305 5306 5306 DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS ··· 5450 5450 DRM DRIVER FOR SITRONIX ST7701 PANELS 5451 5451 M: Jagan Teki <jagan@amarulasolutions.com> 5452 5452 S: Maintained 5453 - F: Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt 5453 + F: Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml 5454 5454 F: drivers/gpu/drm/panel/panel-sitronix-st7701.c 5455 5455 5456 5456 DRM DRIVER FOR SITRONIX ST7735R PANELS
+2 -1
drivers/dma-buf/Makefile
··· 9 9 10 10 dmabuf_selftests-y := \ 11 11 selftest.o \ 12 - st-dma-fence.o 12 + st-dma-fence.o \ 13 + st-dma-fence-chain.o 13 14 14 15 obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
+2
drivers/dma-buf/dma-buf.c
··· 690 690 691 691 attach->dev = dev; 692 692 attach->dmabuf = dmabuf; 693 + if (importer_ops) 694 + attach->peer2peer = importer_ops->allow_peer2peer; 693 695 attach->importer_ops = importer_ops; 694 696 attach->importer_priv = importer_priv; 695 697
+9 -1
drivers/dma-buf/dma-fence-chain.c
··· 62 62 replacement = NULL; 63 63 } 64 64 65 - tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement); 65 + tmp = cmpxchg((struct dma_fence __force **)&chain->prev, 66 + prev, replacement); 66 67 if (tmp == prev) 67 68 dma_fence_put(tmp); 68 69 else ··· 99 98 return -EINVAL; 100 99 101 100 dma_fence_chain_for_each(*pfence, &chain->base) { 101 + if ((*pfence)->seqno < seqno) { /* already signaled */ 102 + dma_fence_put(*pfence); 103 + *pfence = NULL; 104 + break; 105 + } 106 + 102 107 if ((*pfence)->context != chain->base.context || 103 108 to_dma_fence_chain(*pfence)->prev_seqno < seqno) 104 109 break; ··· 228 221 * @chain: the chain node to initialize 229 222 * @prev: the previous fence 230 223 * @fence: the current fence 224 + * @seqno: the sequence number (syncpt) of the fence within the chain 231 225 * 232 226 * Initialize a new chain node and either start a new chain or add the node to 233 227 * the existing chain of the previous fence.
+1
drivers/dma-buf/selftests.h
··· 11 11 */ 12 12 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */ 13 13 selftest(dma_fence, dma_fence) 14 + selftest(dma_fence_chain, dma_fence_chain)
+715
drivers/dma-buf/st-dma-fence-chain.c
··· 1 + // SPDX-License-Identifier: MIT 2 + 3 + /* 4 + * Copyright © 2019 Intel Corporation 5 + */ 6 + 7 + #include <linux/delay.h> 8 + #include <linux/dma-fence.h> 9 + #include <linux/dma-fence-chain.h> 10 + #include <linux/kernel.h> 11 + #include <linux/kthread.h> 12 + #include <linux/mm.h> 13 + #include <linux/sched/signal.h> 14 + #include <linux/slab.h> 15 + #include <linux/spinlock.h> 16 + #include <linux/random.h> 17 + 18 + #include "selftest.h" 19 + 20 + #define CHAIN_SZ (4 << 10) 21 + 22 + static struct kmem_cache *slab_fences; 23 + 24 + static inline struct mock_fence { 25 + struct dma_fence base; 26 + spinlock_t lock; 27 + } *to_mock_fence(struct dma_fence *f) { 28 + return container_of(f, struct mock_fence, base); 29 + } 30 + 31 + static const char *mock_name(struct dma_fence *f) 32 + { 33 + return "mock"; 34 + } 35 + 36 + static void mock_fence_release(struct dma_fence *f) 37 + { 38 + kmem_cache_free(slab_fences, to_mock_fence(f)); 39 + } 40 + 41 + static const struct dma_fence_ops mock_ops = { 42 + .get_driver_name = mock_name, 43 + .get_timeline_name = mock_name, 44 + .release = mock_fence_release, 45 + }; 46 + 47 + static struct dma_fence *mock_fence(void) 48 + { 49 + struct mock_fence *f; 50 + 51 + f = kmem_cache_alloc(slab_fences, GFP_KERNEL); 52 + if (!f) 53 + return NULL; 54 + 55 + spin_lock_init(&f->lock); 56 + dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0); 57 + 58 + return &f->base; 59 + } 60 + 61 + static inline struct mock_chain { 62 + struct dma_fence_chain base; 63 + } *to_mock_chain(struct dma_fence *f) { 64 + return container_of(f, struct mock_chain, base.base); 65 + } 66 + 67 + static struct dma_fence *mock_chain(struct dma_fence *prev, 68 + struct dma_fence *fence, 69 + u64 seqno) 70 + { 71 + struct mock_chain *f; 72 + 73 + f = kmalloc(sizeof(*f), GFP_KERNEL); 74 + if (!f) 75 + return NULL; 76 + 77 + dma_fence_chain_init(&f->base, 78 + dma_fence_get(prev), 79 + dma_fence_get(fence), 80 + seqno); 81 + 82 + return &f->base.base; 83 
+ } 84 + 85 + static int sanitycheck(void *arg) 86 + { 87 + struct dma_fence *f, *chain; 88 + int err = 0; 89 + 90 + f = mock_fence(); 91 + if (!f) 92 + return -ENOMEM; 93 + 94 + chain = mock_chain(NULL, f, 1); 95 + if (!chain) 96 + err = -ENOMEM; 97 + 98 + dma_fence_signal(f); 99 + dma_fence_put(f); 100 + 101 + dma_fence_put(chain); 102 + 103 + return err; 104 + } 105 + 106 + struct fence_chains { 107 + unsigned int chain_length; 108 + struct dma_fence **fences; 109 + struct dma_fence **chains; 110 + 111 + struct dma_fence *tail; 112 + }; 113 + 114 + static uint64_t seqno_inc(unsigned int i) 115 + { 116 + return i + 1; 117 + } 118 + 119 + static int fence_chains_init(struct fence_chains *fc, unsigned int count, 120 + uint64_t (*seqno_fn)(unsigned int)) 121 + { 122 + unsigned int i; 123 + int err = 0; 124 + 125 + fc->chains = kvmalloc_array(count, sizeof(*fc->chains), 126 + GFP_KERNEL | __GFP_ZERO); 127 + if (!fc->chains) 128 + return -ENOMEM; 129 + 130 + fc->fences = kvmalloc_array(count, sizeof(*fc->fences), 131 + GFP_KERNEL | __GFP_ZERO); 132 + if (!fc->fences) { 133 + err = -ENOMEM; 134 + goto err_chains; 135 + } 136 + 137 + fc->tail = NULL; 138 + for (i = 0; i < count; i++) { 139 + fc->fences[i] = mock_fence(); 140 + if (!fc->fences[i]) { 141 + err = -ENOMEM; 142 + goto unwind; 143 + } 144 + 145 + fc->chains[i] = mock_chain(fc->tail, 146 + fc->fences[i], 147 + seqno_fn(i)); 148 + if (!fc->chains[i]) { 149 + err = -ENOMEM; 150 + goto unwind; 151 + } 152 + 153 + fc->tail = fc->chains[i]; 154 + } 155 + 156 + fc->chain_length = i; 157 + return 0; 158 + 159 + unwind: 160 + for (i = 0; i < count; i++) { 161 + dma_fence_put(fc->fences[i]); 162 + dma_fence_put(fc->chains[i]); 163 + } 164 + kvfree(fc->fences); 165 + err_chains: 166 + kvfree(fc->chains); 167 + return err; 168 + } 169 + 170 + static void fence_chains_fini(struct fence_chains *fc) 171 + { 172 + unsigned int i; 173 + 174 + for (i = 0; i < fc->chain_length; i++) { 175 + dma_fence_signal(fc->fences[i]); 176 
+ dma_fence_put(fc->fences[i]); 177 + } 178 + kvfree(fc->fences); 179 + 180 + for (i = 0; i < fc->chain_length; i++) 181 + dma_fence_put(fc->chains[i]); 182 + kvfree(fc->chains); 183 + } 184 + 185 + static int find_seqno(void *arg) 186 + { 187 + struct fence_chains fc; 188 + struct dma_fence *fence; 189 + int err; 190 + int i; 191 + 192 + err = fence_chains_init(&fc, 64, seqno_inc); 193 + if (err) 194 + return err; 195 + 196 + fence = dma_fence_get(fc.tail); 197 + err = dma_fence_chain_find_seqno(&fence, 0); 198 + dma_fence_put(fence); 199 + if (err) { 200 + pr_err("Reported %d for find_seqno(0)!\n", err); 201 + goto err; 202 + } 203 + 204 + for (i = 0; i < fc.chain_length; i++) { 205 + fence = dma_fence_get(fc.tail); 206 + err = dma_fence_chain_find_seqno(&fence, i + 1); 207 + dma_fence_put(fence); 208 + if (err) { 209 + pr_err("Reported %d for find_seqno(%d:%d)!\n", 210 + err, fc.chain_length + 1, i + 1); 211 + goto err; 212 + } 213 + if (fence != fc.chains[i]) { 214 + pr_err("Incorrect fence reported by find_seqno(%d:%d)\n", 215 + fc.chain_length + 1, i + 1); 216 + err = -EINVAL; 217 + goto err; 218 + } 219 + 220 + dma_fence_get(fence); 221 + err = dma_fence_chain_find_seqno(&fence, i + 1); 222 + dma_fence_put(fence); 223 + if (err) { 224 + pr_err("Error reported for finding self\n"); 225 + goto err; 226 + } 227 + if (fence != fc.chains[i]) { 228 + pr_err("Incorrect fence reported by find self\n"); 229 + err = -EINVAL; 230 + goto err; 231 + } 232 + 233 + dma_fence_get(fence); 234 + err = dma_fence_chain_find_seqno(&fence, i + 2); 235 + dma_fence_put(fence); 236 + if (!err) { 237 + pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n", 238 + i + 1, i + 2); 239 + err = -EINVAL; 240 + goto err; 241 + } 242 + 243 + dma_fence_get(fence); 244 + err = dma_fence_chain_find_seqno(&fence, i); 245 + dma_fence_put(fence); 246 + if (err) { 247 + pr_err("Error reported for previous fence!\n"); 248 + goto err; 249 + } 250 + if (i > 0 && fence != fc.chains[i - 1]) 
{ 251 + pr_err("Incorrect fence reported by find_seqno(%d:%d)\n", 252 + i + 1, i); 253 + err = -EINVAL; 254 + goto err; 255 + } 256 + } 257 + 258 + err: 259 + fence_chains_fini(&fc); 260 + return err; 261 + } 262 + 263 + static int find_signaled(void *arg) 264 + { 265 + struct fence_chains fc; 266 + struct dma_fence *fence; 267 + int err; 268 + 269 + err = fence_chains_init(&fc, 2, seqno_inc); 270 + if (err) 271 + return err; 272 + 273 + dma_fence_signal(fc.fences[0]); 274 + 275 + fence = dma_fence_get(fc.tail); 276 + err = dma_fence_chain_find_seqno(&fence, 1); 277 + dma_fence_put(fence); 278 + if (err) { 279 + pr_err("Reported %d for find_seqno()!\n", err); 280 + goto err; 281 + } 282 + 283 + if (fence && fence != fc.chains[0]) { 284 + pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n", 285 + fence->seqno); 286 + 287 + dma_fence_get(fence); 288 + err = dma_fence_chain_find_seqno(&fence, 1); 289 + dma_fence_put(fence); 290 + if (err) 291 + pr_err("Reported %d for finding self!\n", err); 292 + 293 + err = -EINVAL; 294 + } 295 + 296 + err: 297 + fence_chains_fini(&fc); 298 + return err; 299 + } 300 + 301 + static int find_out_of_order(void *arg) 302 + { 303 + struct fence_chains fc; 304 + struct dma_fence *fence; 305 + int err; 306 + 307 + err = fence_chains_init(&fc, 3, seqno_inc); 308 + if (err) 309 + return err; 310 + 311 + dma_fence_signal(fc.fences[1]); 312 + 313 + fence = dma_fence_get(fc.tail); 314 + err = dma_fence_chain_find_seqno(&fence, 2); 315 + dma_fence_put(fence); 316 + if (err) { 317 + pr_err("Reported %d for find_seqno()!\n", err); 318 + goto err; 319 + } 320 + 321 + if (fence && fence != fc.chains[1]) { 322 + pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n", 323 + fence->seqno); 324 + 325 + dma_fence_get(fence); 326 + err = dma_fence_chain_find_seqno(&fence, 2); 327 + dma_fence_put(fence); 328 + if (err) 329 + pr_err("Reported %d for finding self!\n", err); 330 + 331 + err = -EINVAL; 332 + } 333 + 
334 + err: 335 + fence_chains_fini(&fc); 336 + return err; 337 + } 338 + 339 + static uint64_t seqno_inc2(unsigned int i) 340 + { 341 + return 2 * i + 2; 342 + } 343 + 344 + static int find_gap(void *arg) 345 + { 346 + struct fence_chains fc; 347 + struct dma_fence *fence; 348 + int err; 349 + int i; 350 + 351 + err = fence_chains_init(&fc, 64, seqno_inc2); 352 + if (err) 353 + return err; 354 + 355 + for (i = 0; i < fc.chain_length; i++) { 356 + fence = dma_fence_get(fc.tail); 357 + err = dma_fence_chain_find_seqno(&fence, 2 * i + 1); 358 + dma_fence_put(fence); 359 + if (err) { 360 + pr_err("Reported %d for find_seqno(%d:%d)!\n", 361 + err, fc.chain_length + 1, 2 * i + 1); 362 + goto err; 363 + } 364 + if (fence != fc.chains[i]) { 365 + pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n", 366 + fence->seqno, 367 + fc.chain_length + 1, 368 + 2 * i + 1); 369 + err = -EINVAL; 370 + goto err; 371 + } 372 + 373 + dma_fence_get(fence); 374 + err = dma_fence_chain_find_seqno(&fence, 2 * i + 2); 375 + dma_fence_put(fence); 376 + if (err) { 377 + pr_err("Error reported for finding self\n"); 378 + goto err; 379 + } 380 + if (fence != fc.chains[i]) { 381 + pr_err("Incorrect fence reported by find self\n"); 382 + err = -EINVAL; 383 + goto err; 384 + } 385 + } 386 + 387 + err: 388 + fence_chains_fini(&fc); 389 + return err; 390 + } 391 + 392 + struct find_race { 393 + struct fence_chains fc; 394 + atomic_t children; 395 + }; 396 + 397 + static int __find_race(void *arg) 398 + { 399 + struct find_race *data = arg; 400 + int err = 0; 401 + 402 + while (!kthread_should_stop()) { 403 + struct dma_fence *fence = dma_fence_get(data->fc.tail); 404 + int seqno; 405 + 406 + seqno = prandom_u32_max(data->fc.chain_length) + 1; 407 + 408 + err = dma_fence_chain_find_seqno(&fence, seqno); 409 + if (err) { 410 + pr_err("Failed to find fence seqno:%d\n", 411 + seqno); 412 + dma_fence_put(fence); 413 + break; 414 + } 415 + if (!fence) 416 + goto signal; 417 + 418 + err = 
dma_fence_chain_find_seqno(&fence, seqno); 419 + if (err) { 420 + pr_err("Reported an invalid fence for find-self:%d\n", 421 + seqno); 422 + dma_fence_put(fence); 423 + break; 424 + } 425 + 426 + if (fence->seqno < seqno) { 427 + pr_err("Reported an earlier fence.seqno:%lld for seqno:%d\n", 428 + fence->seqno, seqno); 429 + err = -EINVAL; 430 + dma_fence_put(fence); 431 + break; 432 + } 433 + 434 + dma_fence_put(fence); 435 + 436 + signal: 437 + seqno = prandom_u32_max(data->fc.chain_length - 1); 438 + dma_fence_signal(data->fc.fences[seqno]); 439 + cond_resched(); 440 + } 441 + 442 + if (atomic_dec_and_test(&data->children)) 443 + wake_up_var(&data->children); 444 + return err; 445 + } 446 + 447 + static int find_race(void *arg) 448 + { 449 + struct find_race data; 450 + int ncpus = num_online_cpus(); 451 + struct task_struct **threads; 452 + unsigned long count; 453 + int err; 454 + int i; 455 + 456 + err = fence_chains_init(&data.fc, CHAIN_SZ, seqno_inc); 457 + if (err) 458 + return err; 459 + 460 + threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL); 461 + if (!threads) { 462 + err = -ENOMEM; 463 + goto err; 464 + } 465 + 466 + atomic_set(&data.children, 0); 467 + for (i = 0; i < ncpus; i++) { 468 + threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i); 469 + if (IS_ERR(threads[i])) { 470 + ncpus = i; 471 + break; 472 + } 473 + atomic_inc(&data.children); 474 + get_task_struct(threads[i]); 475 + } 476 + 477 + wait_var_event_timeout(&data.children, 478 + !atomic_read(&data.children), 479 + 5 * HZ); 480 + 481 + for (i = 0; i < ncpus; i++) { 482 + int ret; 483 + 484 + ret = kthread_stop(threads[i]); 485 + if (ret && !err) 486 + err = ret; 487 + put_task_struct(threads[i]); 488 + } 489 + kfree(threads); 490 + 491 + count = 0; 492 + for (i = 0; i < data.fc.chain_length; i++) 493 + if (dma_fence_is_signaled(data.fc.fences[i])) 494 + count++; 495 + pr_info("Completed %lu cycles\n", count); 496 + 497 + err: 498 + fence_chains_fini(&data.fc); 499 + 
return err; 500 + } 501 + 502 + static int signal_forward(void *arg) 503 + { 504 + struct fence_chains fc; 505 + int err; 506 + int i; 507 + 508 + err = fence_chains_init(&fc, 64, seqno_inc); 509 + if (err) 510 + return err; 511 + 512 + for (i = 0; i < fc.chain_length; i++) { 513 + dma_fence_signal(fc.fences[i]); 514 + 515 + if (!dma_fence_is_signaled(fc.chains[i])) { 516 + pr_err("chain[%d] not signaled!\n", i); 517 + err = -EINVAL; 518 + goto err; 519 + } 520 + 521 + if (i + 1 < fc.chain_length && 522 + dma_fence_is_signaled(fc.chains[i + 1])) { 523 + pr_err("chain[%d] is signaled!\n", i); 524 + err = -EINVAL; 525 + goto err; 526 + } 527 + } 528 + 529 + err: 530 + fence_chains_fini(&fc); 531 + return err; 532 + } 533 + 534 + static int signal_backward(void *arg) 535 + { 536 + struct fence_chains fc; 537 + int err; 538 + int i; 539 + 540 + err = fence_chains_init(&fc, 64, seqno_inc); 541 + if (err) 542 + return err; 543 + 544 + for (i = fc.chain_length; i--; ) { 545 + dma_fence_signal(fc.fences[i]); 546 + 547 + if (i > 0 && dma_fence_is_signaled(fc.chains[i])) { 548 + pr_err("chain[%d] is signaled!\n", i); 549 + err = -EINVAL; 550 + goto err; 551 + } 552 + } 553 + 554 + for (i = 0; i < fc.chain_length; i++) { 555 + if (!dma_fence_is_signaled(fc.chains[i])) { 556 + pr_err("chain[%d] was not signaled!\n", i); 557 + err = -EINVAL; 558 + goto err; 559 + } 560 + } 561 + 562 + err: 563 + fence_chains_fini(&fc); 564 + return err; 565 + } 566 + 567 + static int __wait_fence_chains(void *arg) 568 + { 569 + struct fence_chains *fc = arg; 570 + 571 + if (dma_fence_wait(fc->tail, false)) 572 + return -EIO; 573 + 574 + return 0; 575 + } 576 + 577 + static int wait_forward(void *arg) 578 + { 579 + struct fence_chains fc; 580 + struct task_struct *tsk; 581 + int err; 582 + int i; 583 + 584 + err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc); 585 + if (err) 586 + return err; 587 + 588 + tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); 589 + if (IS_ERR(tsk)) { 590 + err 
= PTR_ERR(tsk); 591 + goto err; 592 + } 593 + get_task_struct(tsk); 594 + yield_to(tsk, true); 595 + 596 + for (i = 0; i < fc.chain_length; i++) 597 + dma_fence_signal(fc.fences[i]); 598 + 599 + err = kthread_stop(tsk); 600 + put_task_struct(tsk); 601 + 602 + err: 603 + fence_chains_fini(&fc); 604 + return err; 605 + } 606 + 607 + static int wait_backward(void *arg) 608 + { 609 + struct fence_chains fc; 610 + struct task_struct *tsk; 611 + int err; 612 + int i; 613 + 614 + err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc); 615 + if (err) 616 + return err; 617 + 618 + tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); 619 + if (IS_ERR(tsk)) { 620 + err = PTR_ERR(tsk); 621 + goto err; 622 + } 623 + get_task_struct(tsk); 624 + yield_to(tsk, true); 625 + 626 + for (i = fc.chain_length; i--; ) 627 + dma_fence_signal(fc.fences[i]); 628 + 629 + err = kthread_stop(tsk); 630 + put_task_struct(tsk); 631 + 632 + err: 633 + fence_chains_fini(&fc); 634 + return err; 635 + } 636 + 637 + static void randomise_fences(struct fence_chains *fc) 638 + { 639 + unsigned int count = fc->chain_length; 640 + 641 + /* Fisher-Yates shuffle courtesy of Knuth */ 642 + while (--count) { 643 + unsigned int swp; 644 + 645 + swp = prandom_u32_max(count + 1); 646 + if (swp == count) 647 + continue; 648 + 649 + swap(fc->fences[count], fc->fences[swp]); 650 + } 651 + } 652 + 653 + static int wait_random(void *arg) 654 + { 655 + struct fence_chains fc; 656 + struct task_struct *tsk; 657 + int err; 658 + int i; 659 + 660 + err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc); 661 + if (err) 662 + return err; 663 + 664 + randomise_fences(&fc); 665 + 666 + tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); 667 + if (IS_ERR(tsk)) { 668 + err = PTR_ERR(tsk); 669 + goto err; 670 + } 671 + get_task_struct(tsk); 672 + yield_to(tsk, true); 673 + 674 + for (i = 0; i < fc.chain_length; i++) 675 + dma_fence_signal(fc.fences[i]); 676 + 677 + err = kthread_stop(tsk); 678 + put_task_struct(tsk); 679 + 
680 + err: 681 + fence_chains_fini(&fc); 682 + return err; 683 + } 684 + 685 + int dma_fence_chain(void) 686 + { 687 + static const struct subtest tests[] = { 688 + SUBTEST(sanitycheck), 689 + SUBTEST(find_seqno), 690 + SUBTEST(find_signaled), 691 + SUBTEST(find_out_of_order), 692 + SUBTEST(find_gap), 693 + SUBTEST(find_race), 694 + SUBTEST(signal_forward), 695 + SUBTEST(signal_backward), 696 + SUBTEST(wait_forward), 697 + SUBTEST(wait_backward), 698 + SUBTEST(wait_random), 699 + }; 700 + int ret; 701 + 702 + pr_info("sizeof(dma_fence_chain)=%zu\n", 703 + sizeof(struct dma_fence_chain)); 704 + 705 + slab_fences = KMEM_CACHE(mock_fence, 706 + SLAB_TYPESAFE_BY_RCU | 707 + SLAB_HWCACHE_ALIGN); 708 + if (!slab_fences) 709 + return -ENOMEM; 710 + 711 + ret = subtests(tests, NULL); 712 + 713 + kmem_cache_destroy(slab_fences); 714 + return ret; 715 + }
+3 -3
drivers/gpu/drm/Makefile
··· 17 17 drm_plane.o drm_color_mgmt.o drm_print.o \ 18 18 drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \ 19 19 drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \ 20 - drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o 20 + drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \ 21 + drm_managed.o 21 22 22 23 drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o 23 24 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o ··· 33 32 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o 34 33 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 35 34 36 - drm_vram_helper-y := drm_gem_vram_helper.o \ 37 - drm_vram_helper_common.o 35 + drm_vram_helper-y := drm_gem_vram_helper.o 38 36 obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o 39 37 40 38 drm_ttm_helper-y := drm_gem_ttm_helper.o
+47 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 38 38 #include <drm/amdgpu_drm.h> 39 39 #include <linux/dma-buf.h> 40 40 #include <linux/dma-fence-array.h> 41 + #include <linux/pci-p2pdma.h> 41 42 42 43 /** 43 44 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation ··· 180 179 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 181 180 int r; 182 181 182 + if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0) 183 + attach->peer2peer = false; 184 + 183 185 if (attach->dev->driver == adev->dev->driver) 184 186 return 0; 185 187 ··· 276 272 struct dma_buf *dma_buf = attach->dmabuf; 277 273 struct drm_gem_object *obj = dma_buf->priv; 278 274 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 275 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 279 276 struct sg_table *sgt; 280 277 long r; 281 278 282 279 if (!bo->pin_count) { 283 - /* move buffer into GTT */ 280 + /* move buffer into GTT or VRAM */ 284 281 struct ttm_operation_ctx ctx = { false, false }; 282 + unsigned domains = AMDGPU_GEM_DOMAIN_GTT; 285 283 286 - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 284 + if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && 285 + attach->peer2peer) { 286 + bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 287 + domains |= AMDGPU_GEM_DOMAIN_VRAM; 288 + } 289 + amdgpu_bo_placement_from_domain(bo, domains); 287 290 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 288 291 if (r) 289 292 return ERR_PTR(r); ··· 300 289 return ERR_PTR(-EBUSY); 301 290 } 302 291 303 - sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages); 304 - if (IS_ERR(sgt)) 305 - return sgt; 292 + switch (bo->tbo.mem.mem_type) { 293 + case TTM_PL_TT: 294 + sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, 295 + bo->tbo.num_pages); 296 + if (IS_ERR(sgt)) 297 + return sgt; 306 298 307 - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, 308 - DMA_ATTR_SKIP_CPU_SYNC)) 309 - goto error_free; 299 + if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, 300 + 
DMA_ATTR_SKIP_CPU_SYNC)) 301 + goto error_free; 302 + break; 303 + 304 + case TTM_PL_VRAM: 305 + r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev, 306 + dir, &sgt); 307 + if (r) 308 + return ERR_PTR(r); 309 + break; 310 + default: 311 + return ERR_PTR(-EINVAL); 312 + } 310 313 311 314 return sgt; 312 315 313 316 error_free: 314 317 sg_free_table(sgt); 315 318 kfree(sgt); 316 - return ERR_PTR(-ENOMEM); 319 + return ERR_PTR(-EBUSY); 317 320 } 318 321 319 322 /** ··· 343 318 struct sg_table *sgt, 344 319 enum dma_data_direction dir) 345 320 { 346 - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); 347 - sg_free_table(sgt); 348 - kfree(sgt); 321 + struct dma_buf *dma_buf = attach->dmabuf; 322 + struct drm_gem_object *obj = dma_buf->priv; 323 + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 324 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 325 + 326 + if (sgt->sgl->page_link) { 327 + dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); 328 + sg_free_table(sgt); 329 + kfree(sgt); 330 + } else { 331 + amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt); 332 + } 349 333 } 350 334 351 335 /** ··· 548 514 } 549 515 550 516 static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = { 517 + .allow_peer2peer = true, 551 518 .move_notify = amdgpu_dma_buf_move_notify 552 519 }; 553 520
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
··· 856 856 const char *name = pp_lib_thermal_controller_names[controller->ucType]; 857 857 info.addr = controller->ucI2cAddress >> 1; 858 858 strlcpy(info.type, name, sizeof(info.type)); 859 - i2c_new_device(&adev->pm.i2c_bus->adapter, &info); 859 + i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); 860 860 } 861 861 } else { 862 862 DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 29 29 #include <linux/module.h> 30 30 #include <linux/pagemap.h> 31 31 #include <linux/pci.h> 32 + #include <linux/dma-buf.h> 32 33 33 34 #include <drm/amdgpu_drm.h> 34 35 #include <drm/drm_debugfs.h> ··· 855 854 attachment = READ_ONCE(bo->tbo.base.import_attach); 856 855 857 856 if (attachment) 858 - seq_printf(m, " imported from %p", dma_buf); 857 + seq_printf(m, " imported from %p%s", dma_buf, 858 + attachment->peer2peer ? " P2P" : ""); 859 859 else if (dma_buf) 860 860 seq_printf(m, " exported as %p", dma_buf); 861 861
+11 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 24 24 #ifndef __AMDGPU_TTM_H__ 25 25 #define __AMDGPU_TTM_H__ 26 26 27 - #include "amdgpu.h" 27 + #include <linux/dma-direction.h> 28 28 #include <drm/gpu_scheduler.h> 29 + #include "amdgpu.h" 29 30 30 31 #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) 31 32 #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) ··· 75 74 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); 76 75 77 76 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); 77 + int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, 78 + struct ttm_mem_reg *mem, 79 + struct device *dev, 80 + enum dma_data_direction dir, 81 + struct sg_table **sgt); 82 + void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, 83 + struct device *dev, 84 + enum dma_data_direction dir, 85 + struct sg_table *sgt); 78 86 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); 79 87 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); 80 88
+99
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 22 22 * Authors: Christian König 23 23 */ 24 24 25 + #include <linux/dma-mapping.h> 25 26 #include "amdgpu.h" 26 27 #include "amdgpu_vm.h" 27 28 #include "amdgpu_atomfirmware.h" ··· 457 456 458 457 kvfree(mem->mm_node); 459 458 mem->mm_node = NULL; 459 + } 460 + 461 + /** 462 + * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table 463 + * 464 + * @adev: amdgpu device pointer 465 + * @mem: TTM memory object 466 + * @dev: the other device 467 + * @dir: dma direction 468 + * @sgt: resulting sg table 469 + * 470 + * Allocate and fill a sg table from a VRAM allocation. 471 + */ 472 + int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, 473 + struct ttm_mem_reg *mem, 474 + struct device *dev, 475 + enum dma_data_direction dir, 476 + struct sg_table **sgt) 477 + { 478 + struct drm_mm_node *node; 479 + struct scatterlist *sg; 480 + int num_entries = 0; 481 + unsigned int pages; 482 + int i, r; 483 + 484 + *sgt = kmalloc(sizeof(*sg), GFP_KERNEL); 485 + if (!*sgt) 486 + return -ENOMEM; 487 + 488 + for (pages = mem->num_pages, node = mem->mm_node; 489 + pages; pages -= node->size, ++node) 490 + ++num_entries; 491 + 492 + r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL); 493 + if (r) 494 + goto error_free; 495 + 496 + for_each_sg((*sgt)->sgl, sg, num_entries, i) 497 + sg->length = 0; 498 + 499 + node = mem->mm_node; 500 + for_each_sg((*sgt)->sgl, sg, num_entries, i) { 501 + phys_addr_t phys = (node->start << PAGE_SHIFT) + 502 + adev->gmc.aper_base; 503 + size_t size = node->size << PAGE_SHIFT; 504 + dma_addr_t addr; 505 + 506 + ++node; 507 + addr = dma_map_resource(dev, phys, size, dir, 508 + DMA_ATTR_SKIP_CPU_SYNC); 509 + r = dma_mapping_error(dev, addr); 510 + if (r) 511 + goto error_unmap; 512 + 513 + sg_set_page(sg, NULL, size, 0); 514 + sg_dma_address(sg) = addr; 515 + sg_dma_len(sg) = size; 516 + } 517 + return 0; 518 + 519 + error_unmap: 520 + for_each_sg((*sgt)->sgl, sg, num_entries, i) { 521 + if (!sg->length) 522 + continue; 523 + 524 + 
dma_unmap_resource(dev, sg->dma_address, 525 + sg->length, dir, 526 + DMA_ATTR_SKIP_CPU_SYNC); 527 + } 528 + sg_free_table(*sgt); 529 + 530 + error_free: 531 + kfree(*sgt); 532 + return r; 533 + } 534 + 535 + /** 536 + * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table 537 + * 538 + * @adev: amdgpu device pointer 539 + * @sgt: sg table to free 540 + * 541 + * Free a previously allocate sg table. 542 + */ 543 + void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, 544 + struct device *dev, 545 + enum dma_data_direction dir, 546 + struct sg_table *sgt) 547 + { 548 + struct scatterlist *sg; 549 + int i; 550 + 551 + for_each_sg(sgt->sgl, sg, sgt->nents, i) 552 + dma_unmap_resource(dev, sg->dma_address, 553 + sg->length, dir, 554 + DMA_ATTR_SKIP_CPU_SYNC); 555 + sg_free_table(sgt); 556 + kfree(sgt); 460 557 } 461 558 462 559 /**
+12 -33
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 136 136 static void 137 137 dm_dp_mst_connector_destroy(struct drm_connector *connector) 138 138 { 139 - struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 140 - struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; 139 + struct amdgpu_dm_connector *aconnector = 140 + to_amdgpu_dm_connector(connector); 141 + struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder; 141 142 142 - kfree(amdgpu_dm_connector->edid); 143 - amdgpu_dm_connector->edid = NULL; 143 + if (aconnector->dc_sink) { 144 + dc_link_remove_remote_sink(aconnector->dc_link, 145 + aconnector->dc_sink); 146 + dc_sink_release(aconnector->dc_sink); 147 + } 148 + 149 + kfree(aconnector->edid); 144 150 145 151 drm_encoder_cleanup(&amdgpu_encoder->base); 146 152 kfree(amdgpu_encoder); 147 153 drm_connector_cleanup(connector); 148 - drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port); 149 - kfree(amdgpu_dm_connector); 154 + drm_dp_mst_put_port_malloc(aconnector->port); 155 + kfree(aconnector); 150 156 } 151 157 152 158 static int ··· 441 435 */ 442 436 amdgpu_dm_connector_funcs_reset(connector); 443 437 444 - DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", 445 - aconnector, connector->base.id, aconnector->mst_port); 446 - 447 438 drm_dp_mst_get_port_malloc(port); 448 - 449 - DRM_DEBUG_KMS(":%d\n", connector->base.id); 450 439 451 440 return connector; 452 441 } 453 442 454 - static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, 455 - struct drm_connector *connector) 456 - { 457 - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 458 - 459 - DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", 460 - aconnector, connector->base.id, aconnector->mst_port); 461 - 462 - if (aconnector->dc_sink) { 463 - amdgpu_dm_update_freesync_caps(connector, NULL); 464 - dc_link_remove_remote_sink(aconnector->dc_link, 465 - aconnector->dc_sink); 466 - 
dc_sink_release(aconnector->dc_sink); 467 - aconnector->dc_sink = NULL; 468 - aconnector->dc_link->cur_link_settings.lane_count = 0; 469 - } 470 - 471 - drm_connector_unregister(connector); 472 - drm_connector_put(connector); 473 - } 474 - 475 443 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { 476 444 .add_connector = dm_dp_add_mst_connector, 477 - .destroy_connector = dm_dp_destroy_mst_connector, 478 445 }; 479 446 480 447 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+4 -3
drivers/gpu/drm/arc/arcpgu_drv.c
··· 137 137 { "clocks", arcpgu_show_pxlclock, 0 }, 138 138 }; 139 139 140 - static int arcpgu_debugfs_init(struct drm_minor *minor) 140 + static void arcpgu_debugfs_init(struct drm_minor *minor) 141 141 { 142 - return drm_debugfs_create_files(arcpgu_debugfs_list, 143 - ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor); 142 + drm_debugfs_create_files(arcpgu_debugfs_list, 143 + ARRAY_SIZE(arcpgu_debugfs_list), 144 + minor->debugfs_root, minor); 144 145 } 145 146 #endif 146 147
+2
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
··· 14 14 #include <drm/drm_gem_cma_helper.h> 15 15 #include <drm/drm_gem_framebuffer_helper.h> 16 16 #include <drm/drm_irq.h> 17 + #include <drm/drm_managed.h> 17 18 #include <drm/drm_probe_helper.h> 18 19 #include <drm/drm_vblank.h> 19 20 ··· 272 271 err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev); 273 272 if (err) 274 273 goto free_kms; 274 + drmm_add_final_kfree(drm, kms); 275 275 276 276 drm->dev_private = mdev; 277 277
+4 -3
drivers/gpu/drm/arm/hdlcd_drv.c
··· 224 224 { "clocks", hdlcd_show_pxlclock, 0 }, 225 225 }; 226 226 227 - static int hdlcd_debugfs_init(struct drm_minor *minor) 227 + static void hdlcd_debugfs_init(struct drm_minor *minor) 228 228 { 229 - return drm_debugfs_create_files(hdlcd_debugfs_list, 230 - ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor); 229 + drm_debugfs_create_files(hdlcd_debugfs_list, 230 + ARRAY_SIZE(hdlcd_debugfs_list), 231 + minor->debugfs_root, minor); 231 232 } 232 233 #endif 233 234
+1 -2
drivers/gpu/drm/arm/malidp_drv.c
··· 548 548 .release = single_release, 549 549 }; 550 550 551 - static int malidp_debugfs_init(struct drm_minor *minor) 551 + static void malidp_debugfs_init(struct drm_minor *minor) 552 552 { 553 553 struct malidp_drm *malidp = minor->dev->dev_private; 554 554 ··· 557 557 spin_lock_init(&malidp->errors_lock); 558 558 debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root, 559 559 minor->dev, &malidp_debugfs_fops); 560 - return 0; 561 560 } 562 561 563 562 #endif //CONFIG_DEBUG_FS
+2
drivers/gpu/drm/armada/armada_drv.c
··· 12 12 #include <drm/drm_atomic_helper.h> 13 13 #include <drm/drm_drv.h> 14 14 #include <drm/drm_ioctl.h> 15 + #include <drm/drm_managed.h> 15 16 #include <drm/drm_prime.h> 16 17 #include <drm/drm_probe_helper.h> 17 18 #include <drm/drm_fb_helper.h> ··· 104 103 kfree(priv); 105 104 return ret; 106 105 } 106 + drmm_add_final_kfree(&priv->drm, priv); 107 107 108 108 /* Remove early framebuffers */ 109 109 ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
+3
drivers/gpu/drm/ast/ast_drv.c
··· 32 32 33 33 #include <drm/drm_crtc_helper.h> 34 34 #include <drm/drm_drv.h> 35 + #include <drm/drm_fb_helper.h> 35 36 #include <drm/drm_gem_vram_helper.h> 36 37 #include <drm/drm_probe_helper.h> 37 38 ··· 111 110 ret = drm_dev_register(dev, ent->driver_data); 112 111 if (ret) 113 112 goto err_ast_driver_unload; 113 + 114 + drm_fbdev_generic_setup(dev, 32); 114 115 115 116 return 0; 116 117
-5
drivers/gpu/drm/ast/ast_main.c
··· 30 30 31 31 #include <drm/drm_atomic_helper.h> 32 32 #include <drm/drm_crtc_helper.h> 33 - #include <drm/drm_fb_helper.h> 34 33 #include <drm/drm_gem.h> 35 34 #include <drm/drm_gem_framebuffer_helper.h> 36 35 #include <drm/drm_gem_vram_helper.h> ··· 510 511 goto out_free; 511 512 512 513 drm_mode_config_reset(dev); 513 - 514 - ret = drm_fbdev_generic_setup(dev, 32); 515 - if (ret) 516 - goto out_free; 517 514 518 515 return 0; 519 516 out_free:
+4 -8
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
··· 11 11 #include <linux/media-bus-format.h> 12 12 #include <linux/of_graph.h> 13 13 14 + #include <drm/drm_bridge.h> 14 15 #include <drm/drm_encoder.h> 15 16 #include <drm/drm_of.h> 16 - #include <drm/drm_bridge.h> 17 + #include <drm/drm_simple_kms_helper.h> 17 18 18 19 #include "atmel_hlcdc_dc.h" 19 20 20 21 struct atmel_hlcdc_rgb_output { 21 22 struct drm_encoder encoder; 22 23 int bus_fmt; 23 - }; 24 - 25 - static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = { 26 - .destroy = drm_encoder_cleanup, 27 24 }; 28 25 29 26 static struct atmel_hlcdc_rgb_output * ··· 95 98 return -EINVAL; 96 99 } 97 100 98 - ret = drm_encoder_init(dev, &output->encoder, 99 - &atmel_hlcdc_panel_encoder_funcs, 100 - DRM_MODE_ENCODER_NONE, NULL); 101 + ret = drm_simple_encoder_init(dev, &output->encoder, 102 + DRM_MODE_ENCODER_NONE); 101 103 if (ret) 102 104 return ret; 103 105
-1
drivers/gpu/drm/bochs/bochs.h
··· 92 92 93 93 /* bochs_kms.c */ 94 94 int bochs_kms_init(struct bochs_device *bochs); 95 - void bochs_kms_fini(struct bochs_device *bochs); 96 95 97 96 /* bochs_fbdev.c */ 98 97 extern const struct drm_mode_config_funcs bochs_mode_funcs;
+2 -4
drivers/gpu/drm/bochs/bochs_drv.c
··· 7 7 8 8 #include <drm/drm_drv.h> 9 9 #include <drm/drm_atomic_helper.h> 10 + #include <drm/drm_managed.h> 10 11 11 12 #include "bochs.h" 12 13 ··· 22 21 { 23 22 struct bochs_device *bochs = dev->dev_private; 24 23 25 - bochs_kms_fini(bochs); 26 24 bochs_mm_fini(bochs); 27 - kfree(bochs); 28 - dev->dev_private = NULL; 29 25 } 30 26 31 27 static int bochs_load(struct drm_device *dev) ··· 30 32 struct bochs_device *bochs; 31 33 int ret; 32 34 33 - bochs = kzalloc(sizeof(*bochs), GFP_KERNEL); 35 + bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL); 34 36 if (bochs == NULL) 35 37 return -ENOMEM; 36 38 dev->dev_private = bochs;
+5 -10
drivers/gpu/drm/bochs/bochs_kms.c
··· 134 134 135 135 int bochs_kms_init(struct bochs_device *bochs) 136 136 { 137 - drm_mode_config_init(bochs->dev); 137 + int ret; 138 + 139 + ret = drmm_mode_config_init(bochs->dev); 140 + if (ret) 141 + return ret; 138 142 139 143 bochs->dev->mode_config.max_width = 8192; 140 144 bochs->dev->mode_config.max_height = 8192; ··· 163 159 drm_mode_config_reset(bochs->dev); 164 160 165 161 return 0; 166 - } 167 - 168 - void bochs_kms_fini(struct bochs_device *bochs) 169 - { 170 - if (!bochs->dev->mode_config.num_connector) 171 - return; 172 - 173 - drm_atomic_helper_shutdown(bochs->dev); 174 - drm_mode_config_cleanup(bochs->dev); 175 162 }
+16
drivers/gpu/drm/bridge/Kconfig
··· 58 58 to DP++. This is used with the i.MX6 imx-ldb 59 59 driver. You are likely to say N here. 60 60 61 + config DRM_NWL_MIPI_DSI 62 + tristate "Northwest Logic MIPI DSI Host controller" 63 + depends on DRM 64 + depends on COMMON_CLK 65 + depends on OF && HAS_IOMEM 66 + select DRM_KMS_HELPER 67 + select DRM_MIPI_DSI 68 + select DRM_PANEL_BRIDGE 69 + select GENERIC_PHY_MIPI_DPHY 70 + select MFD_SYSCON 71 + select MULTIPLEXER 72 + select REGMAP_MMIO 73 + help 74 + This enables the Northwest Logic MIPI DSI Host controller as 75 + for example found on NXP's i.MX8 Processors. 76 + 61 77 config DRM_NXP_PTN3460 62 78 tristate "NXP PTN3460 DP/LVDS bridge" 63 79 depends on OF
+1
drivers/gpu/drm/bridge/Makefile
··· 18 18 obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o 19 19 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o 20 20 obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o 21 + obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o 21 22 22 23 obj-y += analogix/ 23 24 obj-y += synopsys/
+1213
drivers/gpu/drm/bridge/nwl-dsi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * i.MX8 NWL MIPI DSI host driver 4 + * 5 + * Copyright (C) 2017 NXP 6 + * Copyright (C) 2020 Purism SPC 7 + */ 8 + 9 + #include <linux/bitfield.h> 10 + #include <linux/clk.h> 11 + #include <linux/irq.h> 12 + #include <linux/math64.h> 13 + #include <linux/mfd/syscon.h> 14 + #include <linux/module.h> 15 + #include <linux/mux/consumer.h> 16 + #include <linux/of.h> 17 + #include <linux/of_platform.h> 18 + #include <linux/phy/phy.h> 19 + #include <linux/regmap.h> 20 + #include <linux/reset.h> 21 + #include <linux/sys_soc.h> 22 + #include <linux/time64.h> 23 + 24 + #include <drm/drm_bridge.h> 25 + #include <drm/drm_mipi_dsi.h> 26 + #include <drm/drm_of.h> 27 + #include <drm/drm_panel.h> 28 + #include <drm/drm_print.h> 29 + 30 + #include <video/mipi_display.h> 31 + 32 + #include "nwl-dsi.h" 33 + 34 + #define DRV_NAME "nwl-dsi" 35 + 36 + /* i.MX8 NWL quirks */ 37 + /* i.MX8MQ errata E11418 */ 38 + #define E11418_HS_MODE_QUIRK BIT(0) 39 + 40 + #define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500) 41 + 42 + enum transfer_direction { 43 + DSI_PACKET_SEND, 44 + DSI_PACKET_RECEIVE, 45 + }; 46 + 47 + #define NWL_DSI_ENDPOINT_LCDIF 0 48 + #define NWL_DSI_ENDPOINT_DCSS 1 49 + 50 + struct nwl_dsi_plat_clk_config { 51 + const char *id; 52 + struct clk *clk; 53 + bool present; 54 + }; 55 + 56 + struct nwl_dsi_transfer { 57 + const struct mipi_dsi_msg *msg; 58 + struct mipi_dsi_packet packet; 59 + struct completion completed; 60 + 61 + int status; /* status of transmission */ 62 + enum transfer_direction direction; 63 + bool need_bta; 64 + u8 cmd; 65 + u16 rx_word_count; 66 + size_t tx_len; /* in bytes */ 67 + size_t rx_len; /* in bytes */ 68 + }; 69 + 70 + struct nwl_dsi { 71 + struct drm_bridge bridge; 72 + struct mipi_dsi_host dsi_host; 73 + struct drm_bridge *panel_bridge; 74 + struct device *dev; 75 + struct phy *phy; 76 + union phy_configure_opts phy_cfg; 77 + unsigned int quirks; 78 + 79 + struct regmap *regmap; 80 + int 
irq; 81 + /* 82 + * The DSI host controller needs this reset sequence according to NWL: 83 + * 1. Deassert pclk reset to get access to DSI regs 84 + * 2. Configure DSI Host and DPHY and enable DPHY 85 + * 3. Deassert ESC and BYTE resets to allow host TX operations) 86 + * 4. Send DSI cmds to configure peripheral (handled by panel drv) 87 + * 5. Deassert DPI reset so DPI receives pixels and starts sending 88 + * DSI data 89 + * 90 + * TODO: Since panel_bridges do their DSI setup in enable we 91 + * currently have 4. and 5. swapped. 92 + */ 93 + struct reset_control *rst_byte; 94 + struct reset_control *rst_esc; 95 + struct reset_control *rst_dpi; 96 + struct reset_control *rst_pclk; 97 + struct mux_control *mux; 98 + 99 + /* DSI clocks */ 100 + struct clk *phy_ref_clk; 101 + struct clk *rx_esc_clk; 102 + struct clk *tx_esc_clk; 103 + struct clk *core_clk; 104 + /* 105 + * hardware bug: the i.MX8MQ needs this clock on during reset 106 + * even when not using LCDIF. 107 + */ 108 + struct clk *lcdif_clk; 109 + 110 + /* dsi lanes */ 111 + u32 lanes; 112 + enum mipi_dsi_pixel_format format; 113 + struct drm_display_mode mode; 114 + unsigned long dsi_mode_flags; 115 + int error; 116 + 117 + struct nwl_dsi_transfer *xfer; 118 + }; 119 + 120 + static const struct regmap_config nwl_dsi_regmap_config = { 121 + .reg_bits = 16, 122 + .val_bits = 32, 123 + .reg_stride = 4, 124 + .max_register = NWL_DSI_IRQ_MASK2, 125 + .name = DRV_NAME, 126 + }; 127 + 128 + static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge) 129 + { 130 + return container_of(bridge, struct nwl_dsi, bridge); 131 + } 132 + 133 + static int nwl_dsi_clear_error(struct nwl_dsi *dsi) 134 + { 135 + int ret = dsi->error; 136 + 137 + dsi->error = 0; 138 + return ret; 139 + } 140 + 141 + static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val) 142 + { 143 + int ret; 144 + 145 + if (dsi->error) 146 + return; 147 + 148 + ret = regmap_write(dsi->regmap, reg, val); 149 + if (ret < 0) { 150 
+ DRM_DEV_ERROR(dsi->dev, 151 + "Failed to write NWL DSI reg 0x%x: %d\n", reg, 152 + ret); 153 + dsi->error = ret; 154 + } 155 + } 156 + 157 + static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg) 158 + { 159 + unsigned int val; 160 + int ret; 161 + 162 + if (dsi->error) 163 + return 0; 164 + 165 + ret = regmap_read(dsi->regmap, reg, &val); 166 + if (ret < 0) { 167 + DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n", 168 + reg, ret); 169 + dsi->error = ret; 170 + } 171 + return val; 172 + } 173 + 174 + static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format) 175 + { 176 + switch (format) { 177 + case MIPI_DSI_FMT_RGB565: 178 + return NWL_DSI_PIXEL_FORMAT_16; 179 + case MIPI_DSI_FMT_RGB666: 180 + return NWL_DSI_PIXEL_FORMAT_18L; 181 + case MIPI_DSI_FMT_RGB666_PACKED: 182 + return NWL_DSI_PIXEL_FORMAT_18; 183 + case MIPI_DSI_FMT_RGB888: 184 + return NWL_DSI_PIXEL_FORMAT_24; 185 + default: 186 + return -EINVAL; 187 + } 188 + } 189 + 190 + /* 191 + * ps2bc - Picoseconds to byte clock cycles 192 + */ 193 + static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) 194 + { 195 + u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); 196 + 197 + return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp, 198 + dsi->lanes * 8 * NSEC_PER_SEC); 199 + } 200 + 201 + /* 202 + * ui2bc - UI time periods to byte clock cycles 203 + */ 204 + static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui) 205 + { 206 + u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); 207 + 208 + return DIV64_U64_ROUND_UP(ui * dsi->lanes, 209 + dsi->mode.clock * 1000 * bpp); 210 + } 211 + 212 + /* 213 + * us2bc - micro seconds to lp clock cycles 214 + */ 215 + static u32 us2lp(u32 lp_clk_rate, unsigned long us) 216 + { 217 + return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC); 218 + } 219 + 220 + static int nwl_dsi_config_host(struct nwl_dsi *dsi) 221 + { 222 + u32 cycles; 223 + struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy; 224 + 225 + if (dsi->lanes < 
1 || dsi->lanes > 4) 226 + return -EINVAL; 227 + 228 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes); 229 + nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1); 230 + 231 + if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { 232 + nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01); 233 + nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01); 234 + } else { 235 + nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00); 236 + nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00); 237 + } 238 + 239 + /* values in byte clock cycles */ 240 + cycles = ui2bc(dsi, cfg->clk_pre); 241 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles); 242 + nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles); 243 + cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero); 244 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles); 245 + cycles += ui2bc(dsi, cfg->clk_pre); 246 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles); 247 + nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles); 248 + cycles = ps2bc(dsi, cfg->hs_exit); 249 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles); 250 + nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles); 251 + 252 + nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01); 253 + nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00); 254 + nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00); 255 + nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00); 256 + /* In LP clock cycles */ 257 + cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup); 258 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles); 259 + nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles); 260 + 261 + return nwl_dsi_clear_error(dsi); 262 + } 263 + 264 + static int nwl_dsi_config_dpi(struct nwl_dsi *dsi) 265 + { 266 + u32 mode; 267 + int color_format; 268 + bool burst_mode; 269 + int hfront_porch, hback_porch, vfront_porch, vback_porch; 270 + int hsync_len, vsync_len; 271 + 272 + hfront_porch = dsi->mode.hsync_start - 
dsi->mode.hdisplay; 273 + hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start; 274 + hback_porch = dsi->mode.htotal - dsi->mode.hsync_end; 275 + 276 + vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay; 277 + vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start; 278 + vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end; 279 + 280 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch); 281 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch); 282 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len); 283 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay); 284 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch); 285 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch); 286 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len); 287 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay); 288 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock); 289 + 290 + color_format = nwl_dsi_get_dpi_pixel_format(dsi->format); 291 + if (color_format < 0) { 292 + DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n", 293 + dsi->format); 294 + return color_format; 295 + } 296 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format); 297 + 298 + nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT); 299 + nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format); 300 + /* 301 + * Adjusting input polarity based on the video mode results in 302 + * a black screen so always pick active low: 303 + */ 304 + nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY, 305 + NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW); 306 + nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY, 307 + NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW); 308 + 309 + burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) && 310 + !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE); 311 + 312 + if (burst_mode) { 313 + nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE); 314 
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256); 315 + } else { 316 + mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ? 317 + NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES : 318 + NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS); 319 + nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode); 320 + nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 321 + dsi->mode.hdisplay); 322 + } 323 + 324 + nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch); 325 + nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch); 326 + nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len); 327 + 328 + nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0); 329 + nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1); 330 + nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0); 331 + nwl_dsi_write(dsi, NWL_DSI_VC, 0x0); 332 + 333 + nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay); 334 + nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1); 335 + nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch); 336 + nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch); 337 + 338 + return nwl_dsi_clear_error(dsi); 339 + } 340 + 341 + static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi) 342 + { 343 + u32 irq_enable; 344 + 345 + nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff); 346 + nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7); 347 + 348 + irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK | 349 + NWL_DSI_RX_PKT_HDR_RCVD_MASK | 350 + NWL_DSI_TX_FIFO_OVFLW_MASK | 351 + NWL_DSI_HS_TX_TIMEOUT_MASK); 352 + 353 + nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable); 354 + 355 + return nwl_dsi_clear_error(dsi); 356 + } 357 + 358 + static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host, 359 + struct mipi_dsi_device *device) 360 + { 361 + struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host); 362 + struct device *dev = dsi->dev; 363 + 364 + DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes, 365 + device->format, device->mode_flags); 366 + 367 + if (device->lanes < 1 || device->lanes > 4) 368 + return -EINVAL; 
369 + 370 + dsi->lanes = device->lanes; 371 + dsi->format = device->format; 372 + dsi->dsi_mode_flags = device->mode_flags; 373 + 374 + return 0; 375 + } 376 + 377 + static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status) 378 + { 379 + struct device *dev = dsi->dev; 380 + struct nwl_dsi_transfer *xfer = dsi->xfer; 381 + int err; 382 + u8 *payload = xfer->msg->rx_buf; 383 + u32 val; 384 + u16 word_count; 385 + u8 channel; 386 + u8 data_type; 387 + 388 + xfer->status = 0; 389 + 390 + if (xfer->rx_word_count == 0) { 391 + if (!(status & NWL_DSI_RX_PKT_HDR_RCVD)) 392 + return false; 393 + /* Get the RX header and parse it */ 394 + val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER); 395 + err = nwl_dsi_clear_error(dsi); 396 + if (err) 397 + xfer->status = err; 398 + word_count = NWL_DSI_WC(val); 399 + channel = NWL_DSI_RX_VC(val); 400 + data_type = NWL_DSI_RX_DT(val); 401 + 402 + if (channel != xfer->msg->channel) { 403 + DRM_DEV_ERROR(dev, 404 + "[%02X] Channel mismatch (%u != %u)\n", 405 + xfer->cmd, channel, xfer->msg->channel); 406 + xfer->status = -EINVAL; 407 + return true; 408 + } 409 + 410 + switch (data_type) { 411 + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: 412 + fallthrough; 413 + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: 414 + if (xfer->msg->rx_len > 1) { 415 + /* read second byte */ 416 + payload[1] = word_count >> 8; 417 + ++xfer->rx_len; 418 + } 419 + fallthrough; 420 + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: 421 + fallthrough; 422 + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: 423 + if (xfer->msg->rx_len > 0) { 424 + /* read first byte */ 425 + payload[0] = word_count & 0xff; 426 + ++xfer->rx_len; 427 + } 428 + xfer->status = xfer->rx_len; 429 + return true; 430 + case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: 431 + word_count &= 0xff; 432 + DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n", 433 + xfer->cmd, word_count); 434 + xfer->status = -EPROTO; 435 + return true; 436 + } 437 + 438 + if (word_count > 
xfer->msg->rx_len) { 439 + DRM_DEV_ERROR(dev, 440 + "[%02X] Receive buffer too small: %zu (< %u)\n", 441 + xfer->cmd, xfer->msg->rx_len, word_count); 442 + xfer->status = -EINVAL; 443 + return true; 444 + } 445 + 446 + xfer->rx_word_count = word_count; 447 + } else { 448 + /* Set word_count from previous header read */ 449 + word_count = xfer->rx_word_count; 450 + } 451 + 452 + /* If RX payload is not yet received, wait for it */ 453 + if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)) 454 + return false; 455 + 456 + /* Read the RX payload */ 457 + while (word_count >= 4) { 458 + val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD); 459 + payload[0] = (val >> 0) & 0xff; 460 + payload[1] = (val >> 8) & 0xff; 461 + payload[2] = (val >> 16) & 0xff; 462 + payload[3] = (val >> 24) & 0xff; 463 + payload += 4; 464 + xfer->rx_len += 4; 465 + word_count -= 4; 466 + } 467 + 468 + if (word_count > 0) { 469 + val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD); 470 + switch (word_count) { 471 + case 3: 472 + payload[2] = (val >> 16) & 0xff; 473 + ++xfer->rx_len; 474 + fallthrough; 475 + case 2: 476 + payload[1] = (val >> 8) & 0xff; 477 + ++xfer->rx_len; 478 + fallthrough; 479 + case 1: 480 + payload[0] = (val >> 0) & 0xff; 481 + ++xfer->rx_len; 482 + break; 483 + } 484 + } 485 + 486 + xfer->status = xfer->rx_len; 487 + err = nwl_dsi_clear_error(dsi); 488 + if (err) 489 + xfer->status = err; 490 + 491 + return true; 492 + } 493 + 494 + static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status) 495 + { 496 + struct nwl_dsi_transfer *xfer = dsi->xfer; 497 + bool end_packet = false; 498 + 499 + if (!xfer) 500 + return; 501 + 502 + if (xfer->direction == DSI_PACKET_SEND && 503 + status & NWL_DSI_TX_PKT_DONE) { 504 + xfer->status = xfer->tx_len; 505 + end_packet = true; 506 + } else if (status & NWL_DSI_DPHY_DIRECTION && 507 + ((status & (NWL_DSI_RX_PKT_HDR_RCVD | 508 + NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) { 509 + end_packet = nwl_dsi_read_packet(dsi, status); 510 + } 511 + 512 + if 
(end_packet) 513 + complete(&xfer->completed); 514 + } 515 + 516 + static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi) 517 + { 518 + struct nwl_dsi_transfer *xfer = dsi->xfer; 519 + struct mipi_dsi_packet *pkt = &xfer->packet; 520 + const u8 *payload; 521 + size_t length; 522 + u16 word_count; 523 + u8 hs_mode; 524 + u32 val; 525 + u32 hs_workaround = 0; 526 + 527 + /* Send the payload, if any */ 528 + length = pkt->payload_length; 529 + payload = pkt->payload; 530 + 531 + while (length >= 4) { 532 + val = *(u32 *)payload; 533 + hs_workaround |= !(val & 0xFFFF00); 534 + nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val); 535 + payload += 4; 536 + length -= 4; 537 + } 538 + /* Send the rest of the payload */ 539 + val = 0; 540 + switch (length) { 541 + case 3: 542 + val |= payload[2] << 16; 543 + fallthrough; 544 + case 2: 545 + val |= payload[1] << 8; 546 + hs_workaround |= !(val & 0xFFFF00); 547 + fallthrough; 548 + case 1: 549 + val |= payload[0]; 550 + nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val); 551 + break; 552 + } 553 + xfer->tx_len = pkt->payload_length; 554 + 555 + /* 556 + * Send the header 557 + * header[0] = Virtual Channel + Data Type 558 + * header[1] = Word Count LSB (LP) or first param (SP) 559 + * header[2] = Word Count MSB (LP) or second param (SP) 560 + */ 561 + word_count = pkt->header[1] | (pkt->header[2] << 8); 562 + if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) { 563 + DRM_DEV_DEBUG_DRIVER(dsi->dev, 564 + "Using hs mode workaround for cmd 0x%x\n", 565 + xfer->cmd); 566 + hs_mode = 1; 567 + } else { 568 + hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 
0 : 1; 569 + } 570 + val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) | 571 + NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) | 572 + NWL_DSI_BTA_TX(xfer->need_bta); 573 + nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val); 574 + 575 + /* Send packet command */ 576 + nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1); 577 + } 578 + 579 + static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host, 580 + const struct mipi_dsi_msg *msg) 581 + { 582 + struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host); 583 + struct nwl_dsi_transfer xfer; 584 + ssize_t ret = 0; 585 + 586 + /* Create packet to be sent */ 587 + dsi->xfer = &xfer; 588 + ret = mipi_dsi_create_packet(&xfer.packet, msg); 589 + if (ret < 0) { 590 + dsi->xfer = NULL; 591 + return ret; 592 + } 593 + 594 + if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM || 595 + msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM || 596 + msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM || 597 + msg->type & MIPI_DSI_DCS_READ) && 598 + msg->rx_len > 0 && msg->rx_buf) 599 + xfer.direction = DSI_PACKET_RECEIVE; 600 + else 601 + xfer.direction = DSI_PACKET_SEND; 602 + 603 + xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE); 604 + xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 
1 : 0; 605 + xfer.msg = msg; 606 + xfer.status = -ETIMEDOUT; 607 + xfer.rx_word_count = 0; 608 + xfer.rx_len = 0; 609 + xfer.cmd = 0x00; 610 + if (msg->tx_len > 0) 611 + xfer.cmd = ((u8 *)(msg->tx_buf))[0]; 612 + init_completion(&xfer.completed); 613 + 614 + ret = clk_prepare_enable(dsi->rx_esc_clk); 615 + if (ret < 0) { 616 + DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n", 617 + ret); 618 + return ret; 619 + } 620 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n", 621 + clk_get_rate(dsi->rx_esc_clk)); 622 + 623 + /* Initiate the DSI packet transmision */ 624 + nwl_dsi_begin_transmission(dsi); 625 + 626 + if (!wait_for_completion_timeout(&xfer.completed, 627 + NWL_DSI_MIPI_FIFO_TIMEOUT)) { 628 + DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n", 629 + xfer.cmd); 630 + ret = -ETIMEDOUT; 631 + } else { 632 + ret = xfer.status; 633 + } 634 + 635 + clk_disable_unprepare(dsi->rx_esc_clk); 636 + 637 + return ret; 638 + } 639 + 640 + static const struct mipi_dsi_host_ops nwl_dsi_host_ops = { 641 + .attach = nwl_dsi_host_attach, 642 + .transfer = nwl_dsi_host_transfer, 643 + }; 644 + 645 + static irqreturn_t nwl_dsi_irq_handler(int irq, void *data) 646 + { 647 + u32 irq_status; 648 + struct nwl_dsi *dsi = data; 649 + 650 + irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS); 651 + 652 + if (irq_status & NWL_DSI_TX_FIFO_OVFLW) 653 + DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n"); 654 + 655 + if (irq_status & NWL_DSI_HS_TX_TIMEOUT) 656 + DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n"); 657 + 658 + if (irq_status & NWL_DSI_TX_PKT_DONE || 659 + irq_status & NWL_DSI_RX_PKT_HDR_RCVD || 660 + irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD) 661 + nwl_dsi_finish_transmission(dsi, irq_status); 662 + 663 + return IRQ_HANDLED; 664 + } 665 + 666 + static int nwl_dsi_enable(struct nwl_dsi *dsi) 667 + { 668 + struct device *dev = dsi->dev; 669 + union phy_configure_opts *phy_cfg = &dsi->phy_cfg; 670 + int ret; 671 + 672 + if 
(!dsi->lanes) { 673 + DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes); 674 + return -EINVAL; 675 + } 676 + 677 + ret = phy_init(dsi->phy); 678 + if (ret < 0) { 679 + DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret); 680 + return ret; 681 + } 682 + 683 + ret = phy_configure(dsi->phy, phy_cfg); 684 + if (ret < 0) { 685 + DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret); 686 + goto uninit_phy; 687 + } 688 + 689 + ret = clk_prepare_enable(dsi->tx_esc_clk); 690 + if (ret < 0) { 691 + DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n", 692 + ret); 693 + goto uninit_phy; 694 + } 695 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n", 696 + clk_get_rate(dsi->tx_esc_clk)); 697 + 698 + ret = nwl_dsi_config_host(dsi); 699 + if (ret < 0) { 700 + DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret); 701 + goto disable_clock; 702 + } 703 + 704 + ret = nwl_dsi_config_dpi(dsi); 705 + if (ret < 0) { 706 + DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret); 707 + goto disable_clock; 708 + } 709 + 710 + ret = phy_power_on(dsi->phy); 711 + if (ret < 0) { 712 + DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret); 713 + goto disable_clock; 714 + } 715 + 716 + ret = nwl_dsi_init_interrupts(dsi); 717 + if (ret < 0) 718 + goto power_off_phy; 719 + 720 + return ret; 721 + 722 + power_off_phy: 723 + phy_power_off(dsi->phy); 724 + disable_clock: 725 + clk_disable_unprepare(dsi->tx_esc_clk); 726 + uninit_phy: 727 + phy_exit(dsi->phy); 728 + 729 + return ret; 730 + } 731 + 732 + static int nwl_dsi_disable(struct nwl_dsi *dsi) 733 + { 734 + struct device *dev = dsi->dev; 735 + 736 + DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n"); 737 + 738 + phy_power_off(dsi->phy); 739 + phy_exit(dsi->phy); 740 + 741 + /* Disabling the clock before the phy breaks enabling dsi again */ 742 + clk_disable_unprepare(dsi->tx_esc_clk); 743 + 744 + return 0; 745 + } 746 + 747 + static void nwl_dsi_bridge_disable(struct drm_bridge *bridge) 748 + { 749 + 
struct nwl_dsi *dsi = bridge_to_dsi(bridge); 750 + int ret; 751 + 752 + nwl_dsi_disable(dsi); 753 + 754 + ret = reset_control_assert(dsi->rst_dpi); 755 + if (ret < 0) { 756 + DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret); 757 + return; 758 + } 759 + ret = reset_control_assert(dsi->rst_byte); 760 + if (ret < 0) { 761 + DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret); 762 + return; 763 + } 764 + ret = reset_control_assert(dsi->rst_esc); 765 + if (ret < 0) { 766 + DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret); 767 + return; 768 + } 769 + ret = reset_control_assert(dsi->rst_pclk); 770 + if (ret < 0) { 771 + DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret); 772 + return; 773 + } 774 + 775 + clk_disable_unprepare(dsi->core_clk); 776 + clk_disable_unprepare(dsi->lcdif_clk); 777 + 778 + pm_runtime_put(dsi->dev); 779 + } 780 + 781 + static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi, 782 + const struct drm_display_mode *mode, 783 + union phy_configure_opts *phy_opts) 784 + { 785 + unsigned long rate; 786 + int ret; 787 + 788 + if (dsi->lanes < 1 || dsi->lanes > 4) 789 + return -EINVAL; 790 + 791 + /* 792 + * So far the DPHY spec minimal timings work for both mixel 793 + * dphy and nwl dsi host 794 + */ 795 + ret = phy_mipi_dphy_get_default_config(mode->clock * 1000, 796 + mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes, 797 + &phy_opts->mipi_dphy); 798 + if (ret < 0) 799 + return ret; 800 + 801 + rate = clk_get_rate(dsi->tx_esc_clk); 802 + DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate); 803 + phy_opts->mipi_dphy.lp_clk_rate = rate; 804 + 805 + return 0; 806 + } 807 + 808 + static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge, 809 + const struct drm_display_mode *mode, 810 + struct drm_display_mode *adjusted_mode) 811 + { 812 + /* At least LCDIF + NWL needs active high sync */ 813 + adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); 814 + adjusted_mode->flags &= 
~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); 815 + 816 + return true; 817 + } 818 + 819 + static enum drm_mode_status 820 + nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge, 821 + const struct drm_display_mode *mode) 822 + { 823 + struct nwl_dsi *dsi = bridge_to_dsi(bridge); 824 + int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); 825 + 826 + if (mode->clock * bpp > 15000000 * dsi->lanes) 827 + return MODE_CLOCK_HIGH; 828 + 829 + if (mode->clock * bpp < 80000 * dsi->lanes) 830 + return MODE_CLOCK_LOW; 831 + 832 + return MODE_OK; 833 + } 834 + 835 + static void 836 + nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, 837 + const struct drm_display_mode *mode, 838 + const struct drm_display_mode *adjusted_mode) 839 + { 840 + struct nwl_dsi *dsi = bridge_to_dsi(bridge); 841 + struct device *dev = dsi->dev; 842 + union phy_configure_opts new_cfg; 843 + unsigned long phy_ref_rate; 844 + int ret; 845 + 846 + ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg); 847 + if (ret < 0) 848 + return; 849 + 850 + /* 851 + * If hs clock is unchanged, we're all good - all parameters are 852 + * derived from it atm. 
853 + */ 854 + if (new_cfg.mipi_dphy.hs_clk_rate == dsi->phy_cfg.mipi_dphy.hs_clk_rate) 855 + return; 856 + 857 + phy_ref_rate = clk_get_rate(dsi->phy_ref_clk); 858 + DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate); 859 + /* Save the new desired phy config */ 860 + memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg)); 861 + 862 + memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode)); 863 + drm_mode_debug_printmodeline(adjusted_mode); 864 + } 865 + 866 + static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge) 867 + { 868 + struct nwl_dsi *dsi = bridge_to_dsi(bridge); 869 + int ret; 870 + 871 + pm_runtime_get_sync(dsi->dev); 872 + 873 + if (clk_prepare_enable(dsi->lcdif_clk) < 0) 874 + return; 875 + if (clk_prepare_enable(dsi->core_clk) < 0) 876 + return; 877 + 878 + /* Step 1 from DSI reset-out instructions */ 879 + ret = reset_control_deassert(dsi->rst_pclk); 880 + if (ret < 0) { 881 + DRM_DEV_ERROR(dsi->dev, "Failed to deassert PCLK: %d\n", ret); 882 + return; 883 + } 884 + 885 + /* Step 2 from DSI reset-out instructions */ 886 + nwl_dsi_enable(dsi); 887 + 888 + /* Step 3 from DSI reset-out instructions */ 889 + ret = reset_control_deassert(dsi->rst_esc); 890 + if (ret < 0) { 891 + DRM_DEV_ERROR(dsi->dev, "Failed to deassert ESC: %d\n", ret); 892 + return; 893 + } 894 + ret = reset_control_deassert(dsi->rst_byte); 895 + if (ret < 0) { 896 + DRM_DEV_ERROR(dsi->dev, "Failed to deassert BYTE: %d\n", ret); 897 + return; 898 + } 899 + } 900 + 901 + static void nwl_dsi_bridge_enable(struct drm_bridge *bridge) 902 + { 903 + struct nwl_dsi *dsi = bridge_to_dsi(bridge); 904 + int ret; 905 + 906 + /* Step 5 from DSI reset-out instructions */ 907 + ret = reset_control_deassert(dsi->rst_dpi); 908 + if (ret < 0) 909 + DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret); 910 + } 911 + 912 + static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, 913 + enum drm_bridge_attach_flags flags) 914 + { 915 + struct nwl_dsi *dsi = 
bridge_to_dsi(bridge); 916 + struct drm_bridge *panel_bridge; 917 + struct drm_panel *panel; 918 + int ret; 919 + 920 + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { 921 + DRM_ERROR("Fix bridge driver to make connector optional!"); 922 + return -EINVAL; 923 + } 924 + 925 + ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel, 926 + &panel_bridge); 927 + if (ret) 928 + return ret; 929 + 930 + if (panel) { 931 + panel_bridge = drm_panel_bridge_add(panel); 932 + if (IS_ERR(panel_bridge)) 933 + return PTR_ERR(panel_bridge); 934 + } 935 + dsi->panel_bridge = panel_bridge; 936 + 937 + if (!dsi->panel_bridge) 938 + return -EPROBE_DEFER; 939 + 940 + return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge, 941 + flags); 942 + } 943 + 944 + static void nwl_dsi_bridge_detach(struct drm_bridge *bridge) 945 + { struct nwl_dsi *dsi = bridge_to_dsi(bridge); 946 + 947 + drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0); 948 + } 949 + 950 + static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = { 951 + .pre_enable = nwl_dsi_bridge_pre_enable, 952 + .enable = nwl_dsi_bridge_enable, 953 + .disable = nwl_dsi_bridge_disable, 954 + .mode_fixup = nwl_dsi_bridge_mode_fixup, 955 + .mode_set = nwl_dsi_bridge_mode_set, 956 + .mode_valid = nwl_dsi_bridge_mode_valid, 957 + .attach = nwl_dsi_bridge_attach, 958 + .detach = nwl_dsi_bridge_detach, 959 + }; 960 + 961 + static int nwl_dsi_parse_dt(struct nwl_dsi *dsi) 962 + { 963 + struct platform_device *pdev = to_platform_device(dsi->dev); 964 + struct clk *clk; 965 + void __iomem *base; 966 + int ret; 967 + 968 + dsi->phy = devm_phy_get(dsi->dev, "dphy"); 969 + if (IS_ERR(dsi->phy)) { 970 + ret = PTR_ERR(dsi->phy); 971 + if (ret != -EPROBE_DEFER) 972 + DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret); 973 + return ret; 974 + } 975 + 976 + clk = devm_clk_get(dsi->dev, "lcdif"); 977 + if (IS_ERR(clk)) { 978 + ret = PTR_ERR(clk); 979 + DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n", 980 + ret); 981 + 
return ret; 982 + } 983 + dsi->lcdif_clk = clk; 984 + 985 + clk = devm_clk_get(dsi->dev, "core"); 986 + if (IS_ERR(clk)) { 987 + ret = PTR_ERR(clk); 988 + DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n", 989 + ret); 990 + return ret; 991 + } 992 + dsi->core_clk = clk; 993 + 994 + clk = devm_clk_get(dsi->dev, "phy_ref"); 995 + if (IS_ERR(clk)) { 996 + ret = PTR_ERR(clk); 997 + DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n", 998 + ret); 999 + return ret; 1000 + } 1001 + dsi->phy_ref_clk = clk; 1002 + 1003 + clk = devm_clk_get(dsi->dev, "rx_esc"); 1004 + if (IS_ERR(clk)) { 1005 + ret = PTR_ERR(clk); 1006 + DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n", 1007 + ret); 1008 + return ret; 1009 + } 1010 + dsi->rx_esc_clk = clk; 1011 + 1012 + clk = devm_clk_get(dsi->dev, "tx_esc"); 1013 + if (IS_ERR(clk)) { 1014 + ret = PTR_ERR(clk); 1015 + DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n", 1016 + ret); 1017 + return ret; 1018 + } 1019 + dsi->tx_esc_clk = clk; 1020 + 1021 + dsi->mux = devm_mux_control_get(dsi->dev, NULL); 1022 + if (IS_ERR(dsi->mux)) { 1023 + ret = PTR_ERR(dsi->mux); 1024 + if (ret != -EPROBE_DEFER) 1025 + DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret); 1026 + return ret; 1027 + } 1028 + 1029 + base = devm_platform_ioremap_resource(pdev, 0); 1030 + if (IS_ERR(base)) 1031 + return PTR_ERR(base); 1032 + 1033 + dsi->regmap = 1034 + devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config); 1035 + if (IS_ERR(dsi->regmap)) { 1036 + ret = PTR_ERR(dsi->regmap); 1037 + DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n", 1038 + ret); 1039 + return ret; 1040 + } 1041 + 1042 + dsi->irq = platform_get_irq(pdev, 0); 1043 + if (dsi->irq < 0) { 1044 + DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n", 1045 + dsi->irq); 1046 + return dsi->irq; 1047 + } 1048 + 1049 + dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk"); 1050 + if (IS_ERR(dsi->rst_pclk)) { 1051 + DRM_DEV_ERROR(dsi->dev, 
"Failed to get pclk reset: %ld\n", 1052 + PTR_ERR(dsi->rst_pclk)); 1053 + return PTR_ERR(dsi->rst_pclk); 1054 + } 1055 + dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte"); 1056 + if (IS_ERR(dsi->rst_byte)) { 1057 + DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n", 1058 + PTR_ERR(dsi->rst_byte)); 1059 + return PTR_ERR(dsi->rst_byte); 1060 + } 1061 + dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc"); 1062 + if (IS_ERR(dsi->rst_esc)) { 1063 + DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n", 1064 + PTR_ERR(dsi->rst_esc)); 1065 + return PTR_ERR(dsi->rst_esc); 1066 + } 1067 + dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi"); 1068 + if (IS_ERR(dsi->rst_dpi)) { 1069 + DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n", 1070 + PTR_ERR(dsi->rst_dpi)); 1071 + return PTR_ERR(dsi->rst_dpi); 1072 + } 1073 + return 0; 1074 + } 1075 + 1076 + static int nwl_dsi_select_input(struct nwl_dsi *dsi) 1077 + { 1078 + struct device_node *remote; 1079 + u32 use_dcss = 1; 1080 + int ret; 1081 + 1082 + remote = of_graph_get_remote_node(dsi->dev->of_node, 0, 1083 + NWL_DSI_ENDPOINT_LCDIF); 1084 + if (remote) { 1085 + use_dcss = 0; 1086 + } else { 1087 + remote = of_graph_get_remote_node(dsi->dev->of_node, 0, 1088 + NWL_DSI_ENDPOINT_DCSS); 1089 + if (!remote) { 1090 + DRM_DEV_ERROR(dsi->dev, 1091 + "No valid input endpoint found\n"); 1092 + return -EINVAL; 1093 + } 1094 + } 1095 + 1096 + DRM_DEV_INFO(dsi->dev, "Using %s as input source\n", 1097 + (use_dcss) ? 
"DCSS" : "LCDIF"); 1098 + ret = mux_control_try_select(dsi->mux, use_dcss); 1099 + if (ret < 0) 1100 + DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret); 1101 + 1102 + of_node_put(remote); 1103 + return ret; 1104 + } 1105 + 1106 + static int nwl_dsi_deselect_input(struct nwl_dsi *dsi) 1107 + { 1108 + int ret; 1109 + 1110 + ret = mux_control_deselect(dsi->mux); 1111 + if (ret < 0) 1112 + DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret); 1113 + 1114 + return ret; 1115 + } 1116 + 1117 + static const struct drm_bridge_timings nwl_dsi_timings = { 1118 + .input_bus_flags = DRM_BUS_FLAG_DE_LOW, 1119 + }; 1120 + 1121 + static const struct of_device_id nwl_dsi_dt_ids[] = { 1122 + { .compatible = "fsl,imx8mq-nwl-dsi", }, 1123 + { /* sentinel */ } 1124 + }; 1125 + MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids); 1126 + 1127 + static const struct soc_device_attribute nwl_dsi_quirks_match[] = { 1128 + { .soc_id = "i.MX8MQ", .revision = "2.0", 1129 + .data = (void *)E11418_HS_MODE_QUIRK }, 1130 + { /* sentinel. 
*/ }, 1131 + }; 1132 + 1133 + static int nwl_dsi_probe(struct platform_device *pdev) 1134 + { 1135 + struct device *dev = &pdev->dev; 1136 + const struct soc_device_attribute *attr; 1137 + struct nwl_dsi *dsi; 1138 + int ret; 1139 + 1140 + dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); 1141 + if (!dsi) 1142 + return -ENOMEM; 1143 + 1144 + dsi->dev = dev; 1145 + 1146 + ret = nwl_dsi_parse_dt(dsi); 1147 + if (ret) 1148 + return ret; 1149 + 1150 + ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0, 1151 + dev_name(dev), dsi); 1152 + if (ret < 0) { 1153 + DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq, 1154 + ret); 1155 + return ret; 1156 + } 1157 + 1158 + dsi->dsi_host.ops = &nwl_dsi_host_ops; 1159 + dsi->dsi_host.dev = dev; 1160 + ret = mipi_dsi_host_register(&dsi->dsi_host); 1161 + if (ret) { 1162 + DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret); 1163 + return ret; 1164 + } 1165 + 1166 + attr = soc_device_match(nwl_dsi_quirks_match); 1167 + if (attr) 1168 + dsi->quirks = (uintptr_t)attr->data; 1169 + 1170 + dsi->bridge.driver_private = dsi; 1171 + dsi->bridge.funcs = &nwl_dsi_bridge_funcs; 1172 + dsi->bridge.of_node = dev->of_node; 1173 + dsi->bridge.timings = &nwl_dsi_timings; 1174 + 1175 + dev_set_drvdata(dev, dsi); 1176 + pm_runtime_enable(dev); 1177 + 1178 + ret = nwl_dsi_select_input(dsi); 1179 + if (ret < 0) { 1180 + mipi_dsi_host_unregister(&dsi->dsi_host); 1181 + return ret; 1182 + } 1183 + 1184 + drm_bridge_add(&dsi->bridge); 1185 + return 0; 1186 + } 1187 + 1188 + static int nwl_dsi_remove(struct platform_device *pdev) 1189 + { 1190 + struct nwl_dsi *dsi = platform_get_drvdata(pdev); 1191 + 1192 + nwl_dsi_deselect_input(dsi); 1193 + mipi_dsi_host_unregister(&dsi->dsi_host); 1194 + drm_bridge_remove(&dsi->bridge); 1195 + pm_runtime_disable(&pdev->dev); 1196 + return 0; 1197 + } 1198 + 1199 + static struct platform_driver nwl_dsi_driver = { 1200 + .probe = nwl_dsi_probe, 1201 + .remove = nwl_dsi_remove, 1202 + 
.driver = { 1203 + .of_match_table = nwl_dsi_dt_ids, 1204 + .name = DRV_NAME, 1205 + }, 1206 + }; 1207 + 1208 + module_platform_driver(nwl_dsi_driver); 1209 + 1210 + MODULE_AUTHOR("NXP Semiconductor"); 1211 + MODULE_AUTHOR("Purism SPC"); 1212 + MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver"); 1213 + MODULE_LICENSE("GPL"); /* GPLv2 or later */
+144
drivers/gpu/drm/bridge/nwl-dsi.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* 3 + * NWL MIPI DSI host driver 4 + * 5 + * Copyright (C) 2017 NXP 6 + * Copyright (C) 2019 Purism SPC 7 + */ 8 + #ifndef __NWL_DSI_H__ 9 + #define __NWL_DSI_H__ 10 + 11 + /* DSI HOST registers */ 12 + #define NWL_DSI_CFG_NUM_LANES 0x0 13 + #define NWL_DSI_CFG_NONCONTINUOUS_CLK 0x4 14 + #define NWL_DSI_CFG_T_PRE 0x8 15 + #define NWL_DSI_CFG_T_POST 0xc 16 + #define NWL_DSI_CFG_TX_GAP 0x10 17 + #define NWL_DSI_CFG_AUTOINSERT_EOTP 0x14 18 + #define NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP 0x18 19 + #define NWL_DSI_CFG_HTX_TO_COUNT 0x1c 20 + #define NWL_DSI_CFG_LRX_H_TO_COUNT 0x20 21 + #define NWL_DSI_CFG_BTA_H_TO_COUNT 0x24 22 + #define NWL_DSI_CFG_TWAKEUP 0x28 23 + #define NWL_DSI_CFG_STATUS_OUT 0x2c 24 + #define NWL_DSI_RX_ERROR_STATUS 0x30 25 + 26 + /* DSI DPI registers */ 27 + #define NWL_DSI_PIXEL_PAYLOAD_SIZE 0x200 28 + #define NWL_DSI_PIXEL_FIFO_SEND_LEVEL 0x204 29 + #define NWL_DSI_INTERFACE_COLOR_CODING 0x208 30 + #define NWL_DSI_PIXEL_FORMAT 0x20c 31 + #define NWL_DSI_VSYNC_POLARITY 0x210 32 + #define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0 33 + #define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1) 34 + 35 + #define NWL_DSI_HSYNC_POLARITY 0x214 36 + #define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0 37 + #define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1) 38 + 39 + #define NWL_DSI_VIDEO_MODE 0x218 40 + #define NWL_DSI_HFP 0x21c 41 + #define NWL_DSI_HBP 0x220 42 + #define NWL_DSI_HSA 0x224 43 + #define NWL_DSI_ENABLE_MULT_PKTS 0x228 44 + #define NWL_DSI_VBP 0x22c 45 + #define NWL_DSI_VFP 0x230 46 + #define NWL_DSI_BLLP_MODE 0x234 47 + #define NWL_DSI_USE_NULL_PKT_BLLP 0x238 48 + #define NWL_DSI_VACTIVE 0x23c 49 + #define NWL_DSI_VC 0x240 50 + 51 + /* DSI APB PKT control */ 52 + #define NWL_DSI_TX_PAYLOAD 0x280 53 + #define NWL_DSI_PKT_CONTROL 0x284 54 + #define NWL_DSI_SEND_PACKET 0x288 55 + #define NWL_DSI_PKT_STATUS 0x28c 56 + #define NWL_DSI_PKT_FIFO_WR_LEVEL 0x290 57 + #define NWL_DSI_PKT_FIFO_RD_LEVEL 0x294 58 + #define 
NWL_DSI_RX_PAYLOAD 0x298 59 + #define NWL_DSI_RX_PKT_HEADER 0x29c 60 + 61 + /* DSI IRQ handling */ 62 + #define NWL_DSI_IRQ_STATUS 0x2a0 63 + #define NWL_DSI_SM_NOT_IDLE BIT(0) 64 + #define NWL_DSI_TX_PKT_DONE BIT(1) 65 + #define NWL_DSI_DPHY_DIRECTION BIT(2) 66 + #define NWL_DSI_TX_FIFO_OVFLW BIT(3) 67 + #define NWL_DSI_TX_FIFO_UDFLW BIT(4) 68 + #define NWL_DSI_RX_FIFO_OVFLW BIT(5) 69 + #define NWL_DSI_RX_FIFO_UDFLW BIT(6) 70 + #define NWL_DSI_RX_PKT_HDR_RCVD BIT(7) 71 + #define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD BIT(8) 72 + #define NWL_DSI_BTA_TIMEOUT BIT(29) 73 + #define NWL_DSI_LP_RX_TIMEOUT BIT(30) 74 + #define NWL_DSI_HS_TX_TIMEOUT BIT(31) 75 + 76 + #define NWL_DSI_IRQ_STATUS2 0x2a4 77 + #define NWL_DSI_SINGLE_BIT_ECC_ERR BIT(0) 78 + #define NWL_DSI_MULTI_BIT_ECC_ERR BIT(1) 79 + #define NWL_DSI_CRC_ERR BIT(2) 80 + 81 + #define NWL_DSI_IRQ_MASK 0x2a8 82 + #define NWL_DSI_SM_NOT_IDLE_MASK BIT(0) 83 + #define NWL_DSI_TX_PKT_DONE_MASK BIT(1) 84 + #define NWL_DSI_DPHY_DIRECTION_MASK BIT(2) 85 + #define NWL_DSI_TX_FIFO_OVFLW_MASK BIT(3) 86 + #define NWL_DSI_TX_FIFO_UDFLW_MASK BIT(4) 87 + #define NWL_DSI_RX_FIFO_OVFLW_MASK BIT(5) 88 + #define NWL_DSI_RX_FIFO_UDFLW_MASK BIT(6) 89 + #define NWL_DSI_RX_PKT_HDR_RCVD_MASK BIT(7) 90 + #define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD_MASK BIT(8) 91 + #define NWL_DSI_BTA_TIMEOUT_MASK BIT(29) 92 + #define NWL_DSI_LP_RX_TIMEOUT_MASK BIT(30) 93 + #define NWL_DSI_HS_TX_TIMEOUT_MASK BIT(31) 94 + 95 + #define NWL_DSI_IRQ_MASK2 0x2ac 96 + #define NWL_DSI_SINGLE_BIT_ECC_ERR_MASK BIT(0) 97 + #define NWL_DSI_MULTI_BIT_ECC_ERR_MASK BIT(1) 98 + #define NWL_DSI_CRC_ERR_MASK BIT(2) 99 + 100 + /* 101 + * PKT_CONTROL format: 102 + * [15: 0] - word count 103 + * [17:16] - virtual channel 104 + * [23:18] - data type 105 + * [24] - LP or HS select (0 - LP, 1 - HS) 106 + * [25] - perform BTA after packet is sent 107 + * [26] - perform BTA only, no packet tx 108 + */ 109 + #define NWL_DSI_WC(x) FIELD_PREP(GENMASK(15, 0), (x)) 110 + #define 
NWL_DSI_TX_VC(x) FIELD_PREP(GENMASK(17, 16), (x)) 111 + #define NWL_DSI_TX_DT(x) FIELD_PREP(GENMASK(23, 18), (x)) 112 + #define NWL_DSI_HS_SEL(x) FIELD_PREP(GENMASK(24, 24), (x)) 113 + #define NWL_DSI_BTA_TX(x) FIELD_PREP(GENMASK(25, 25), (x)) 114 + #define NWL_DSI_BTA_NO_TX(x) FIELD_PREP(GENMASK(26, 26), (x)) 115 + 116 + /* 117 + * RX_PKT_HEADER format: 118 + * [15: 0] - word count 119 + * [21:16] - data type 120 + * [23:22] - virtual channel 121 + */ 122 + #define NWL_DSI_RX_DT(x) FIELD_GET(GENMASK(21, 16), (x)) 123 + #define NWL_DSI_RX_VC(x) FIELD_GET(GENMASK(23, 22), (x)) 124 + 125 + /* DSI Video mode */ 126 + #define NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES 0 127 + #define NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS BIT(0) 128 + #define NWL_DSI_VM_BURST_MODE BIT(1) 129 + 130 + /* * DPI color coding */ 131 + #define NWL_DSI_DPI_16_BIT_565_PACKED 0 132 + #define NWL_DSI_DPI_16_BIT_565_ALIGNED 1 133 + #define NWL_DSI_DPI_16_BIT_565_SHIFTED 2 134 + #define NWL_DSI_DPI_18_BIT_PACKED 3 135 + #define NWL_DSI_DPI_18_BIT_ALIGNED 4 136 + #define NWL_DSI_DPI_24_BIT 5 137 + 138 + /* * DPI Pixel format */ 139 + #define NWL_DSI_PIXEL_FORMAT_16 0 140 + #define NWL_DSI_PIXEL_FORMAT_18 BIT(0) 141 + #define NWL_DSI_PIXEL_FORMAT_18L BIT(1) 142 + #define NWL_DSI_PIXEL_FORMAT_24 (BIT(0) | BIT(1)) 143 + 144 + #endif /* __NWL_DSI_H__ */
+1
drivers/gpu/drm/bridge/panel.c
··· 311 311 312 312 /** 313 313 * drm_panel_bridge_connector - return the connector for the panel bridge 314 + * @bridge: The drm_bridge. 314 315 * 315 316 * drm_panel_bridge creates the connector. 316 317 * This function gives external access to the connector.
+2 -1
drivers/gpu/drm/bridge/sii9234.c
··· 836 836 ctx->supplies[3].supply = "cvcc12"; 837 837 ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies); 838 838 if (ret) { 839 - dev_err(ctx->dev, "regulator_bulk failed\n"); 839 + if (ret != -EPROBE_DEFER) 840 + dev_err(ctx->dev, "regulator_bulk failed\n"); 840 841 return ret; 841 842 } 842 843
+62 -24
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
··· 92 92 { 0x6756, 0x78ab, 0x2000, 0x0200 } 93 93 }; 94 94 95 + static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = { 96 + { 0x1b7c, 0x0000, 0x0000, 0x0020 }, 97 + { 0x0000, 0x1b7c, 0x0000, 0x0020 }, 98 + { 0x0000, 0x0000, 0x1b7c, 0x0020 } 99 + }; 100 + 95 101 struct hdmi_vmode { 96 102 bool mdataenablepolarity; 97 103 ··· 115 109 unsigned int pix_repet_factor; 116 110 unsigned int hdcp_enable; 117 111 struct hdmi_vmode video_mode; 112 + bool rgb_limited_range; 118 113 }; 119 114 120 115 struct dw_hdmi_i2c { ··· 963 956 964 957 static int is_color_space_conversion(struct dw_hdmi *hdmi) 965 958 { 966 - return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format; 959 + struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data; 960 + bool is_input_rgb, is_output_rgb; 961 + 962 + is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format); 963 + is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format); 964 + 965 + return (is_input_rgb != is_output_rgb) || 966 + (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range); 967 967 } 968 968 969 969 static int is_color_space_decimation(struct dw_hdmi *hdmi) ··· 997 983 return 0; 998 984 } 999 985 986 + static bool is_csc_needed(struct dw_hdmi *hdmi) 987 + { 988 + return is_color_space_conversion(hdmi) || 989 + is_color_space_decimation(hdmi) || 990 + is_color_space_interpolation(hdmi); 991 + } 992 + 1000 993 static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi) 1001 994 { 1002 995 const u16 (*csc_coeff)[3][4] = &csc_coeff_default; 996 + bool is_input_rgb, is_output_rgb; 1003 997 unsigned i; 1004 998 u32 csc_scale = 1; 1005 999 1006 - if (is_color_space_conversion(hdmi)) { 1007 - if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { 1008 - if (hdmi->hdmi_data.enc_out_encoding == 1009 - V4L2_YCBCR_ENC_601) 1010 - csc_coeff = &csc_coeff_rgb_out_eitu601; 1011 - else 1012 - csc_coeff = &csc_coeff_rgb_out_eitu709; 1013 - } else if (hdmi_bus_fmt_is_rgb( 1014 - 
hdmi->hdmi_data.enc_in_bus_format)) { 1015 - if (hdmi->hdmi_data.enc_out_encoding == 1016 - V4L2_YCBCR_ENC_601) 1017 - csc_coeff = &csc_coeff_rgb_in_eitu601; 1018 - else 1019 - csc_coeff = &csc_coeff_rgb_in_eitu709; 1020 - csc_scale = 0; 1021 - } 1000 + is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format); 1001 + is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format); 1002 + 1003 + if (!is_input_rgb && is_output_rgb) { 1004 + if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601) 1005 + csc_coeff = &csc_coeff_rgb_out_eitu601; 1006 + else 1007 + csc_coeff = &csc_coeff_rgb_out_eitu709; 1008 + } else if (is_input_rgb && !is_output_rgb) { 1009 + if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601) 1010 + csc_coeff = &csc_coeff_rgb_in_eitu601; 1011 + else 1012 + csc_coeff = &csc_coeff_rgb_in_eitu709; 1013 + csc_scale = 0; 1014 + } else if (is_input_rgb && is_output_rgb && 1015 + hdmi->hdmi_data.rgb_limited_range) { 1016 + csc_coeff = &csc_coeff_rgb_full_to_rgb_limited; 1022 1017 } 1023 1018 1024 1019 /* The CSC registers are sequential, alternating MSB then LSB */ ··· 1637 1614 drm_hdmi_avi_infoframe_from_display_mode(&frame, 1638 1615 &hdmi->connector, mode); 1639 1616 1617 + if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { 1618 + drm_hdmi_avi_infoframe_quant_range(&frame, &hdmi->connector, 1619 + mode, 1620 + hdmi->hdmi_data.rgb_limited_range ? 
1621 + HDMI_QUANTIZATION_RANGE_LIMITED : 1622 + HDMI_QUANTIZATION_RANGE_FULL); 1623 + } else { 1624 + frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; 1625 + frame.ycc_quantization_range = 1626 + HDMI_YCC_QUANTIZATION_RANGE_LIMITED; 1627 + } 1628 + 1640 1629 if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) 1641 1630 frame.colorspace = HDMI_COLORSPACE_YUV444; 1642 1631 else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) ··· 1688 1653 frame.extended_colorimetry = 1689 1654 HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; 1690 1655 } 1691 - 1692 - frame.scan_mode = HDMI_SCAN_MODE_NONE; 1693 1656 1694 1657 /* 1695 1658 * The Designware IP uses a different byte format from standard ··· 2043 2010 hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); 2044 2011 2045 2012 /* Enable csc path */ 2046 - if (is_color_space_conversion(hdmi)) { 2013 + if (is_csc_needed(hdmi)) { 2047 2014 hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE; 2048 2015 hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); 2049 - } 2050 2016 2051 - /* Enable color space conversion if needed */ 2052 - if (is_color_space_conversion(hdmi)) 2053 2017 hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH, 2054 2018 HDMI_MC_FLOWCTRL); 2055 - else 2019 + } else { 2020 + hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE; 2021 + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); 2022 + 2056 2023 hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS, 2057 2024 HDMI_MC_FLOWCTRL); 2025 + } 2058 2026 } 2059 2027 2060 2028 /* Workaround to clear the overflow condition */ ··· 2152 2118 2153 2119 if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED) 2154 2120 hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24; 2121 + 2122 + hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi && 2123 + drm_default_rgb_quant_range(mode) == 2124 + HDMI_QUANTIZATION_RANGE_LIMITED; 2155 2125 2156 2126 hdmi->hdmi_data.pix_repet_factor = 0; 2157 2127 
hdmi->hdmi_data.hdcp_enable = 0;
+29 -45
drivers/gpu/drm/cirrus/cirrus.c
··· 35 35 #include <drm/drm_gem_shmem_helper.h> 36 36 #include <drm/drm_gem_framebuffer_helper.h> 37 37 #include <drm/drm_ioctl.h> 38 + #include <drm/drm_managed.h> 38 39 #include <drm/drm_modeset_helper_vtables.h> 39 40 #include <drm/drm_probe_helper.h> 40 41 #include <drm/drm_simple_kms_helper.h> ··· 510 509 .atomic_commit = drm_atomic_helper_commit, 511 510 }; 512 511 513 - static void cirrus_mode_config_init(struct cirrus_device *cirrus) 512 + static int cirrus_mode_config_init(struct cirrus_device *cirrus) 514 513 { 515 514 struct drm_device *dev = &cirrus->dev; 515 + int ret; 516 516 517 - drm_mode_config_init(dev); 517 + ret = drmm_mode_config_init(dev); 518 + if (ret) 519 + return ret; 520 + 518 521 dev->mode_config.min_width = 0; 519 522 dev->mode_config.min_height = 0; 520 523 dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2; ··· 526 521 dev->mode_config.preferred_depth = 16; 527 522 dev->mode_config.prefer_shadow = 0; 528 523 dev->mode_config.funcs = &cirrus_mode_config_funcs; 524 + 525 + return 0; 529 526 } 530 527 531 528 /* ------------------------------------------------------------------ */ 532 - 533 - static void cirrus_release(struct drm_device *dev) 534 - { 535 - struct cirrus_device *cirrus = dev->dev_private; 536 - 537 - drm_mode_config_cleanup(dev); 538 - kfree(cirrus); 539 - } 540 529 541 530 DEFINE_DRM_GEM_FOPS(cirrus_fops); 542 531 ··· 545 546 546 547 .fops = &cirrus_fops, 547 548 DRM_GEM_SHMEM_DRIVER_OPS, 548 - .release = cirrus_release, 549 549 }; 550 550 551 551 static int cirrus_pci_probe(struct pci_dev *pdev, ··· 558 560 if (ret) 559 561 return ret; 560 562 561 - ret = pci_enable_device(pdev); 563 + ret = pcim_enable_device(pdev); 562 564 if (ret) 563 565 return ret; 564 566 ··· 569 571 ret = -ENOMEM; 570 572 cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL); 571 573 if (cirrus == NULL) 572 - goto err_pci_release; 574 + return ret; 573 575 574 576 dev = &cirrus->dev; 575 - ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev); 576 - if 
(ret) 577 - goto err_free_cirrus; 577 + ret = devm_drm_dev_init(&pdev->dev, dev, &cirrus_driver); 578 + if (ret) { 579 + kfree(cirrus); 580 + return ret; 581 + } 578 582 dev->dev_private = cirrus; 583 + drmm_add_final_kfree(dev, cirrus); 579 584 580 - ret = -ENOMEM; 581 - cirrus->vram = ioremap(pci_resource_start(pdev, 0), 582 - pci_resource_len(pdev, 0)); 585 + cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0), 586 + pci_resource_len(pdev, 0)); 583 587 if (cirrus->vram == NULL) 584 - goto err_dev_put; 588 + return -ENOMEM; 585 589 586 - cirrus->mmio = ioremap(pci_resource_start(pdev, 1), 587 - pci_resource_len(pdev, 1)); 590 + cirrus->mmio = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1), 591 + pci_resource_len(pdev, 1)); 588 592 if (cirrus->mmio == NULL) 589 - goto err_unmap_vram; 593 + return -ENOMEM; 590 594 591 - cirrus_mode_config_init(cirrus); 595 + ret = cirrus_mode_config_init(cirrus); 596 + if (ret) 597 + return ret; 592 598 593 599 ret = cirrus_conn_init(cirrus); 594 600 if (ret < 0) 595 - goto err_cleanup; 601 + return ret; 596 602 597 603 ret = cirrus_pipe_init(cirrus); 598 604 if (ret < 0) 599 - goto err_cleanup; 605 + return ret; 600 606 601 607 drm_mode_config_reset(dev); 602 608 ··· 608 606 pci_set_drvdata(pdev, dev); 609 607 ret = drm_dev_register(dev, 0); 610 608 if (ret) 611 - goto err_cleanup; 609 + return ret; 612 610 613 611 drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); 614 612 return 0; 615 - 616 - err_cleanup: 617 - drm_mode_config_cleanup(dev); 618 - iounmap(cirrus->mmio); 619 - err_unmap_vram: 620 - iounmap(cirrus->vram); 621 - err_dev_put: 622 - drm_dev_put(dev); 623 - err_free_cirrus: 624 - kfree(cirrus); 625 - err_pci_release: 626 - pci_release_regions(pdev); 627 - return ret; 628 613 } 629 614 630 615 static void cirrus_pci_remove(struct pci_dev *pdev) 631 616 { 632 617 struct drm_device *dev = pci_get_drvdata(pdev); 633 - struct cirrus_device *cirrus = dev->dev_private; 634 618 635 619 
drm_dev_unplug(dev); 636 620 drm_atomic_helper_shutdown(dev); 637 - iounmap(cirrus->mmio); 638 - iounmap(cirrus->vram); 639 - drm_dev_put(dev); 640 - pci_release_regions(pdev); 641 621 } 642 622 643 623 static const struct pci_device_id pciidlist[] = {
+4 -4
drivers/gpu/drm/drm_atomic.c
··· 1641 1641 {"state", drm_state_info, 0}, 1642 1642 }; 1643 1643 1644 - int drm_atomic_debugfs_init(struct drm_minor *minor) 1644 + void drm_atomic_debugfs_init(struct drm_minor *minor) 1645 1645 { 1646 - return drm_debugfs_create_files(drm_atomic_debugfs_list, 1647 - ARRAY_SIZE(drm_atomic_debugfs_list), 1648 - minor->debugfs_root, minor); 1646 + drm_debugfs_create_files(drm_atomic_debugfs_list, 1647 + ARRAY_SIZE(drm_atomic_debugfs_list), 1648 + minor->debugfs_root, minor); 1649 1649 } 1650 1650 #endif
+68 -1
drivers/gpu/drm/drm_auth.c
··· 135 135 } 136 136 } 137 137 138 + fpriv->was_master = (ret == 0); 138 139 return ret; 139 140 } 140 141 ··· 175 174 return ret; 176 175 } 177 176 177 + /* 178 + * In the olden days the SET/DROP_MASTER ioctls used to return EACCES when 179 + * CAP_SYS_ADMIN was not set. This was used to prevent rogue applications 180 + * from becoming master and/or failing to release it. 181 + * 182 + * At the same time, the first client (for a given VT) is _always_ master. 183 + * Thus in order for the ioctls to succeed, one had to _explicitly_ run the 184 + * application as root or flip the setuid bit. 185 + * 186 + * If the CAP_SYS_ADMIN was missing, no other client could become master... 187 + * EVER :-( Leading to a) the graphics session dying badly or b) a completely 188 + * locked session. 189 + * 190 + * 191 + * As some point systemd-logind was introduced to orchestrate and delegate 192 + * master as applicable. It does so by opening the fd and passing it to users 193 + * while in itself logind a) does the set/drop master per users' request and 194 + * b) * implicitly drops master on VT switch. 195 + * 196 + * Even though logind looks like the future, there are a few issues: 197 + * - some platforms don't have equivalent (Android, CrOS, some BSDs) so 198 + * root is required _solely_ for SET/DROP MASTER. 199 + * - applications may not be updated to use it, 200 + * - any client which fails to drop master* can DoS the application using 201 + * logind, to a varying degree. 202 + * 203 + * * Either due missing CAP_SYS_ADMIN or simply not calling DROP_MASTER. 204 + * 205 + * 206 + * Here we implement the next best thing: 207 + * - ensure the logind style of fd passing works unchanged, and 208 + * - allow a client to drop/set master, iff it is/was master at a given point 209 + * in time. 
210 + * 211 + * Note: DROP_MASTER cannot be free for all, as an arbitrator user could: 212 + * - DoS/crash the arbitrator - details would be implementation specific 213 + * - open the node, become master implicitly and cause issues 214 + * 215 + * As a result this fixes the following when using root-less build w/o logind 216 + * - startx 217 + * - weston 218 + * - various compositors based on wlroots 219 + */ 220 + static int 221 + drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv) 222 + { 223 + if (file_priv->pid == task_pid(current) && file_priv->was_master) 224 + return 0; 225 + 226 + if (!capable(CAP_SYS_ADMIN)) 227 + return -EACCES; 228 + 229 + return 0; 230 + } 231 + 178 232 int drm_setmaster_ioctl(struct drm_device *dev, void *data, 179 233 struct drm_file *file_priv) 180 234 { 181 235 int ret = 0; 182 236 183 237 mutex_lock(&dev->master_mutex); 238 + 239 + ret = drm_master_check_perm(dev, file_priv); 240 + if (ret) 241 + goto out_unlock; 242 + 184 243 if (drm_is_current_master(file_priv)) 185 244 goto out_unlock; 186 245 187 246 if (dev->master) { 188 - ret = -EINVAL; 247 + ret = -EBUSY; 189 248 goto out_unlock; 190 249 } 191 250 ··· 285 224 int ret = -EINVAL; 286 225 287 226 mutex_lock(&dev->master_mutex); 227 + 228 + ret = drm_master_check_perm(dev, file_priv); 229 + if (ret) 230 + goto out_unlock; 231 + 232 + ret = -EINVAL; 288 233 if (!drm_is_current_master(file_priv)) 289 234 goto out_unlock; 290 235
+6
drivers/gpu/drm/drm_blend.c
··· 183 183 * plane does not expose the "alpha" property, then this is 184 184 * assumed to be 1.0 185 185 * 186 + * IN_FORMATS: 187 + * Blob property which contains the set of buffer format and modifier 188 + * pairs supported by this plane. The blob is a drm_format_modifier_blob 189 + * struct. Without this property the plane doesn't support buffers with 190 + * modifiers. Userspace cannot change this property. 191 + * 186 192 * Note that all the property extensions described here apply either to the 187 193 * plane or the CRTC (e.g. for the background color, which currently is not 188 194 * exposed and assumed to be black).
+1 -1
drivers/gpu/drm/drm_bufs.c
··· 33 33 #include <linux/mm.h> 34 34 #include <linux/mman.h> 35 35 #include <linux/nospec.h> 36 + #include <linux/pci.h> 36 37 #include <linux/slab.h> 37 38 #include <linux/uaccess.h> 38 39 #include <linux/vmalloc.h> ··· 44 43 #include <drm/drm_device.h> 45 44 #include <drm/drm_drv.h> 46 45 #include <drm/drm_file.h> 47 - #include <drm/drm_pci.h> 48 46 #include <drm/drm_print.h> 49 47 50 48 #include "drm_legacy.h"
+4 -4
drivers/gpu/drm/drm_client.c
··· 457 457 { "internal_clients", drm_client_debugfs_internal_clients, 0 }, 458 458 }; 459 459 460 - int drm_client_debugfs_init(struct drm_minor *minor) 460 + void drm_client_debugfs_init(struct drm_minor *minor) 461 461 { 462 - return drm_debugfs_create_files(drm_client_debugfs_list, 463 - ARRAY_SIZE(drm_client_debugfs_list), 464 - minor->debugfs_root, minor); 462 + drm_debugfs_create_files(drm_client_debugfs_list, 463 + ARRAY_SIZE(drm_client_debugfs_list), 464 + minor->debugfs_root, minor); 465 465 } 466 466 #endif
+4 -2
drivers/gpu/drm/drm_connector.c
··· 1970 1970 else 1971 1971 drm_reset_display_info(connector); 1972 1972 1973 + drm_update_tile_info(connector, edid); 1974 + 1973 1975 drm_object_property_set_value(&connector->base, 1974 1976 dev->mode_config.non_desktop_property, 1975 1977 connector->display_info.non_desktop); ··· 2394 2392 * tile group or NULL if not found. 2395 2393 */ 2396 2394 struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, 2397 - char topology[8]) 2395 + const char topology[8]) 2398 2396 { 2399 2397 struct drm_tile_group *tg; 2400 2398 int id; ··· 2424 2422 * new tile group or NULL. 2425 2423 */ 2426 2424 struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, 2427 - char topology[8]) 2425 + const char topology[8]) 2428 2426 { 2429 2427 struct drm_tile_group *tg; 2430 2428 int ret;
+3 -1
drivers/gpu/drm/drm_crtc_internal.h
··· 82 82 /* drm_mode_config.c */ 83 83 int drm_modeset_register_all(struct drm_device *dev); 84 84 void drm_modeset_unregister_all(struct drm_device *dev); 85 + void drm_mode_config_validate(struct drm_device *dev); 85 86 86 87 /* drm_modes.c */ 87 88 const char *drm_get_mode_status_name(enum drm_mode_status status); ··· 225 224 /* drm_atomic.c */ 226 225 #ifdef CONFIG_DEBUG_FS 227 226 struct drm_minor; 228 - int drm_atomic_debugfs_init(struct drm_minor *minor); 227 + void drm_atomic_debugfs_init(struct drm_minor *minor); 229 228 #endif 230 229 231 230 int __drm_atomic_helper_disable_plane(struct drm_plane *plane, ··· 279 278 void drm_mode_fixup_1366x768(struct drm_display_mode *mode); 280 279 void drm_reset_display_info(struct drm_connector *connector); 281 280 u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid); 281 + void drm_update_tile_info(struct drm_connector *connector, const struct edid *edid);
+10 -35
drivers/gpu/drm/drm_debugfs.c
··· 172 172 * &struct drm_info_list in the given root directory. These files will be removed 173 173 * automatically on drm_debugfs_cleanup(). 174 174 */ 175 - int drm_debugfs_create_files(const struct drm_info_list *files, int count, 176 - struct dentry *root, struct drm_minor *minor) 175 + void drm_debugfs_create_files(const struct drm_info_list *files, int count, 176 + struct dentry *root, struct drm_minor *minor) 177 177 { 178 178 struct drm_device *dev = minor->dev; 179 179 struct drm_info_node *tmp; ··· 199 199 list_add(&tmp->list, &minor->debugfs_list); 200 200 mutex_unlock(&minor->debugfs_lock); 201 201 } 202 - return 0; 203 202 } 204 203 EXPORT_SYMBOL(drm_debugfs_create_files); 205 204 ··· 207 208 { 208 209 struct drm_device *dev = minor->dev; 209 210 char name[64]; 210 - int ret; 211 211 212 212 INIT_LIST_HEAD(&minor->debugfs_list); 213 213 mutex_init(&minor->debugfs_lock); 214 214 sprintf(name, "%d", minor_id); 215 215 minor->debugfs_root = debugfs_create_dir(name, root); 216 216 217 - ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, 218 - minor->debugfs_root, minor); 219 - if (ret) { 220 - debugfs_remove(minor->debugfs_root); 221 - minor->debugfs_root = NULL; 222 - DRM_ERROR("Failed to create core drm debugfs files\n"); 223 - return ret; 224 - } 217 + drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, 218 + minor->debugfs_root, minor); 225 219 226 220 if (drm_drv_uses_atomic_modeset(dev)) { 227 - ret = drm_atomic_debugfs_init(minor); 228 - if (ret) { 229 - DRM_ERROR("Failed to create atomic debugfs files\n"); 230 - return ret; 231 - } 221 + drm_atomic_debugfs_init(minor); 232 222 } 233 223 234 224 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 235 - ret = drm_framebuffer_debugfs_init(minor); 236 - if (ret) { 237 - DRM_ERROR("Failed to create framebuffer debugfs file\n"); 238 - return ret; 239 - } 225 + drm_framebuffer_debugfs_init(minor); 240 226 241 - ret = drm_client_debugfs_init(minor); 242 - if (ret) { 243 - 
DRM_ERROR("Failed to create client debugfs file\n"); 244 - return ret; 245 - } 227 + drm_client_debugfs_init(minor); 246 228 } 247 229 248 - if (dev->driver->debugfs_init) { 249 - ret = dev->driver->debugfs_init(minor); 250 - if (ret) { 251 - DRM_ERROR("DRM: Driver failed to initialize " 252 - "/sys/kernel/debug/dri.\n"); 253 - return ret; 254 - } 255 - } 230 + if (dev->driver->debugfs_init) 231 + dev->driver->debugfs_init(minor); 232 + 256 233 return 0; 257 234 } 258 235
+1 -1
drivers/gpu/drm/drm_dma.c
··· 34 34 */ 35 35 36 36 #include <linux/export.h> 37 + #include <linux/pci.h> 37 38 38 39 #include <drm/drm_drv.h> 39 - #include <drm/drm_pci.h> 40 40 #include <drm/drm_print.h> 41 41 42 42 #include "drm_legacy.h"
+159 -156
drivers/gpu/drm/drm_dp_mst_topology.c
··· 27 27 #include <linux/kernel.h> 28 28 #include <linux/sched.h> 29 29 #include <linux/seq_file.h> 30 + #include <linux/iopoll.h> 30 31 31 32 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) 32 33 #include <linux/stacktrace.h> ··· 688 687 raw->cur_len = idx; 689 688 } 690 689 691 - /* this adds a chunk of msg to the builder to get the final msg */ 692 - static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, 693 - u8 *replybuf, u8 replybuflen, bool hdr) 690 + static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg, 691 + struct drm_dp_sideband_msg_hdr *hdr, 692 + u8 hdrlen) 694 693 { 695 - int ret; 694 + /* 695 + * ignore out-of-order messages or messages that are part of a 696 + * failed transaction 697 + */ 698 + if (!hdr->somt && !msg->have_somt) 699 + return false; 700 + 701 + /* get length contained in this portion */ 702 + msg->curchunk_idx = 0; 703 + msg->curchunk_len = hdr->msg_len; 704 + msg->curchunk_hdrlen = hdrlen; 705 + 706 + /* we have already gotten an somt - don't bother parsing */ 707 + if (hdr->somt && msg->have_somt) 708 + return false; 709 + 710 + if (hdr->somt) { 711 + memcpy(&msg->initial_hdr, hdr, 712 + sizeof(struct drm_dp_sideband_msg_hdr)); 713 + msg->have_somt = true; 714 + } 715 + if (hdr->eomt) 716 + msg->have_eomt = true; 717 + 718 + return true; 719 + } 720 + 721 + /* this adds a chunk of msg to the builder to get the final msg */ 722 + static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg, 723 + u8 *replybuf, u8 replybuflen) 724 + { 696 725 u8 crc4; 697 726 698 - if (hdr) { 699 - u8 hdrlen; 700 - struct drm_dp_sideband_msg_hdr recv_hdr; 701 - ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen); 702 - if (ret == false) { 703 - print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false); 704 - return false; 705 - } 706 - 707 - /* 708 - * ignore out-of-order messages or messages that are part of a 709 - * 
failed transaction 710 - */ 711 - if (!recv_hdr.somt && !msg->have_somt) 712 - return false; 713 - 714 - /* get length contained in this portion */ 715 - msg->curchunk_len = recv_hdr.msg_len; 716 - msg->curchunk_hdrlen = hdrlen; 717 - 718 - /* we have already gotten an somt - don't bother parsing */ 719 - if (recv_hdr.somt && msg->have_somt) 720 - return false; 721 - 722 - if (recv_hdr.somt) { 723 - memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); 724 - msg->have_somt = true; 725 - } 726 - if (recv_hdr.eomt) 727 - msg->have_eomt = true; 728 - 729 - /* copy the bytes for the remainder of this header chunk */ 730 - msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen)); 731 - memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); 732 - } else { 733 - memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); 734 - msg->curchunk_idx += replybuflen; 735 - } 727 + memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); 728 + msg->curchunk_idx += replybuflen; 736 729 737 730 if (msg->curchunk_idx >= msg->curchunk_len) { 738 731 /* do CRC */ ··· 1055 1060 drm_dp_encode_sideband_req(&req, msg); 1056 1061 } 1057 1062 1058 - static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg) 1063 + static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg) 1059 1064 { 1060 1065 struct drm_dp_sideband_msg_req_body req; 1061 1066 1062 1067 req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE; 1063 1068 drm_dp_encode_sideband_req(&req, msg); 1064 - return 0; 1065 1069 } 1066 1070 1067 1071 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, ··· 1205 1211 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { 1206 1212 mstb->tx_slots[txmsg->seqno] = NULL; 1207 1213 } 1208 - mgr->is_waiting_for_dwn_reply = false; 1209 - 1210 1214 } 1211 1215 out: 1212 1216 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { ··· 1214 1222 } 1215 1223 mutex_unlock(&mgr->qlock); 1216 1224 1217 - 
drm_dp_mst_kick_tx(mgr); 1218 1225 return ret; 1219 1226 } 1220 1227 ··· 2789 2798 ret = process_single_tx_qlock(mgr, txmsg, false); 2790 2799 if (ret == 1) { 2791 2800 /* txmsg is sent it should be in the slots now */ 2792 - mgr->is_waiting_for_dwn_reply = true; 2793 2801 list_del(&txmsg->next); 2794 2802 } else if (ret) { 2795 2803 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 2796 - mgr->is_waiting_for_dwn_reply = false; 2797 2804 list_del(&txmsg->next); 2798 2805 if (txmsg->seqno != -1) 2799 2806 txmsg->dst->tx_slots[txmsg->seqno] = NULL; ··· 2831 2842 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); 2832 2843 } 2833 2844 2834 - if (list_is_singular(&mgr->tx_msg_downq) && 2835 - !mgr->is_waiting_for_dwn_reply) 2845 + if (list_is_singular(&mgr->tx_msg_downq)) 2836 2846 process_single_down_tx_qlock(mgr); 2837 2847 mutex_unlock(&mgr->qlock); 2838 2848 } ··· 3691 3703 } 3692 3704 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); 3693 3705 3694 - static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) 3706 + static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, 3707 + struct drm_dp_mst_branch **mstb, int *seqno) 3695 3708 { 3696 3709 int len; 3697 3710 u8 replyblock[32]; 3698 3711 int replylen, curreply; 3699 3712 int ret; 3713 + u8 hdrlen; 3714 + struct drm_dp_sideband_msg_hdr hdr; 3700 3715 struct drm_dp_sideband_msg_rx *msg; 3701 - int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE; 3702 - msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv; 3716 + int basereg = up ? 
DP_SIDEBAND_MSG_UP_REQ_BASE : 3717 + DP_SIDEBAND_MSG_DOWN_REP_BASE; 3718 + 3719 + if (!up) 3720 + *mstb = NULL; 3721 + *seqno = -1; 3703 3722 3704 3723 len = min(mgr->max_dpcd_transaction_bytes, 16); 3705 - ret = drm_dp_dpcd_read(mgr->aux, basereg, 3706 - replyblock, len); 3724 + ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); 3707 3725 if (ret != len) { 3708 3726 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); 3709 3727 return false; 3710 3728 } 3711 - ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); 3729 + 3730 + ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen); 3731 + if (ret == false) { 3732 + print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 3733 + 1, replyblock, len, false); 3734 + DRM_DEBUG_KMS("ERROR: failed header\n"); 3735 + return false; 3736 + } 3737 + 3738 + *seqno = hdr.seqno; 3739 + 3740 + if (up) { 3741 + msg = &mgr->up_req_recv; 3742 + } else { 3743 + /* Caller is responsible for giving back this reference */ 3744 + *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad); 3745 + if (!*mstb) { 3746 + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", 3747 + hdr.lct); 3748 + return false; 3749 + } 3750 + msg = &(*mstb)->down_rep_recv[hdr.seqno]; 3751 + } 3752 + 3753 + if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) { 3754 + DRM_DEBUG_KMS("sideband msg set header failed %d\n", 3755 + replyblock[0]); 3756 + return false; 3757 + } 3758 + 3759 + replylen = min(msg->curchunk_len, (u8)(len - hdrlen)); 3760 + ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen); 3712 3761 if (!ret) { 3713 3762 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); 3714 3763 return false; 3715 3764 } 3716 - replylen = msg->curchunk_len + msg->curchunk_hdrlen; 3717 3765 3718 - replylen -= len; 3766 + replylen = msg->curchunk_len + msg->curchunk_hdrlen - len; 3719 3767 curreply = len; 3720 3768 while (replylen > 0) { 3721 3769 len = min3(replylen, 
mgr->max_dpcd_transaction_bytes, 16); ··· 3763 3739 return false; 3764 3740 } 3765 3741 3766 - ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); 3742 + ret = drm_dp_sideband_append_payload(msg, replyblock, len); 3767 3743 if (!ret) { 3768 3744 DRM_DEBUG_KMS("failed to build sideband msg\n"); 3769 3745 return false; ··· 3778 3754 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 3779 3755 { 3780 3756 struct drm_dp_sideband_msg_tx *txmsg; 3781 - struct drm_dp_mst_branch *mstb; 3782 - struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr; 3783 - int slot = -1; 3757 + struct drm_dp_mst_branch *mstb = NULL; 3758 + struct drm_dp_sideband_msg_rx *msg = NULL; 3759 + int seqno = -1; 3784 3760 3785 - if (!drm_dp_get_one_sb_msg(mgr, false)) 3786 - goto clear_down_rep_recv; 3761 + if (!drm_dp_get_one_sb_msg(mgr, false, &mstb, &seqno)) 3762 + goto out_clear_reply; 3787 3763 3788 - if (!mgr->down_rep_recv.have_eomt) 3789 - return 0; 3764 + msg = &mstb->down_rep_recv[seqno]; 3790 3765 3791 - mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); 3792 - if (!mstb) { 3793 - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", 3794 - hdr->lct); 3795 - goto clear_down_rep_recv; 3796 - } 3766 + /* Multi-packet message transmission, don't clear the reply */ 3767 + if (!msg->have_eomt) 3768 + goto out; 3797 3769 3798 3770 /* find the message */ 3799 - slot = hdr->seqno; 3800 3771 mutex_lock(&mgr->qlock); 3801 - txmsg = mstb->tx_slots[slot]; 3772 + txmsg = mstb->tx_slots[seqno]; 3802 3773 /* remove from slots */ 3803 3774 mutex_unlock(&mgr->qlock); 3804 3775 3805 3776 if (!txmsg) { 3777 + struct drm_dp_sideband_msg_hdr *hdr; 3778 + hdr = &msg->initial_hdr; 3806 3779 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", 3807 3780 mstb, hdr->seqno, hdr->lct, hdr->rad[0], 3808 - mgr->down_rep_recv.msg[0]); 3809 - goto no_msg; 3781 + msg->msg[0]); 3782 + goto out_clear_reply; 3810 3783 } 3811 3784 3812 - 
drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); 3785 + drm_dp_sideband_parse_reply(msg, &txmsg->reply); 3813 3786 3814 - if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3787 + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 3815 3788 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", 3816 3789 txmsg->reply.req_type, 3817 3790 drm_dp_mst_req_type_str(txmsg->reply.req_type), 3818 3791 txmsg->reply.u.nak.reason, 3819 3792 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), 3820 3793 txmsg->reply.u.nak.nak_data); 3794 + } 3821 3795 3822 - memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 3796 + memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); 3823 3797 drm_dp_mst_topology_put_mstb(mstb); 3824 3798 3825 3799 mutex_lock(&mgr->qlock); 3826 3800 txmsg->state = DRM_DP_SIDEBAND_TX_RX; 3827 - mstb->tx_slots[slot] = NULL; 3828 - mgr->is_waiting_for_dwn_reply = false; 3801 + mstb->tx_slots[seqno] = NULL; 3829 3802 mutex_unlock(&mgr->qlock); 3830 3803 3831 3804 wake_up_all(&mgr->tx_waitq); 3832 3805 3833 3806 return 0; 3834 3807 3835 - no_msg: 3836 - drm_dp_mst_topology_put_mstb(mstb); 3837 - clear_down_rep_recv: 3838 - mutex_lock(&mgr->qlock); 3839 - mgr->is_waiting_for_dwn_reply = false; 3840 - mutex_unlock(&mgr->qlock); 3841 - memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 3808 + out_clear_reply: 3809 + if (msg) 3810 + memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); 3811 + out: 3812 + if (mstb) 3813 + drm_dp_mst_topology_put_mstb(mstb); 3842 3814 3843 3815 return 0; 3844 3816 } ··· 3910 3890 3911 3891 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 3912 3892 { 3913 - struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr; 3914 3893 struct drm_dp_pending_up_req *up_req; 3915 - bool seqno; 3894 + int seqno; 3916 3895 3917 - if (!drm_dp_get_one_sb_msg(mgr, true)) 3896 + if (!drm_dp_get_one_sb_msg(mgr, true, NULL, 
&seqno)) 3918 3897 goto out; 3919 3898 3920 3899 if (!mgr->up_req_recv.have_eomt) ··· 3926 3907 } 3927 3908 INIT_LIST_HEAD(&up_req->next); 3928 3909 3929 - seqno = hdr->seqno; 3930 3910 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); 3931 3911 3932 3912 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && ··· 3959 3941 res_stat->available_pbn); 3960 3942 } 3961 3943 3962 - up_req->hdr = *hdr; 3944 + up_req->hdr = mgr->up_req_recv.initial_hdr; 3963 3945 mutex_lock(&mgr->up_req_lock); 3964 3946 list_add_tail(&up_req->next, &mgr->up_req_list); 3965 3947 mutex_unlock(&mgr->up_req_lock); ··· 4063 4045 return ret; 4064 4046 } 4065 4047 EXPORT_SYMBOL(drm_dp_mst_detect_port); 4066 - 4067 - /** 4068 - * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not 4069 - * @mgr: manager for this port 4070 - * @port: unverified pointer to a port. 4071 - * 4072 - * This returns whether the port supports audio or not. 4073 - */ 4074 - bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, 4075 - struct drm_dp_mst_port *port) 4076 - { 4077 - bool ret = false; 4078 - 4079 - port = drm_dp_mst_topology_get_port_validated(mgr, port); 4080 - if (!port) 4081 - return ret; 4082 - ret = port->has_audio; 4083 - drm_dp_mst_topology_put_port(port); 4084 - return ret; 4085 - } 4086 - EXPORT_SYMBOL(drm_dp_mst_port_has_audio); 4087 4048 4088 4049 /** 4089 4050 * drm_dp_mst_get_edid() - get EDID for an MST port ··· 4440 4443 return ret; 4441 4444 } 4442 4445 4446 + static int do_get_act_status(struct drm_dp_aux *aux) 4447 + { 4448 + int ret; 4449 + u8 status; 4450 + 4451 + ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 4452 + if (ret < 0) 4453 + return ret; 4454 + 4455 + return status; 4456 + } 4443 4457 4444 4458 /** 4445 - * drm_dp_check_act_status() - Check ACT handled status. 4459 + * drm_dp_check_act_status() - Polls for ACT handled status. 
4446 4460 * @mgr: manager to use 4447 4461 * 4448 - * Check the payload status bits in the DPCD for ACT handled completion. 4462 + * Tries waiting for the MST hub to finish updating it's payload table by 4463 + * polling for the ACT handled bit for up to 3 seconds (yes-some hubs really 4464 + * take that long). 4465 + * 4466 + * Returns: 4467 + * 0 if the ACT was handled in time, negative error code on failure. 4449 4468 */ 4450 4469 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) 4451 4470 { 4452 - u8 status; 4453 - int ret; 4454 - int count = 0; 4471 + /* 4472 + * There doesn't seem to be any recommended retry count or timeout in 4473 + * the MST specification. Since some hubs have been observed to take 4474 + * over 1 second to update their payload allocations under certain 4475 + * conditions, we use a rather large timeout value. 4476 + */ 4477 + const int timeout_ms = 3000; 4478 + int ret, status; 4455 4479 4456 - do { 4457 - ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 4458 - 4459 - if (ret < 0) { 4460 - DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); 4461 - goto fail; 4462 - } 4463 - 4464 - if (status & DP_PAYLOAD_ACT_HANDLED) 4465 - break; 4466 - count++; 4467 - udelay(100); 4468 - 4469 - } while (count < 30); 4470 - 4471 - if (!(status & DP_PAYLOAD_ACT_HANDLED)) { 4472 - DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); 4473 - ret = -EINVAL; 4474 - goto fail; 4480 + ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, 4481 + status & DP_PAYLOAD_ACT_HANDLED || status < 0, 4482 + 200, timeout_ms * USEC_PER_MSEC); 4483 + if (ret < 0 && status >= 0) { 4484 + DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n", 4485 + timeout_ms, status); 4486 + return -EINVAL; 4487 + } else if (status < 0) { 4488 + /* 4489 + * Failure here isn't unexpected - the hub may have 4490 + * just been unplugged 4491 + */ 4492 + DRM_DEBUG_KMS("Failed to read payload table 
status: %d\n", 4493 + status); 4494 + return status; 4475 4495 } 4496 + 4476 4497 return 0; 4477 - fail: 4478 - return ret; 4479 4498 } 4480 4499 EXPORT_SYMBOL(drm_dp_check_act_status); 4481 4500 ··· 4682 4669 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); 4683 4670 4684 4671 mutex_lock(&mgr->qlock); 4685 - if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply) 4672 + if (!list_empty(&mgr->tx_msg_downq)) 4686 4673 process_single_down_tx_qlock(mgr); 4687 4674 mutex_unlock(&mgr->qlock); 4688 - } 4689 - 4690 - static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port) 4691 - { 4692 - if (!port->connector) 4693 - return; 4694 - 4695 - if (port->mgr->cbs->destroy_connector) { 4696 - port->mgr->cbs->destroy_connector(port->mgr, port->connector); 4697 - } else { 4698 - drm_connector_unregister(port->connector); 4699 - drm_connector_put(port->connector); 4700 - } 4701 4675 } 4702 4676 4703 4677 static inline void 4704 4678 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) 4705 4679 { 4706 - drm_dp_destroy_connector(port); 4680 + if (port->connector) { 4681 + drm_connector_unregister(port->connector); 4682 + drm_connector_put(port->connector); 4683 + } 4707 4684 4708 4685 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); 4709 4686 drm_dp_mst_put_port_malloc(port);
+94 -125
drivers/gpu/drm/drm_drv.c
··· 39 39 #include <drm/drm_color_mgmt.h> 40 40 #include <drm/drm_drv.h> 41 41 #include <drm/drm_file.h> 42 + #include <drm/drm_managed.h> 42 43 #include <drm/drm_mode_object.h> 43 44 #include <drm/drm_print.h> 44 45 ··· 93 92 } 94 93 } 95 94 95 + static void drm_minor_alloc_release(struct drm_device *dev, void *data) 96 + { 97 + struct drm_minor *minor = data; 98 + unsigned long flags; 99 + 100 + WARN_ON(dev != minor->dev); 101 + 102 + put_device(minor->kdev); 103 + 104 + spin_lock_irqsave(&drm_minor_lock, flags); 105 + idr_remove(&drm_minors_idr, minor->index); 106 + spin_unlock_irqrestore(&drm_minor_lock, flags); 107 + } 108 + 96 109 static int drm_minor_alloc(struct drm_device *dev, unsigned int type) 97 110 { 98 111 struct drm_minor *minor; 99 112 unsigned long flags; 100 113 int r; 101 114 102 - minor = kzalloc(sizeof(*minor), GFP_KERNEL); 115 + minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL); 103 116 if (!minor) 104 117 return -ENOMEM; 105 118 ··· 131 116 idr_preload_end(); 132 117 133 118 if (r < 0) 134 - goto err_free; 119 + return r; 135 120 136 121 minor->index = r; 137 122 123 + r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor); 124 + if (r) 125 + return r; 126 + 138 127 minor->kdev = drm_sysfs_minor_alloc(minor); 139 - if (IS_ERR(minor->kdev)) { 140 - r = PTR_ERR(minor->kdev); 141 - goto err_index; 142 - } 128 + if (IS_ERR(minor->kdev)) 129 + return PTR_ERR(minor->kdev); 143 130 144 131 *drm_minor_get_slot(dev, type) = minor; 145 132 return 0; 146 - 147 - err_index: 148 - spin_lock_irqsave(&drm_minor_lock, flags); 149 - idr_remove(&drm_minors_idr, minor->index); 150 - spin_unlock_irqrestore(&drm_minor_lock, flags); 151 - err_free: 152 - kfree(minor); 153 - return r; 154 - } 155 - 156 - static void drm_minor_free(struct drm_device *dev, unsigned int type) 157 - { 158 - struct drm_minor **slot, *minor; 159 - unsigned long flags; 160 - 161 - slot = drm_minor_get_slot(dev, type); 162 - minor = *slot; 163 - if (!minor) 164 - return; 
165 - 166 - put_device(minor->kdev); 167 - 168 - spin_lock_irqsave(&drm_minor_lock, flags); 169 - idr_remove(&drm_minors_idr, minor->index); 170 - spin_unlock_irqrestore(&drm_minor_lock, flags); 171 - 172 - kfree(minor); 173 - *slot = NULL; 174 133 } 175 134 176 135 static int drm_minor_register(struct drm_device *dev, unsigned int type) ··· 259 270 * any other resources allocated at device initialization and drop the driver's 260 271 * reference to &drm_device using drm_dev_put(). 261 272 * 262 - * Note that the lifetime rules for &drm_device instance has still a lot of 263 - * historical baggage. Hence use the reference counting provided by 264 - * drm_dev_get() and drm_dev_put() only carefully. 273 + * Note that any allocation or resource which is visible to userspace must be 274 + * released only when the final drm_dev_put() is called, and not when the 275 + * driver is unbound from the underlying physical struct &device. Best to use 276 + * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and 277 + * related functions. 278 + * 279 + * devres managed resources like devm_kmalloc() can only be used for resources 280 + * directly related to the underlying hardware device, and only used in code 281 + * paths fully protected by drm_dev_enter() and drm_dev_exit(). 265 282 * 266 283 * Display driver example 267 284 * ~~~~~~~~~~~~~~~~~~~~~~ 268 285 * 269 286 * The following example shows a typical structure of a DRM display driver. 270 287 * The example focus on the probe() function and the other functions that is 271 - * almost always present and serves as a demonstration of devm_drm_dev_init() 272 - * usage with its accompanying drm_driver->release callback. 288 + * almost always present and serves as a demonstration of devm_drm_dev_init(). 273 289 * 274 290 * .. 
code-block:: c 275 291 * ··· 284 290 * struct clk *pclk; 285 291 * }; 286 292 * 287 - * static void driver_drm_release(struct drm_device *drm) 288 - * { 289 - * struct driver_device *priv = container_of(...); 290 - * 291 - * drm_mode_config_cleanup(drm); 292 - * drm_dev_fini(drm); 293 - * kfree(priv->userspace_facing); 294 - * kfree(priv); 295 - * } 296 - * 297 293 * static struct drm_driver driver_drm_driver = { 298 294 * [...] 299 - * .release = driver_drm_release, 300 295 * }; 301 296 * 302 297 * static int driver_probe(struct platform_device *pdev) ··· 305 322 * 306 323 * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver); 307 324 * if (ret) { 308 - * kfree(drm); 325 + * kfree(priv); 309 326 * return ret; 310 327 * } 328 + * drmm_add_final_kfree(drm, priv); 311 329 * 312 - * drm_mode_config_init(drm); 330 + * ret = drmm_mode_config_init(drm); 331 + * if (ret) 332 + * return ret; 313 333 * 314 - * priv->userspace_facing = kzalloc(..., GFP_KERNEL); 334 + * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL); 315 335 * if (!priv->userspace_facing) 316 336 * return -ENOMEM; 317 337 * ··· 566 580 * used. 567 581 */ 568 582 583 + static void drm_dev_init_release(struct drm_device *dev, void *res) 584 + { 585 + drm_legacy_ctxbitmap_cleanup(dev); 586 + drm_legacy_remove_map_hash(dev); 587 + drm_fs_inode_free(dev->anon_inode); 588 + 589 + put_device(dev->dev); 590 + /* Prevent use-after-free in drm_managed_release when debugging is 591 + * enabled. Slightly awkward, but can't really be helped. */ 592 + dev->dev = NULL; 593 + mutex_destroy(&dev->master_mutex); 594 + mutex_destroy(&dev->clientlist_mutex); 595 + mutex_destroy(&dev->filelist_mutex); 596 + mutex_destroy(&dev->struct_mutex); 597 + drm_legacy_destroy_members(dev); 598 + } 599 + 569 600 /** 570 601 * drm_dev_init - Initialise new DRM device 571 602 * @dev: DRM device ··· 611 608 * arbitrary offset, you must supply a &drm_driver.release callback and control 612 609 * the finalization explicitly. 
613 610 * 611 + * Note that drivers must call drmm_add_final_kfree() after this function has 612 + * completed successfully. 613 + * 614 614 * RETURNS: 615 615 * 0 on success, or error code on failure. 616 616 */ ··· 635 629 dev->dev = get_device(parent); 636 630 dev->driver = driver; 637 631 632 + INIT_LIST_HEAD(&dev->managed.resources); 633 + spin_lock_init(&dev->managed.lock); 634 + 638 635 /* no per-device feature limits by default */ 639 636 dev->driver_features = ~0u; 640 637 ··· 653 644 mutex_init(&dev->clientlist_mutex); 654 645 mutex_init(&dev->master_mutex); 655 646 647 + ret = drmm_add_action(dev, drm_dev_init_release, NULL); 648 + if (ret) 649 + return ret; 650 + 656 651 dev->anon_inode = drm_fs_inode_new(); 657 652 if (IS_ERR(dev->anon_inode)) { 658 653 ret = PTR_ERR(dev->anon_inode); 659 654 DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); 660 - goto err_free; 655 + goto err; 661 656 } 662 657 663 658 if (drm_core_check_feature(dev, DRIVER_RENDER)) { 664 659 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); 665 660 if (ret) 666 - goto err_minors; 661 + goto err; 667 662 } 668 663 669 664 ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY); 670 665 if (ret) 671 - goto err_minors; 666 + goto err; 672 667 673 668 ret = drm_legacy_create_map_hash(dev); 674 669 if (ret) 675 - goto err_minors; 670 + goto err; 676 671 677 672 drm_legacy_ctxbitmap_init(dev); 678 673 ··· 684 671 ret = drm_gem_init(dev); 685 672 if (ret) { 686 673 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); 687 - goto err_ctxbitmap; 674 + goto err; 688 675 } 689 676 } 690 677 691 678 ret = drm_dev_set_unique(dev, dev_name(parent)); 692 679 if (ret) 693 - goto err_setunique; 680 + goto err; 694 681 695 682 return 0; 696 683 697 - err_setunique: 698 - if (drm_core_check_feature(dev, DRIVER_GEM)) 699 - drm_gem_destroy(dev); 700 - err_ctxbitmap: 701 - drm_legacy_ctxbitmap_cleanup(dev); 702 - drm_legacy_remove_map_hash(dev); 703 - err_minors: 704 - drm_minor_free(dev, 
DRM_MINOR_PRIMARY); 705 - drm_minor_free(dev, DRM_MINOR_RENDER); 706 - drm_fs_inode_free(dev->anon_inode); 707 - err_free: 708 - put_device(dev->dev); 709 - mutex_destroy(&dev->master_mutex); 710 - mutex_destroy(&dev->clientlist_mutex); 711 - mutex_destroy(&dev->filelist_mutex); 712 - mutex_destroy(&dev->struct_mutex); 713 - drm_legacy_destroy_members(dev); 684 + err: 685 + drm_managed_release(dev); 686 + 714 687 return ret; 715 688 } 716 689 EXPORT_SYMBOL(drm_dev_init); ··· 713 714 * @driver: DRM driver 714 715 * 715 716 * Managed drm_dev_init(). The DRM device initialized with this function is 716 - * automatically put on driver detach using drm_dev_put(). You must supply a 717 - * &drm_driver.release callback to control the finalization explicitly. 717 + * automatically put on driver detach using drm_dev_put(). 718 + * 719 + * Note that drivers must call drmm_add_final_kfree() after this function has 720 + * completed successfully. 718 721 * 719 722 * RETURNS: 720 723 * 0 on success, or error code on failure. ··· 726 725 struct drm_driver *driver) 727 726 { 728 727 int ret; 729 - 730 - if (WARN_ON(!driver->release)) 731 - return -EINVAL; 732 728 733 729 ret = drm_dev_init(dev, driver, parent); 734 730 if (ret) ··· 738 740 return ret; 739 741 } 740 742 EXPORT_SYMBOL(devm_drm_dev_init); 741 - 742 - /** 743 - * drm_dev_fini - Finalize a dead DRM device 744 - * @dev: DRM device 745 - * 746 - * Finalize a dead DRM device. This is the converse to drm_dev_init() and 747 - * frees up all data allocated by it. All driver private data should be 748 - * finalized first. Note that this function does not free the @dev, that is 749 - * left to the caller. 750 - * 751 - * The ref-count of @dev must be zero, and drm_dev_fini() should only be called 752 - * from a &drm_driver.release callback. 
753 - */ 754 - void drm_dev_fini(struct drm_device *dev) 755 - { 756 - drm_vblank_cleanup(dev); 757 - 758 - if (drm_core_check_feature(dev, DRIVER_GEM)) 759 - drm_gem_destroy(dev); 760 - 761 - drm_legacy_ctxbitmap_cleanup(dev); 762 - drm_legacy_remove_map_hash(dev); 763 - drm_fs_inode_free(dev->anon_inode); 764 - 765 - drm_minor_free(dev, DRM_MINOR_PRIMARY); 766 - drm_minor_free(dev, DRM_MINOR_RENDER); 767 - 768 - put_device(dev->dev); 769 - 770 - mutex_destroy(&dev->master_mutex); 771 - mutex_destroy(&dev->clientlist_mutex); 772 - mutex_destroy(&dev->filelist_mutex); 773 - mutex_destroy(&dev->struct_mutex); 774 - drm_legacy_destroy_members(dev); 775 - kfree(dev->unique); 776 - } 777 - EXPORT_SYMBOL(drm_dev_fini); 778 743 779 744 /** 780 745 * drm_dev_alloc - Allocate new DRM device ··· 777 816 return ERR_PTR(ret); 778 817 } 779 818 819 + drmm_add_final_kfree(dev, dev); 820 + 780 821 return dev; 781 822 } 782 823 EXPORT_SYMBOL(drm_dev_alloc); ··· 787 824 { 788 825 struct drm_device *dev = container_of(ref, struct drm_device, ref); 789 826 790 - if (dev->driver->release) { 827 + if (dev->driver->release) 791 828 dev->driver->release(dev); 792 - } else { 793 - drm_dev_fini(dev); 794 - kfree(dev); 795 - } 829 + 830 + drm_managed_release(dev); 831 + 832 + if (dev->managed.final_kfree) 833 + kfree(dev->managed.final_kfree); 796 834 } 797 835 798 836 /** ··· 910 946 struct drm_driver *driver = dev->driver; 911 947 int ret; 912 948 949 + if (!driver->load) 950 + drm_mode_config_validate(dev); 951 + 952 + WARN_ON(!dev->managed.final_kfree); 953 + 913 954 if (drm_dev_needs_global_mutex(dev)) 914 955 mutex_lock(&drm_global_mutex); 915 956 ··· 1015 1046 */ 1016 1047 int drm_dev_set_unique(struct drm_device *dev, const char *name) 1017 1048 { 1018 - kfree(dev->unique); 1019 - dev->unique = kstrdup(name, GFP_KERNEL); 1049 + drmm_kfree(dev, dev->unique); 1050 + dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL); 1020 1051 1021 1052 return dev->unique ? 0 : -ENOMEM; 1022 1053 }
+46 -57
drivers/gpu/drm/drm_edid.c
··· 1583 1583 MODULE_PARM_DESC(edid_fixup, 1584 1584 "Minimum number of valid EDID header bytes (0-8, default 6)"); 1585 1585 1586 - static void drm_get_displayid(struct drm_connector *connector, 1587 - struct edid *edid); 1588 1586 static int validate_displayid(u8 *displayid, int length, int idx); 1589 1587 1590 1588 static int drm_edid_block_checksum(const u8 *raw_edid) ··· 2016 2018 struct edid *drm_get_edid(struct drm_connector *connector, 2017 2019 struct i2c_adapter *adapter) 2018 2020 { 2019 - struct edid *edid; 2020 - 2021 2021 if (connector->force == DRM_FORCE_OFF) 2022 2022 return NULL; 2023 2023 2024 2024 if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter)) 2025 2025 return NULL; 2026 2026 2027 - edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter); 2028 - if (edid) 2029 - drm_get_displayid(connector, edid); 2030 - return edid; 2027 + return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter); 2031 2028 } 2032 2029 EXPORT_SYMBOL(drm_get_edid); 2033 2030 ··· 3205 3212 } 3206 3213 3207 3214 3208 - static u8 *drm_find_displayid_extension(const struct edid *edid) 3215 + static u8 *drm_find_displayid_extension(const struct edid *edid, 3216 + int *length, int *idx) 3209 3217 { 3210 - return drm_find_edid_extension(edid, DISPLAYID_EXT); 3218 + u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT); 3219 + struct displayid_hdr *base; 3220 + int ret; 3221 + 3222 + if (!displayid) 3223 + return NULL; 3224 + 3225 + /* EDID extensions block checksum isn't for us */ 3226 + *length = EDID_LENGTH - 1; 3227 + *idx = 1; 3228 + 3229 + ret = validate_displayid(displayid, *length, *idx); 3230 + if (ret) 3231 + return NULL; 3232 + 3233 + base = (struct displayid_hdr *)&displayid[*idx]; 3234 + *length = *idx + sizeof(*base) + base->bytes; 3235 + 3236 + return displayid; 3211 3237 } 3212 3238 3213 3239 static u8 *drm_find_cea_extension(const struct edid *edid) 3214 3240 { 3215 - int ret; 3216 - int idx = 1; 3217 - int length = 
EDID_LENGTH; 3241 + int length, idx; 3218 3242 struct displayid_block *block; 3219 3243 u8 *cea; 3220 3244 u8 *displayid; ··· 3242 3232 return cea; 3243 3233 3244 3234 /* CEA blocks can also be found embedded in a DisplayID block */ 3245 - displayid = drm_find_displayid_extension(edid); 3235 + displayid = drm_find_displayid_extension(edid, &length, &idx); 3246 3236 if (!displayid) 3247 - return NULL; 3248 - 3249 - ret = validate_displayid(displayid, length, idx); 3250 - if (ret) 3251 3237 return NULL; 3252 3238 3253 3239 idx += sizeof(struct displayid_hdr); ··· 5090 5084 5091 5085 static int validate_displayid(u8 *displayid, int length, int idx) 5092 5086 { 5093 - int i; 5087 + int i, dispid_length; 5094 5088 u8 csum = 0; 5095 5089 struct displayid_hdr *base; 5096 5090 ··· 5099 5093 DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", 5100 5094 base->rev, base->bytes, base->prod_id, base->ext_count); 5101 5095 5102 - if (base->bytes + 5 > length - idx) 5096 + /* +1 for DispID checksum */ 5097 + dispid_length = sizeof(*base) + base->bytes + 1; 5098 + if (dispid_length > length - idx) 5103 5099 return -EINVAL; 5104 - for (i = idx; i <= base->bytes + 5; i++) { 5105 - csum += displayid[i]; 5106 - } 5100 + 5101 + for (i = 0; i < dispid_length; i++) 5102 + csum += displayid[idx + i]; 5107 5103 if (csum) { 5108 5104 DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); 5109 5105 return -EINVAL; 5110 5106 } 5107 + 5111 5108 return 0; 5112 5109 } 5113 5110 ··· 5189 5180 struct edid *edid) 5190 5181 { 5191 5182 u8 *displayid; 5192 - int ret; 5193 - int idx = 1; 5194 - int length = EDID_LENGTH; 5183 + int length, idx; 5195 5184 struct displayid_block *block; 5196 5185 int num_modes = 0; 5197 5186 5198 - displayid = drm_find_displayid_extension(edid); 5187 + displayid = drm_find_displayid_extension(edid, &length, &idx); 5199 5188 if (!displayid) 5200 - return 0; 5201 - 5202 - ret = validate_displayid(displayid, length, idx); 5203 - if (ret) 5204 5189 return 0; 
5205 5190 5206 5191 idx += sizeof(struct displayid_hdr); ··· 5785 5782 EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode); 5786 5783 5787 5784 static int drm_parse_tiled_block(struct drm_connector *connector, 5788 - struct displayid_block *block) 5785 + const struct displayid_block *block) 5789 5786 { 5790 - struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block; 5787 + const struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block; 5791 5788 u16 w, h; 5792 5789 u8 tile_v_loc, tile_h_loc; 5793 5790 u8 num_v_tile, num_h_tile; ··· 5838 5835 return 0; 5839 5836 } 5840 5837 5841 - static int drm_parse_display_id(struct drm_connector *connector, 5842 - u8 *displayid, int length, 5843 - bool is_edid_extension) 5838 + static int drm_displayid_parse_tiled(struct drm_connector *connector, 5839 + const u8 *displayid, int length, int idx) 5844 5840 { 5845 - /* if this is an EDID extension the first byte will be 0x70 */ 5846 - int idx = 0; 5847 - struct displayid_block *block; 5841 + const struct displayid_block *block; 5848 5842 int ret; 5849 - 5850 - if (is_edid_extension) 5851 - idx = 1; 5852 - 5853 - ret = validate_displayid(displayid, length, idx); 5854 - if (ret) 5855 - return ret; 5856 5843 5857 5844 idx += sizeof(struct displayid_hdr); 5858 5845 for_each_displayid_db(displayid, block, idx, length) { ··· 5855 5862 if (ret) 5856 5863 return ret; 5857 5864 break; 5858 - case DATA_BLOCK_TYPE_1_DETAILED_TIMING: 5859 - /* handled in mode gathering code. */ 5860 - break; 5861 - case DATA_BLOCK_CTA: 5862 - /* handled in the cea parser code. 
*/ 5863 - break; 5864 5865 default: 5865 5866 DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag); 5866 5867 break; ··· 5863 5876 return 0; 5864 5877 } 5865 5878 5866 - static void drm_get_displayid(struct drm_connector *connector, 5867 - struct edid *edid) 5879 + void drm_update_tile_info(struct drm_connector *connector, 5880 + const struct edid *edid) 5868 5881 { 5869 - void *displayid = NULL; 5882 + const void *displayid = NULL; 5883 + int length, idx; 5870 5884 int ret; 5885 + 5871 5886 connector->has_tile = false; 5872 - displayid = drm_find_displayid_extension(edid); 5887 + displayid = drm_find_displayid_extension(edid, &length, &idx); 5873 5888 if (!displayid) { 5874 5889 /* drop reference to any tile group we had */ 5875 5890 goto out_drop_ref; 5876 5891 } 5877 5892 5878 - ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true); 5893 + ret = drm_displayid_parse_tiled(connector, displayid, length, idx); 5879 5894 if (ret < 0) 5880 5895 goto out_drop_ref; 5881 5896 if (!connector->has_tile)
+20 -11
drivers/gpu/drm/drm_fb_helper.c
··· 514 514 if (ret) 515 515 goto err_release; 516 516 517 + /* 518 + * TODO: We really should be smarter here and alloc an apperture 519 + * for each IORESOURCE_MEM resource helper->dev->dev has and also 520 + * init the ranges of the appertures based on the resources. 521 + * Note some drivers currently count on there being only 1 empty 522 + * aperture and fill this themselves, these will need to be dealt 523 + * with somehow when fixing this. 524 + */ 517 525 info->apertures = alloc_apertures(1); 518 526 if (!info->apertures) { 519 527 ret = -ENOMEM; ··· 2170 2162 * 2171 2163 * This function sets up generic fbdev emulation for drivers that supports 2172 2164 * dumb buffers with a virtual address and that can be mmap'ed. 2165 + * drm_fbdev_generic_setup() shall be called after the DRM driver registered 2166 + * the new DRM device with drm_dev_register(). 2173 2167 * 2174 2168 * Restore, hotplug events and teardown are all taken care of. Drivers that do 2175 2169 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. ··· 2188 2178 * Setup will be retried on the next hotplug event. 2189 2179 * 2190 2180 * The fbdev is destroyed by drm_dev_unregister(). 2191 - * 2192 - * Returns: 2193 - * Zero on success or negative error code on failure. 
2194 2181 */ 2195 - int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) 2182 + void drm_fbdev_generic_setup(struct drm_device *dev, 2183 + unsigned int preferred_bpp) 2196 2184 { 2197 2185 struct drm_fb_helper *fb_helper; 2198 2186 int ret; 2199 2187 2200 - WARN(dev->fb_helper, "fb_helper is already set!\n"); 2188 + drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); 2189 + drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); 2201 2190 2202 2191 if (!drm_fbdev_emulation) 2203 - return 0; 2192 + return; 2204 2193 2205 2194 fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); 2206 - if (!fb_helper) 2207 - return -ENOMEM; 2195 + if (!fb_helper) { 2196 + drm_err(dev, "Failed to allocate fb_helper\n"); 2197 + return; 2198 + } 2208 2199 2209 2200 ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); 2210 2201 if (ret) { 2211 2202 kfree(fb_helper); 2212 2203 drm_err(dev, "Failed to register client: %d\n", ret); 2213 - return ret; 2204 + return; 2214 2205 } 2215 2206 2216 2207 if (!preferred_bpp) ··· 2225 2214 drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); 2226 2215 2227 2216 drm_client_register(&fb_helper->client); 2228 - 2229 - return 0; 2230 2217 } 2231 2218 EXPORT_SYMBOL(drm_fbdev_generic_setup); 2232 2219
+4 -4
drivers/gpu/drm/drm_framebuffer.c
··· 1207 1207 { "framebuffer", drm_framebuffer_info, 0 }, 1208 1208 }; 1209 1209 1210 - int drm_framebuffer_debugfs_init(struct drm_minor *minor) 1210 + void drm_framebuffer_debugfs_init(struct drm_minor *minor) 1211 1211 { 1212 - return drm_debugfs_create_files(drm_framebuffer_debugfs_list, 1213 - ARRAY_SIZE(drm_framebuffer_debugfs_list), 1214 - minor->debugfs_root, minor); 1212 + drm_debugfs_create_files(drm_framebuffer_debugfs_list, 1213 + ARRAY_SIZE(drm_framebuffer_debugfs_list), 1214 + minor->debugfs_root, minor); 1215 1215 } 1216 1216 #endif
+11 -12
drivers/gpu/drm/drm_gem.c
··· 44 44 #include <drm/drm_drv.h> 45 45 #include <drm/drm_file.h> 46 46 #include <drm/drm_gem.h> 47 + #include <drm/drm_managed.h> 47 48 #include <drm/drm_print.h> 48 49 #include <drm/drm_vma_manager.h> 49 50 ··· 78 77 * up at a later date, and as our interface with shmfs for memory allocation. 79 78 */ 80 79 80 + static void 81 + drm_gem_init_release(struct drm_device *dev, void *ptr) 82 + { 83 + drm_vma_offset_manager_destroy(dev->vma_offset_manager); 84 + } 85 + 81 86 /** 82 87 * drm_gem_init - Initialize the GEM device fields 83 88 * @dev: drm_devic structure to initialize ··· 96 89 mutex_init(&dev->object_name_lock); 97 90 idr_init_base(&dev->object_name_idr, 1); 98 91 99 - vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL); 92 + vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), 93 + GFP_KERNEL); 100 94 if (!vma_offset_manager) { 101 95 DRM_ERROR("out of memory\n"); 102 96 return -ENOMEM; ··· 108 100 DRM_FILE_PAGE_OFFSET_START, 109 101 DRM_FILE_PAGE_OFFSET_SIZE); 110 102 111 - return 0; 112 - } 113 - 114 - void 115 - drm_gem_destroy(struct drm_device *dev) 116 - { 117 - 118 - drm_vma_offset_manager_destroy(dev->vma_offset_manager); 119 - kfree(dev->vma_offset_manager); 120 - dev->vma_offset_manager = NULL; 103 + return drmm_add_action(dev, drm_gem_init_release, NULL); 121 104 } 122 105 123 106 /** ··· 431 432 * drm_gem_handle_create - create a gem handle for an object 432 433 * @file_priv: drm file-private structure to register the handle for 433 434 * @obj: object to register 434 - * @handlep: pionter to return the created handle to the caller 435 + * @handlep: pointer to return the created handle to the caller 435 436 * 436 437 * Create a handle for this object. This adds a handle reference to the object, 437 438 * which includes a regular reference count. Callers will likely want to
+195 -25
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 21 21 #include <drm/drm_modeset_helper.h> 22 22 #include <drm/drm_simple_kms_helper.h> 23 23 24 + #define AFBC_HEADER_SIZE 16 25 + #define AFBC_TH_LAYOUT_ALIGNMENT 8 26 + #define AFBC_HDR_ALIGN 64 27 + #define AFBC_SUPERBLOCK_PIXELS 256 28 + #define AFBC_SUPERBLOCK_ALIGNMENT 128 29 + #define AFBC_TH_BODY_START_ALIGNMENT 4096 30 + 24 31 /** 25 32 * DOC: overview 26 33 * ··· 61 54 } 62 55 EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj); 63 56 64 - static struct drm_framebuffer * 65 - drm_gem_fb_alloc(struct drm_device *dev, 57 + static int 58 + drm_gem_fb_init(struct drm_device *dev, 59 + struct drm_framebuffer *fb, 66 60 const struct drm_mode_fb_cmd2 *mode_cmd, 67 61 struct drm_gem_object **obj, unsigned int num_planes, 68 62 const struct drm_framebuffer_funcs *funcs) 69 63 { 70 - struct drm_framebuffer *fb; 71 64 int ret, i; 72 - 73 - fb = kzalloc(sizeof(*fb), GFP_KERNEL); 74 - if (!fb) 75 - return ERR_PTR(-ENOMEM); 76 65 77 66 drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 78 67 ··· 79 76 if (ret) { 80 77 drm_err(dev, "Failed to init framebuffer: %d\n", ret); 81 78 kfree(fb); 82 - return ERR_PTR(ret); 83 79 } 84 80 85 - return fb; 81 + return ret; 86 82 } 87 83 88 84 /** ··· 125 123 EXPORT_SYMBOL(drm_gem_fb_create_handle); 126 124 127 125 /** 128 - * drm_gem_fb_create_with_funcs() - Helper function for the 129 - * &drm_mode_config_funcs.fb_create 130 - * callback 126 + * drm_gem_fb_init_with_funcs() - Helper function for implementing 127 + * &drm_mode_config_funcs.fb_create 128 + * callback in cases when the driver 129 + * allocates a subclass of 130 + * struct drm_framebuffer 131 131 * @dev: DRM device 132 + * @fb: framebuffer object 132 133 * @file: DRM file that holds the GEM handle(s) backing the framebuffer 133 134 * @mode_cmd: Metadata from the userspace framebuffer creation request 134 135 * @funcs: vtable to be used for the new framebuffer object ··· 139 134 * This function can be used to set &drm_framebuffer_funcs for drivers that need 140 135 * custom 
framebuffer callbacks. Use drm_gem_fb_create() if you don't need to 141 136 * change &drm_framebuffer_funcs. The function does buffer size validation. 137 + * The buffer size validation is for a general case, though, so users should 138 + * pay attention to the checks being appropriate for them or, at least, 139 + * non-conflicting. 142 140 * 143 141 * Returns: 144 - * Pointer to a &drm_framebuffer on success or an error pointer on failure. 142 + * Zero or a negative error code. 145 143 */ 146 - struct drm_framebuffer * 147 - drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, 148 - const struct drm_mode_fb_cmd2 *mode_cmd, 149 - const struct drm_framebuffer_funcs *funcs) 144 + int drm_gem_fb_init_with_funcs(struct drm_device *dev, 145 + struct drm_framebuffer *fb, 146 + struct drm_file *file, 147 + const struct drm_mode_fb_cmd2 *mode_cmd, 148 + const struct drm_framebuffer_funcs *funcs) 150 149 { 151 150 const struct drm_format_info *info; 152 151 struct drm_gem_object *objs[4]; 153 - struct drm_framebuffer *fb; 154 152 int ret, i; 155 153 156 154 info = drm_get_format_info(dev, mode_cmd); 157 155 if (!info) 158 - return ERR_PTR(-EINVAL); 156 + return -EINVAL; 159 157 160 158 for (i = 0; i < info->num_planes; i++) { 161 159 unsigned int width = mode_cmd->width / (i ? 
info->hsub : 1); ··· 183 175 } 184 176 } 185 177 186 - fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs); 187 - if (IS_ERR(fb)) { 188 - ret = PTR_ERR(fb); 178 + ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs); 179 + if (ret) 189 180 goto err_gem_object_put; 190 - } 191 181 192 - return fb; 182 + return 0; 193 183 194 184 err_gem_object_put: 195 185 for (i--; i >= 0; i--) 196 186 drm_gem_object_put_unlocked(objs[i]); 197 187 198 - return ERR_PTR(ret); 188 + return ret; 189 + } 190 + EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs); 191 + 192 + /** 193 + * drm_gem_fb_create_with_funcs() - Helper function for the 194 + * &drm_mode_config_funcs.fb_create 195 + * callback 196 + * @dev: DRM device 197 + * @file: DRM file that holds the GEM handle(s) backing the framebuffer 198 + * @mode_cmd: Metadata from the userspace framebuffer creation request 199 + * @funcs: vtable to be used for the new framebuffer object 200 + * 201 + * This function can be used to set &drm_framebuffer_funcs for drivers that need 202 + * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to 203 + * change &drm_framebuffer_funcs. The function does buffer size validation. 204 + * 205 + * Returns: 206 + * Pointer to a &drm_framebuffer on success or an error pointer on failure. 
207 + */ 208 + struct drm_framebuffer * 209 + drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, 210 + const struct drm_mode_fb_cmd2 *mode_cmd, 211 + const struct drm_framebuffer_funcs *funcs) 212 + { 213 + struct drm_framebuffer *fb; 214 + int ret; 215 + 216 + fb = kzalloc(sizeof(*fb), GFP_KERNEL); 217 + if (!fb) 218 + return ERR_PTR(-ENOMEM); 219 + 220 + ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs); 221 + if (ret) { 222 + kfree(fb); 223 + return ERR_PTR(ret); 224 + } 225 + 226 + return fb; 199 227 } 200 228 EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs); 201 229 ··· 308 264 &drm_gem_fb_funcs_dirtyfb); 309 265 } 310 266 EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); 267 + 268 + static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev, 269 + const struct drm_mode_fb_cmd2 *mode_cmd) 270 + { 271 + const struct drm_format_info *info; 272 + 273 + info = drm_get_format_info(dev, mode_cmd); 274 + 275 + /* use whatever a driver has set */ 276 + if (info->cpp[0]) 277 + return info->cpp[0] * 8; 278 + 279 + /* guess otherwise */ 280 + switch (info->format) { 281 + case DRM_FORMAT_YUV420_8BIT: 282 + return 12; 283 + case DRM_FORMAT_YUV420_10BIT: 284 + return 15; 285 + case DRM_FORMAT_VUY101010: 286 + return 30; 287 + default: 288 + break; 289 + } 290 + 291 + /* all attempts failed */ 292 + return 0; 293 + } 294 + 295 + static int drm_gem_afbc_min_size(struct drm_device *dev, 296 + const struct drm_mode_fb_cmd2 *mode_cmd, 297 + struct drm_afbc_framebuffer *afbc_fb) 298 + { 299 + __u32 n_blocks, w_alignment, h_alignment, hdr_alignment; 300 + /* remove bpp when all users properly encode cpp in drm_format_info */ 301 + __u32 bpp; 302 + 303 + switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) { 304 + case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16: 305 + afbc_fb->block_width = 16; 306 + afbc_fb->block_height = 16; 307 + break; 308 + case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8: 309 + afbc_fb->block_width = 32; 310 + afbc_fb->block_height 
= 8; 311 + break; 312 + /* no user exists yet - fall through */ 313 + case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4: 314 + case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4: 315 + default: 316 + drm_dbg_kms(dev, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n", 317 + mode_cmd->modifier[0] 318 + & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK); 319 + return -EINVAL; 320 + } 321 + 322 + /* tiled header afbc */ 323 + w_alignment = afbc_fb->block_width; 324 + h_alignment = afbc_fb->block_height; 325 + hdr_alignment = AFBC_HDR_ALIGN; 326 + if (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_TILED) { 327 + w_alignment *= AFBC_TH_LAYOUT_ALIGNMENT; 328 + h_alignment *= AFBC_TH_LAYOUT_ALIGNMENT; 329 + hdr_alignment = AFBC_TH_BODY_START_ALIGNMENT; 330 + } 331 + 332 + afbc_fb->aligned_width = ALIGN(mode_cmd->width, w_alignment); 333 + afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment); 334 + afbc_fb->offset = mode_cmd->offsets[0]; 335 + 336 + bpp = drm_gem_afbc_get_bpp(dev, mode_cmd); 337 + if (!bpp) { 338 + drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp); 339 + return -EINVAL; 340 + } 341 + 342 + n_blocks = (afbc_fb->aligned_width * afbc_fb->aligned_height) 343 + / AFBC_SUPERBLOCK_PIXELS; 344 + afbc_fb->afbc_size = ALIGN(n_blocks * AFBC_HEADER_SIZE, hdr_alignment); 345 + afbc_fb->afbc_size += n_blocks * ALIGN(bpp * AFBC_SUPERBLOCK_PIXELS / 8, 346 + AFBC_SUPERBLOCK_ALIGNMENT); 347 + 348 + return 0; 349 + } 350 + 351 + /** 352 + * drm_gem_fb_afbc_init() - Helper function for drivers using afbc to 353 + * fill and validate all the afbc-specific 354 + * struct drm_afbc_framebuffer members 355 + * 356 + * @dev: DRM device 357 + * @afbc_fb: afbc-specific framebuffer 358 + * @mode_cmd: Metadata from the userspace framebuffer creation request 359 + * @afbc_fb: afbc framebuffer 360 + * 361 + * This function can be used by drivers which support afbc to complete 362 + * the preparation of struct drm_afbc_framebuffer. 
It must be called after 363 + * allocating the said struct and calling drm_gem_fb_init_with_funcs(). 364 + * It is caller's responsibility to put afbc_fb->base.obj objects in case 365 + * the call is unsuccessful. 366 + * 367 + * Returns: 368 + * Zero on success or a negative error value on failure. 369 + */ 370 + int drm_gem_fb_afbc_init(struct drm_device *dev, 371 + const struct drm_mode_fb_cmd2 *mode_cmd, 372 + struct drm_afbc_framebuffer *afbc_fb) 373 + { 374 + const struct drm_format_info *info; 375 + struct drm_gem_object **objs; 376 + int ret; 377 + 378 + objs = afbc_fb->base.obj; 379 + info = drm_get_format_info(dev, mode_cmd); 380 + if (!info) 381 + return -EINVAL; 382 + 383 + ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb); 384 + if (ret < 0) 385 + return ret; 386 + 387 + if (objs[0]->size < afbc_fb->afbc_size) 388 + return -EINVAL; 389 + 390 + return 0; 391 + } 392 + EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init); 311 393 312 394 /** 313 395 * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
+102 -22
drivers/gpu/drm/drm_gem_vram_helper.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 + #include <linux/module.h> 4 + 3 5 #include <drm/drm_debugfs.h> 4 6 #include <drm/drm_device.h> 5 7 #include <drm/drm_drv.h> 6 8 #include <drm/drm_file.h> 7 9 #include <drm/drm_framebuffer.h> 10 + #include <drm/drm_gem_framebuffer_helper.h> 8 11 #include <drm/drm_gem_ttm_helper.h> 9 12 #include <drm/drm_gem_vram_helper.h> 10 13 #include <drm/drm_mode.h> ··· 21 18 /** 22 19 * DOC: overview 23 20 * 24 - * This library provides a GEM buffer object that is backed by video RAM 25 - * (VRAM). It can be used for framebuffer devices with dedicated memory. 21 + * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM 22 + * buffer object that is backed by video RAM (VRAM). It can be used for 23 + * framebuffer devices with dedicated memory. 26 24 * 27 25 * The data structure &struct drm_vram_mm and its helpers implement a memory 28 - * manager for simple framebuffer devices with dedicated video memory. Buffer 29 - * objects are either placed in video RAM or evicted to system memory. The rsp. 30 - * buffer object is provided by &struct drm_gem_vram_object. 26 + * manager for simple framebuffer devices with dedicated video memory. GEM 27 + * VRAM buffer objects are either placed in the video memory or remain evicted 28 + * to system memory. 29 + * 30 + * With the GEM interface userspace applications create, manage and destroy 31 + * graphics buffers, such as an on-screen framebuffer. GEM does not provide 32 + * an implementation of these interfaces. It's up to the DRM driver to 33 + * provide an implementation that suits the hardware. If the hardware device 34 + * contains dedicated video memory, the DRM driver can use the VRAM helper 35 + * library. Each active buffer object is stored in video RAM. Active 36 + * buffer are used for drawing the current frame, typically something like 37 + * the frame's scanout buffer or the cursor image. 
If there's no more space 38 + * left in VRAM, inactive GEM objects can be moved to system memory. 39 + * 40 + * The easiest way to use the VRAM helper library is to call 41 + * drm_vram_helper_alloc_mm(). The function allocates and initializes an 42 + * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use 43 + * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and 44 + * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations; 45 + * as illustrated below. 46 + * 47 + * .. code-block:: c 48 + * 49 + * struct file_operations fops ={ 50 + * .owner = THIS_MODULE, 51 + * DRM_VRAM_MM_FILE_OPERATION 52 + * }; 53 + * struct drm_driver drv = { 54 + * .driver_feature = DRM_ ... , 55 + * .fops = &fops, 56 + * DRM_GEM_VRAM_DRIVER 57 + * }; 58 + * 59 + * int init_drm_driver() 60 + * { 61 + * struct drm_device *dev; 62 + * uint64_t vram_base; 63 + * unsigned long vram_size; 64 + * int ret; 65 + * 66 + * // setup device, vram base and size 67 + * // ... 68 + * 69 + * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size); 70 + * if (ret) 71 + * return ret; 72 + * return 0; 73 + * } 74 + * 75 + * This creates an instance of &struct drm_vram_mm, exports DRM userspace 76 + * interfaces for GEM buffer management and initializes file operations to 77 + * allow for accessing created GEM buffers. With this setup, the DRM driver 78 + * manages an area of video RAM with VRAM MM and provides GEM VRAM objects 79 + * to userspace. 80 + * 81 + * To clean up the VRAM memory management, call drm_vram_helper_release_mm() 82 + * in the driver's clean-up code. 83 + * 84 + * .. code-block:: c 85 + * 86 + * void fini_drm_driver() 87 + * { 88 + * struct drm_device *dev = ...; 89 + * 90 + * drm_vram_helper_release_mm(dev); 91 + * } 92 + * 93 + * For drawing or scanout operations, buffer object have to be pinned in video 94 + * RAM. 
Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or 95 + * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system 96 + * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards. 97 + * 98 + * A buffer object that is pinned in video RAM has a fixed address within that 99 + * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically 100 + * it's used to program the hardware's scanout engine for framebuffers, set 101 + * the cursor overlay's image for a mouse cursor, or use it as input to the 102 + * hardware's draing engine. 103 + * 104 + * To access a buffer object's memory from the DRM driver, call 105 + * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address 106 + * space and returns the memory address. Use drm_gem_vram_kunmap() to 107 + * release the mapping. 31 108 */ 32 109 33 110 /* ··· 753 670 * @plane: a DRM plane 754 671 * @new_state: the plane's new state 755 672 * 756 - * During plane updates, this function pins the GEM VRAM 757 - * objects of the plane's new framebuffer to VRAM. Call 758 - * drm_gem_vram_plane_helper_cleanup_fb() to unpin them. 673 + * During plane updates, this function sets the plane's fence and 674 + * pins the GEM VRAM objects of the plane's new framebuffer to VRAM. 675 + * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them. 
759 676 * 760 677 * Returns: 761 678 * 0 on success, or ··· 780 697 if (ret) 781 698 goto err_drm_gem_vram_unpin; 782 699 } 700 + 701 + ret = drm_gem_fb_prepare_fb(plane, new_state); 702 + if (ret) 703 + goto err_drm_gem_vram_unpin; 783 704 784 705 return 0; 785 706 ··· 1105 1018 * struct drm_vram_mm 1106 1019 */ 1107 1020 1108 - #if defined(CONFIG_DEBUG_FS) 1109 1021 static int drm_vram_mm_debugfs(struct seq_file *m, void *data) 1110 1022 { 1111 1023 struct drm_info_node *node = (struct drm_info_node *) m->private; ··· 1121 1035 static const struct drm_info_list drm_vram_mm_debugfs_list[] = { 1122 1036 { "vram-mm", drm_vram_mm_debugfs, 0, NULL }, 1123 1037 }; 1124 - #endif 1125 1038 1126 1039 /** 1127 1040 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file. 1128 1041 * 1129 1042 * @minor: drm minor device. 1130 1043 * 1131 - * Returns: 1132 - * 0 on success, or 1133 - * a negative error code otherwise. 1134 1044 */ 1135 - int drm_vram_mm_debugfs_init(struct drm_minor *minor) 1045 + void drm_vram_mm_debugfs_init(struct drm_minor *minor) 1136 1046 { 1137 - int ret = 0; 1138 - 1139 - #if defined(CONFIG_DEBUG_FS) 1140 - ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list, 1141 - ARRAY_SIZE(drm_vram_mm_debugfs_list), 1142 - minor->debugfs_root, minor); 1143 - #endif 1144 - return ret; 1047 + drm_debugfs_create_files(drm_vram_mm_debugfs_list, 1048 + ARRAY_SIZE(drm_vram_mm_debugfs_list), 1049 + minor->debugfs_root, minor); 1145 1050 } 1146 1051 EXPORT_SYMBOL(drm_vram_mm_debugfs_init); 1147 1052 ··· 1279 1202 return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp); 1280 1203 } 1281 1204 EXPORT_SYMBOL(drm_vram_helper_mode_valid); 1205 + 1206 + MODULE_DESCRIPTION("DRM VRAM memory-management helpers"); 1207 + MODULE_LICENSE("GPL");
+4 -3
drivers/gpu/drm/drm_internal.h
··· 89 89 struct drm_minor *drm_minor_acquire(unsigned int minor_id); 90 90 void drm_minor_release(struct drm_minor *minor); 91 91 92 + /* drm_managed.c */ 93 + void drm_managed_release(struct drm_device *dev); 94 + 92 95 /* drm_vblank.c */ 93 96 void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe); 94 - void drm_vblank_cleanup(struct drm_device *dev); 95 97 96 98 /* IOCTLS */ 97 99 int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, ··· 143 141 /* drm_gem.c */ 144 142 struct drm_gem_object; 145 143 int drm_gem_init(struct drm_device *dev); 146 - void drm_gem_destroy(struct drm_device *dev); 147 144 int drm_gem_handle_create_tail(struct drm_file *file_priv, 148 145 struct drm_gem_object *obj, 149 146 u32 *handlep); ··· 236 235 /* drm_framebuffer.c */ 237 236 void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, 238 237 const struct drm_framebuffer *fb); 239 - int drm_framebuffer_debugfs_init(struct drm_minor *minor); 238 + void drm_framebuffer_debugfs_init(struct drm_minor *minor);
+2 -2
drivers/gpu/drm/drm_ioctl.c
··· 599 599 DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 600 600 DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), 601 601 602 - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), 603 - DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), 602 + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0), 603 + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0), 604 604 605 605 DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), 606 606 DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+275
drivers/gpu/drm/drm_managed.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2020 Intel 4 + * 5 + * Based on drivers/base/devres.c 6 + */ 7 + 8 + #include <drm/drm_managed.h> 9 + 10 + #include <linux/list.h> 11 + #include <linux/slab.h> 12 + #include <linux/spinlock.h> 13 + 14 + #include <drm/drm_device.h> 15 + #include <drm/drm_print.h> 16 + 17 + /** 18 + * DOC: managed resources 19 + * 20 + * Inspired by struct &device managed resources, but tied to the lifetime of 21 + * struct &drm_device, which can outlive the underlying physical device, usually 22 + * when userspace has some open files and other handles to resources still open. 23 + * 24 + * Release actions can be added with drmm_add_action(), memory allocations can 25 + * be done directly with drmm_kmalloc() and the related functions. Everything 26 + * will be released on the final drm_dev_put() in reverse order of how the 27 + * release actions have been added and memory has been allocated since driver 28 + * loading started with drm_dev_init(). 29 + * 30 + * Note that release actions and managed memory can also be added and removed 31 + * during the lifetime of the driver, all the functions are fully concurrent 32 + * safe. But it is recommended to use managed resources only for resources that 33 + * change rarely, if ever, during the lifetime of the &drm_device instance. 34 + */ 35 + 36 + struct drmres_node { 37 + struct list_head entry; 38 + drmres_release_t release; 39 + const char *name; 40 + size_t size; 41 + }; 42 + 43 + struct drmres { 44 + struct drmres_node node; 45 + /* 46 + * Some archs want to perform DMA into kmalloc caches 47 + * and need a guaranteed alignment larger than 48 + * the alignment of a 64-bit integer. 49 + * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same 50 + * buffer alignment as if it was allocated by plain kmalloc(). 
51 + */ 52 + u8 __aligned(ARCH_KMALLOC_MINALIGN) data[]; 53 + }; 54 + 55 + static void free_dr(struct drmres *dr) 56 + { 57 + kfree_const(dr->node.name); 58 + kfree(dr); 59 + } 60 + 61 + void drm_managed_release(struct drm_device *dev) 62 + { 63 + struct drmres *dr, *tmp; 64 + 65 + drm_dbg_drmres(dev, "drmres release begin\n"); 66 + list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) { 67 + drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n", 68 + dr, dr->node.name, dr->node.size); 69 + 70 + if (dr->node.release) 71 + dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL); 72 + 73 + list_del(&dr->node.entry); 74 + free_dr(dr); 75 + } 76 + drm_dbg_drmres(dev, "drmres release end\n"); 77 + } 78 + 79 + /* 80 + * Always inline so that kmalloc_track_caller tracks the actual interesting 81 + * caller outside of drm_managed.c. 82 + */ 83 + static __always_inline struct drmres * alloc_dr(drmres_release_t release, 84 + size_t size, gfp_t gfp, int nid) 85 + { 86 + size_t tot_size; 87 + struct drmres *dr; 88 + 89 + /* We must catch any near-SIZE_MAX cases that could overflow. 
*/ 90 + if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size))) 91 + return NULL; 92 + 93 + dr = kmalloc_node_track_caller(tot_size, gfp, nid); 94 + if (unlikely(!dr)) 95 + return NULL; 96 + 97 + memset(dr, 0, offsetof(struct drmres, data)); 98 + 99 + INIT_LIST_HEAD(&dr->node.entry); 100 + dr->node.release = release; 101 + dr->node.size = size; 102 + 103 + return dr; 104 + } 105 + 106 + static void del_dr(struct drm_device *dev, struct drmres *dr) 107 + { 108 + list_del_init(&dr->node.entry); 109 + 110 + drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n", 111 + dr, dr->node.name, (unsigned long) dr->node.size); 112 + } 113 + 114 + static void add_dr(struct drm_device *dev, struct drmres *dr) 115 + { 116 + unsigned long flags; 117 + 118 + spin_lock_irqsave(&dev->managed.lock, flags); 119 + list_add(&dr->node.entry, &dev->managed.resources); 120 + spin_unlock_irqrestore(&dev->managed.lock, flags); 121 + 122 + drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n", 123 + dr, dr->node.name, (unsigned long) dr->node.size); 124 + } 125 + 126 + /** 127 + * drmm_add_final_kfree - add release action for the final kfree() 128 + * @dev: DRM device 129 + * @container: pointer to the kmalloc allocation containing @dev 130 + * 131 + * Since the allocation containing the struct &drm_device must be allocated 132 + * before it can be initialized with drm_dev_init() there's no way to allocate 133 + * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the 134 + * pointer for this final kfree() must be specified by calling this function. It 135 + * will be released in the final drm_dev_put() for @dev, after all other release 136 + * actions installed through drmm_add_action() have been processed. 
137 + */ 138 + void drmm_add_final_kfree(struct drm_device *dev, void *container) 139 + { 140 + WARN_ON(dev->managed.final_kfree); 141 + WARN_ON(dev < (struct drm_device *) container); 142 + WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container))); 143 + dev->managed.final_kfree = container; 144 + } 145 + EXPORT_SYMBOL(drmm_add_final_kfree); 146 + 147 + int __drmm_add_action(struct drm_device *dev, 148 + drmres_release_t action, 149 + void *data, const char *name) 150 + { 151 + struct drmres *dr; 152 + void **void_ptr; 153 + 154 + dr = alloc_dr(action, data ? sizeof(void*) : 0, 155 + GFP_KERNEL | __GFP_ZERO, 156 + dev_to_node(dev->dev)); 157 + if (!dr) { 158 + drm_dbg_drmres(dev, "failed to add action %s for %p\n", 159 + name, data); 160 + return -ENOMEM; 161 + } 162 + 163 + dr->node.name = kstrdup_const(name, GFP_KERNEL); 164 + if (data) { 165 + void_ptr = (void **)&dr->data; 166 + *void_ptr = data; 167 + } 168 + 169 + add_dr(dev, dr); 170 + 171 + return 0; 172 + } 173 + EXPORT_SYMBOL(__drmm_add_action); 174 + 175 + int __drmm_add_action_or_reset(struct drm_device *dev, 176 + drmres_release_t action, 177 + void *data, const char *name) 178 + { 179 + int ret; 180 + 181 + ret = __drmm_add_action(dev, action, data, name); 182 + if (ret) 183 + action(dev, data); 184 + 185 + return ret; 186 + } 187 + EXPORT_SYMBOL(__drmm_add_action_or_reset); 188 + 189 + /** 190 + * drmm_kmalloc - &drm_device managed kmalloc() 191 + * @dev: DRM device 192 + * @size: size of the memory allocation 193 + * @gfp: GFP allocation flags 194 + * 195 + * This is a &drm_device managed version of kmalloc(). The allocated memory is 196 + * automatically freed on the final drm_dev_put(). Memory can also be freed 197 + * before the final drm_dev_put() by calling drmm_kfree(). 
198 + */ 199 + void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) 200 + { 201 + struct drmres *dr; 202 + 203 + dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev)); 204 + if (!dr) { 205 + drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n", 206 + size, gfp); 207 + return NULL; 208 + } 209 + dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL); 210 + 211 + add_dr(dev, dr); 212 + 213 + return dr->data; 214 + } 215 + EXPORT_SYMBOL(drmm_kmalloc); 216 + 217 + /** 218 + * drmm_kstrdup - &drm_device managed kstrdup() 219 + * @dev: DRM device 220 + * @s: 0-terminated string to be duplicated 221 + * @gfp: GFP allocation flags 222 + * 223 + * This is a &drm_device managed version of kstrdup(). The allocated memory is 224 + * automatically freed on the final drm_dev_put() and works exactly like a 225 + * memory allocation obtained by drmm_kmalloc(). 226 + */ 227 + char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp) 228 + { 229 + size_t size; 230 + char *buf; 231 + 232 + if (!s) 233 + return NULL; 234 + 235 + size = strlen(s) + 1; 236 + buf = drmm_kmalloc(dev, size, gfp); 237 + if (buf) 238 + memcpy(buf, s, size); 239 + return buf; 240 + } 241 + EXPORT_SYMBOL_GPL(drmm_kstrdup); 242 + 243 + /** 244 + * drmm_kfree - &drm_device managed kfree() 245 + * @dev: DRM device 246 + * @data: memory allocation to be freed 247 + * 248 + * This is a &drm_device managed version of kfree() which can be used to 249 + * release memory allocated through drmm_kmalloc() or any of its related 250 + * functions before the final drm_dev_put() of @dev. 
251 + */ 252 + void drmm_kfree(struct drm_device *dev, void *data) 253 + { 254 + struct drmres *dr_match = NULL, *dr; 255 + unsigned long flags; 256 + 257 + if (!data) 258 + return; 259 + 260 + spin_lock_irqsave(&dev->managed.lock, flags); 261 + list_for_each_entry(dr, &dev->managed.resources, node.entry) { 262 + if (dr->data == data) { 263 + dr_match = dr; 264 + del_dr(dev, dr_match); 265 + break; 266 + } 267 + } 268 + spin_unlock_irqrestore(&dev->managed.lock, flags); 269 + 270 + if (WARN_ON(!dr_match)) 271 + return; 272 + 273 + free_dr(dr_match); 274 + } 275 + EXPORT_SYMBOL(drmm_kfree);
+7 -26
drivers/gpu/drm/drm_mipi_dbi.c
··· 169 169 EXPORT_SYMBOL(mipi_dbi_command_buf); 170 170 171 171 /* This should only be used by mipi_dbi_command() */ 172 - int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len) 172 + int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, 173 + size_t len) 173 174 { 174 175 u8 *buf; 175 176 int ret; ··· 511 510 if (!dbidev->dbi.command) 512 511 return -EINVAL; 513 512 513 + ret = drmm_mode_config_init(drm); 514 + if (ret) 515 + return ret; 516 + 514 517 dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL); 515 518 if (!dbidev->tx_buf) 516 519 return -ENOMEM; ··· 582 577 rotation, bufsize); 583 578 } 584 579 EXPORT_SYMBOL(mipi_dbi_dev_init); 585 - 586 - /** 587 - * mipi_dbi_release - DRM driver release helper 588 - * @drm: DRM device 589 - * 590 - * This function finalizes and frees &mipi_dbi. 591 - * 592 - * Drivers can use this as their &drm_driver->release callback. 593 - */ 594 - void mipi_dbi_release(struct drm_device *drm) 595 - { 596 - struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm); 597 - 598 - DRM_DEBUG_DRIVER("\n"); 599 - 600 - drm_mode_config_cleanup(drm); 601 - drm_dev_fini(drm); 602 - kfree(dbidev); 603 - } 604 - EXPORT_SYMBOL(mipi_dbi_release); 605 580 606 581 /** 607 582 * mipi_dbi_hw_reset - Hardware reset of controller ··· 1293 1308 * controller or getting the read command values. 1294 1309 * Drivers can use this as their &drm_driver->debugfs_init callback. 1295 1310 * 1296 - * Returns: 1297 - * Zero on success, negative error code on failure. 
1298 1311 */ 1299 - int mipi_dbi_debugfs_init(struct drm_minor *minor) 1312 + void mipi_dbi_debugfs_init(struct drm_minor *minor) 1300 1313 { 1301 1314 struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev); 1302 1315 umode_t mode = S_IFREG | S_IWUSR; ··· 1303 1320 mode |= S_IRUGO; 1304 1321 debugfs_create_file("command", mode, minor->debugfs_root, dbidev, 1305 1322 &mipi_dbi_debugfs_command_fops); 1306 - 1307 - return 0; 1308 1323 } 1309 1324 EXPORT_SYMBOL(mipi_dbi_debugfs_init); 1310 1325
+107 -3
drivers/gpu/drm/drm_mode_config.c
··· 25 25 #include <drm/drm_drv.h> 26 26 #include <drm/drm_encoder.h> 27 27 #include <drm/drm_file.h> 28 + #include <drm/drm_managed.h> 28 29 #include <drm/drm_mode_config.h> 29 30 #include <drm/drm_print.h> 30 31 #include <linux/dma-resv.h> ··· 374 373 return 0; 375 374 } 376 375 376 + static void drm_mode_config_init_release(struct drm_device *dev, void *ptr) 377 + { 378 + drm_mode_config_cleanup(dev); 379 + } 380 + 377 381 /** 378 - * drm_mode_config_init - initialize DRM mode_configuration structure 382 + * drmm_mode_config_init - managed DRM mode_configuration structure 383 + * initialization 379 384 * @dev: DRM device 380 385 * 381 386 * Initialize @dev's mode_config structure, used for tracking the graphics ··· 391 384 * problem, since this should happen single threaded at init time. It is the 392 385 * driver's problem to ensure this guarantee. 393 386 * 387 + * Cleanup is automatically handled through registering drm_mode_config_cleanup 388 + * with drmm_add_action(). 389 + * 390 + * Returns: 0 on success, negative error value on failure. 394 391 */ 395 - void drm_mode_config_init(struct drm_device *dev) 392 + int drmm_mode_config_init(struct drm_device *dev) 396 393 { 397 394 mutex_init(&dev->mode_config.mutex); 398 395 drm_modeset_lock_init(&dev->mode_config.connection_mutex); ··· 454 443 drm_modeset_acquire_fini(&modeset_ctx); 455 444 dma_resv_fini(&resv); 456 445 } 446 + 447 + return drmm_add_action_or_reset(dev, drm_mode_config_init_release, 448 + NULL); 457 449 } 458 - EXPORT_SYMBOL(drm_mode_config_init); 450 + EXPORT_SYMBOL(drmm_mode_config_init); 459 451 460 452 /** 461 453 * drm_mode_config_cleanup - free up DRM mode_config info ··· 470 456 * Note that since this /should/ happen single-threaded at driver/device 471 457 * teardown time, no locking is required. It's the driver's job to ensure that 472 458 * this guarantee actually holds true. 
459 + * 460 + * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for 461 + * drivers to explicitly call this function. 473 462 */ 474 463 void drm_mode_config_cleanup(struct drm_device *dev) 475 464 { ··· 549 532 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 550 533 } 551 534 EXPORT_SYMBOL(drm_mode_config_cleanup); 535 + 536 + static u32 full_encoder_mask(struct drm_device *dev) 537 + { 538 + struct drm_encoder *encoder; 539 + u32 encoder_mask = 0; 540 + 541 + drm_for_each_encoder(encoder, dev) 542 + encoder_mask |= drm_encoder_mask(encoder); 543 + 544 + return encoder_mask; 545 + } 546 + 547 + /* 548 + * For some reason we want the encoder itself included in 549 + * possible_clones. Make life easy for drivers by allowing them 550 + * to leave possible_clones unset if no cloning is possible. 551 + */ 552 + static void fixup_encoder_possible_clones(struct drm_encoder *encoder) 553 + { 554 + if (encoder->possible_clones == 0) 555 + encoder->possible_clones = drm_encoder_mask(encoder); 556 + } 557 + 558 + static void validate_encoder_possible_clones(struct drm_encoder *encoder) 559 + { 560 + struct drm_device *dev = encoder->dev; 561 + u32 encoder_mask = full_encoder_mask(dev); 562 + struct drm_encoder *other; 563 + 564 + drm_for_each_encoder(other, dev) { 565 + WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) != 566 + !!(other->possible_clones & drm_encoder_mask(encoder)), 567 + "possible_clones mismatch: " 568 + "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. 
" 569 + "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n", 570 + encoder->base.id, encoder->name, 571 + drm_encoder_mask(encoder), encoder->possible_clones, 572 + other->base.id, other->name, 573 + drm_encoder_mask(other), other->possible_clones); 574 + } 575 + 576 + WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 || 577 + (encoder->possible_clones & ~encoder_mask) != 0, 578 + "Bogus possible_clones: " 579 + "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n", 580 + encoder->base.id, encoder->name, 581 + encoder->possible_clones, encoder_mask); 582 + } 583 + 584 + static u32 full_crtc_mask(struct drm_device *dev) 585 + { 586 + struct drm_crtc *crtc; 587 + u32 crtc_mask = 0; 588 + 589 + drm_for_each_crtc(crtc, dev) 590 + crtc_mask |= drm_crtc_mask(crtc); 591 + 592 + return crtc_mask; 593 + } 594 + 595 + static void validate_encoder_possible_crtcs(struct drm_encoder *encoder) 596 + { 597 + u32 crtc_mask = full_crtc_mask(encoder->dev); 598 + 599 + WARN((encoder->possible_crtcs & crtc_mask) == 0 || 600 + (encoder->possible_crtcs & ~crtc_mask) != 0, 601 + "Bogus possible_crtcs: " 602 + "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n", 603 + encoder->base.id, encoder->name, 604 + encoder->possible_crtcs, crtc_mask); 605 + } 606 + 607 + void drm_mode_config_validate(struct drm_device *dev) 608 + { 609 + struct drm_encoder *encoder; 610 + 611 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 612 + return; 613 + 614 + drm_for_each_encoder(encoder, dev) 615 + fixup_encoder_possible_clones(encoder); 616 + 617 + drm_for_each_encoder(encoder, dev) { 618 + validate_encoder_possible_clones(encoder); 619 + validate_encoder_possible_crtcs(encoder); 620 + } 621 + }
+3 -1
drivers/gpu/drm/drm_pci.c
··· 30 30 #include <drm/drm.h> 31 31 #include <drm/drm_agpsupport.h> 32 32 #include <drm/drm_drv.h> 33 - #include <drm/drm_pci.h> 34 33 #include <drm/drm_print.h> 35 34 36 35 #include "drm_internal.h" 37 36 #include "drm_legacy.h" 37 + 38 + #ifdef CONFIG_DRM_LEGACY 38 39 39 40 /** 40 41 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA. ··· 94 93 } 95 94 96 95 EXPORT_SYMBOL(drm_pci_free); 96 + #endif 97 97 98 98 static int drm_get_pci_domain(struct drm_device *dev) 99 99 {
+75 -19
drivers/gpu/drm/drm_vblank.c
··· 30 30 #include <drm/drm_crtc.h> 31 31 #include <drm/drm_drv.h> 32 32 #include <drm/drm_framebuffer.h> 33 + #include <drm/drm_managed.h> 33 34 #include <drm/drm_modeset_helper_vtables.h> 34 35 #include <drm/drm_print.h> 35 36 #include <drm/drm_vblank.h> ··· 40 39 41 40 /** 42 41 * DOC: vblank handling 42 + * 43 + * From the computer's perspective, every time the monitor displays 44 + * a new frame the scanout engine has "scanned out" the display image 45 + * from top to bottom, one row of pixels at a time. The current row 46 + * of pixels is referred to as the current scanline. 47 + * 48 + * In addition to the display's visible area, there's usually a couple of 49 + * extra scanlines which aren't actually displayed on the screen. 50 + * These extra scanlines don't contain image data and are occasionally used 51 + * for features like audio and infoframes. The region made up of these 52 + * scanlines is referred to as the vertical blanking region, or vblank for 53 + * short. 54 + * 55 + * For historical reference, the vertical blanking period was designed to 56 + * give the electron gun (on CRTs) enough time to move back to the top of 57 + * the screen to start scanning out the next frame. Similar for horizontal 58 + * blanking periods. They were designed to give the electron gun enough 59 + * time to move back to the other side of the screen to start scanning the 60 + * next scanline. 
61 + * 62 + * :: 63 + * 64 + * 65 + * physical → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽ 66 + * top of | | 67 + * display | | 68 + * | New frame | 69 + * | | 70 + * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| 71 + * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline, 72 + * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the 73 + * | | frame as it 74 + * | | travels down 75 + * | | ("sacn out") 76 + * | Old frame | 77 + * | | 78 + * | | 79 + * | | 80 + * | | physical 81 + * | | bottom of 82 + * vertical |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display 83 + * blanking ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ 84 + * region → ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ 85 + * ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ 86 + * start of → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽ 87 + * new frame 88 + * 89 + * "Physical top of display" is the reference point for the high-precision/ 90 + * corrected timestamp. 91 + * 92 + * On a lot of display hardware, programming needs to take effect during the 93 + * vertical blanking period so that settings like gamma, the image buffer 94 + * buffer to be scanned out, etc. can safely be changed without showing 95 + * any visual artifacts on the screen. In some unforgiving hardware, some of 96 + * this programming has to both start and end in the same vblank. To help 97 + * with the timing of the hardware programming, an interrupt is usually 98 + * available to notify the driver when it can start the updating of registers. 99 + * The interrupt is in this context named the vblank interrupt. 100 + * 101 + * The vblank interrupt may be fired at different points depending on the 102 + * hardware. Some hardware implementations will fire the interrupt when the 103 + * new frame start, other implementations will fire the interrupt at different 104 + * points in time. 43 105 * 44 106 * Vertical blanking plays a major role in graphics rendering. 
To achieve 45 107 * tear-free display, users must synchronize page flips and/or rendering to ··· 489 425 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 490 426 } 491 427 492 - void drm_vblank_cleanup(struct drm_device *dev) 428 + static void drm_vblank_init_release(struct drm_device *dev, void *ptr) 493 429 { 494 430 unsigned int pipe; 495 - 496 - /* Bail if the driver didn't call drm_vblank_init() */ 497 - if (dev->num_crtcs == 0) 498 - return; 499 431 500 432 for (pipe = 0; pipe < dev->num_crtcs; pipe++) { 501 433 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; ··· 501 441 502 442 del_timer_sync(&vblank->disable_timer); 503 443 } 504 - 505 - kfree(dev->vblank); 506 - 507 - dev->num_crtcs = 0; 508 444 } 509 445 510 446 /** ··· 509 453 * @num_crtcs: number of CRTCs supported by @dev 510 454 * 511 455 * This function initializes vblank support for @num_crtcs display pipelines. 512 - * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for 513 - * drivers with a &drm_driver.release callback. 456 + * Cleanup is handled automatically through a cleanup function added with 457 + * drmm_add_action(). 514 458 * 515 459 * Returns: 516 460 * Zero on success or a negative error code on failure. 
517 461 */ 518 462 int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) 519 463 { 520 - int ret = -ENOMEM; 464 + int ret; 521 465 unsigned int i; 522 466 523 467 spin_lock_init(&dev->vbl_lock); 524 468 spin_lock_init(&dev->vblank_time_lock); 525 469 470 + dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL); 471 + if (!dev->vblank) 472 + return -ENOMEM; 473 + 526 474 dev->num_crtcs = num_crtcs; 527 475 528 - dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL); 529 - if (!dev->vblank) 530 - goto err; 476 + ret = drmm_add_action(dev, drm_vblank_init_release, NULL); 477 + if (ret) 478 + return ret; 531 479 532 480 for (i = 0; i < num_crtcs; i++) { 533 481 struct drm_vblank_crtc *vblank = &dev->vblank[i]; ··· 546 486 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); 547 487 548 488 return 0; 549 - 550 - err: 551 - dev->num_crtcs = 0; 552 - return ret; 553 489 } 554 490 EXPORT_SYMBOL(drm_vblank_init); 555 491
+2 -2
drivers/gpu/drm/drm_vm.c
··· 595 595 vma->vm_ops = &drm_vm_ops; 596 596 break; 597 597 } 598 + fallthrough; /* to _DRM_FRAME_BUFFER... */ 598 599 #endif 599 - /* fall through - to _DRM_FRAME_BUFFER... */ 600 600 case _DRM_FRAME_BUFFER: 601 601 case _DRM_REGISTERS: 602 602 offset = drm_core_get_reg_ofs(dev); ··· 621 621 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 622 622 return -EAGAIN; 623 623 vma->vm_page_prot = drm_dma_prot(map->type, vma); 624 - /* fall through - to _DRM_SHM */ 624 + fallthrough; /* to _DRM_SHM */ 625 625 case _DRM_SHM: 626 626 vma->vm_ops = &drm_vm_shm_ops; 627 627 vma->vm_private_data = (void *)map;
-94
drivers/gpu/drm/drm_vram_helper_common.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - 3 - #include <linux/module.h> 4 - 5 - /** 6 - * DOC: overview 7 - * 8 - * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM 9 - * buffer object that is backed by video RAM. It can be used for 10 - * framebuffer devices with dedicated memory. The video RAM is managed 11 - * by &struct drm_vram_mm (VRAM MM). 12 - * 13 - * With the GEM interface userspace applications create, manage and destroy 14 - * graphics buffers, such as an on-screen framebuffer. GEM does not provide 15 - * an implementation of these interfaces. It's up to the DRM driver to 16 - * provide an implementation that suits the hardware. If the hardware device 17 - * contains dedicated video memory, the DRM driver can use the VRAM helper 18 - * library. Each active buffer object is stored in video RAM. Active 19 - * buffer are used for drawing the current frame, typically something like 20 - * the frame's scanout buffer or the cursor image. If there's no more space 21 - * left in VRAM, inactive GEM objects can be moved to system memory. 22 - * 23 - * The easiest way to use the VRAM helper library is to call 24 - * drm_vram_helper_alloc_mm(). The function allocates and initializes an 25 - * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use 26 - * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and 27 - * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations; 28 - * as illustrated below. 29 - * 30 - * .. code-block:: c 31 - * 32 - * struct file_operations fops ={ 33 - * .owner = THIS_MODULE, 34 - * DRM_VRAM_MM_FILE_OPERATION 35 - * }; 36 - * struct drm_driver drv = { 37 - * .driver_feature = DRM_ ... , 38 - * .fops = &fops, 39 - * DRM_GEM_VRAM_DRIVER 40 - * }; 41 - * 42 - * int init_drm_driver() 43 - * { 44 - * struct drm_device *dev; 45 - * uint64_t vram_base; 46 - * unsigned long vram_size; 47 - * int ret; 48 - * 49 - * // setup device, vram base and size 50 - * // ... 
51 - * 52 - * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size); 53 - * if (ret) 54 - * return ret; 55 - * return 0; 56 - * } 57 - * 58 - * This creates an instance of &struct drm_vram_mm, exports DRM userspace 59 - * interfaces for GEM buffer management and initializes file operations to 60 - * allow for accessing created GEM buffers. With this setup, the DRM driver 61 - * manages an area of video RAM with VRAM MM and provides GEM VRAM objects 62 - * to userspace. 63 - * 64 - * To clean up the VRAM memory management, call drm_vram_helper_release_mm() 65 - * in the driver's clean-up code. 66 - * 67 - * .. code-block:: c 68 - * 69 - * void fini_drm_driver() 70 - * { 71 - * struct drm_device *dev = ...; 72 - * 73 - * drm_vram_helper_release_mm(dev); 74 - * } 75 - * 76 - * For drawing or scanout operations, buffer object have to be pinned in video 77 - * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or 78 - * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system 79 - * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards. 80 - * 81 - * A buffer object that is pinned in video RAM has a fixed address within that 82 - * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically 83 - * it's used to program the hardware's scanout engine for framebuffers, set 84 - * the cursor overlay's image for a mouse cursor, or use it as input to the 85 - * hardware's draing engine. 86 - * 87 - * To access a buffer object's memory from the DRM driver, call 88 - * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address 89 - * space and returns the memory address. Use drm_gem_vram_kunmap() to 90 - * release the mapping. 91 - */ 92 - 93 - MODULE_DESCRIPTION("DRM VRAM memory-management helpers"); 94 - MODULE_LICENSE("GPL");
+4 -14
drivers/gpu/drm/etnaviv/etnaviv_drv.c
··· 231 231 {"ring", show_each_gpu, 0, etnaviv_ring_show}, 232 232 }; 233 233 234 - static int etnaviv_debugfs_init(struct drm_minor *minor) 234 + static void etnaviv_debugfs_init(struct drm_minor *minor) 235 235 { 236 - struct drm_device *dev = minor->dev; 237 - int ret; 238 - 239 - ret = drm_debugfs_create_files(etnaviv_debugfs_list, 240 - ARRAY_SIZE(etnaviv_debugfs_list), 241 - minor->debugfs_root, minor); 242 - 243 - if (ret) { 244 - dev_err(dev->dev, "could not install etnaviv_debugfs_list\n"); 245 - return ret; 246 - } 247 - 248 - return ret; 236 + drm_debugfs_create_files(etnaviv_debugfs_list, 237 + ARRAY_SIZE(etnaviv_debugfs_list), 238 + minor->debugfs_root, minor); 249 239 } 250 240 #endif 251 241
+2 -6
drivers/gpu/drm/exynos/exynos_dp.c
··· 25 25 #include <drm/drm_panel.h> 26 26 #include <drm/drm_print.h> 27 27 #include <drm/drm_probe_helper.h> 28 + #include <drm/drm_simple_kms_helper.h> 28 29 #include <drm/exynos_drm.h> 29 30 30 31 #include "exynos_drm_crtc.h" ··· 136 135 .disable = exynos_dp_nop, 137 136 }; 138 137 139 - static const struct drm_encoder_funcs exynos_dp_encoder_funcs = { 140 - .destroy = drm_encoder_cleanup, 141 - }; 142 - 143 138 static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp) 144 139 { 145 140 int ret; ··· 164 167 return ret; 165 168 } 166 169 167 - drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, 168 - DRM_MODE_ENCODER_TMDS, NULL); 170 + drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS); 169 171 170 172 drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); 171 173
+2 -6
drivers/gpu/drm/exynos/exynos_drm_dpi.c
··· 14 14 #include <drm/drm_panel.h> 15 15 #include <drm/drm_print.h> 16 16 #include <drm/drm_probe_helper.h> 17 + #include <drm/drm_simple_kms_helper.h> 17 18 18 19 #include <video/of_videomode.h> 19 20 #include <video/videomode.h> ··· 150 149 .disable = exynos_dpi_disable, 151 150 }; 152 151 153 - static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = { 154 - .destroy = drm_encoder_cleanup, 155 - }; 156 - 157 152 enum { 158 153 FIMD_PORT_IN0, 159 154 FIMD_PORT_IN1, ··· 198 201 { 199 202 int ret; 200 203 201 - drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs, 202 - DRM_MODE_ENCODER_TMDS, NULL); 204 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); 203 205 204 206 drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs); 205 207
+2 -6
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 30 30 #include <drm/drm_panel.h> 31 31 #include <drm/drm_print.h> 32 32 #include <drm/drm_probe_helper.h> 33 + #include <drm/drm_simple_kms_helper.h> 33 34 34 35 #include "exynos_drm_crtc.h" 35 36 #include "exynos_drm_drv.h" ··· 1524 1523 .disable = exynos_dsi_disable, 1525 1524 }; 1526 1525 1527 - static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = { 1528 - .destroy = drm_encoder_cleanup, 1529 - }; 1530 - 1531 1526 MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); 1532 1527 1533 1528 static int exynos_dsi_host_attach(struct mipi_dsi_host *host, ··· 1701 1704 struct drm_bridge *in_bridge; 1702 1705 int ret; 1703 1706 1704 - drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs, 1705 - DRM_MODE_ENCODER_TMDS, NULL); 1707 + drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS); 1706 1708 1707 1709 drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs); 1708 1710
+2 -6
drivers/gpu/drm/exynos/exynos_drm_vidi.c
··· 14 14 #include <drm/drm_atomic_helper.h> 15 15 #include <drm/drm_edid.h> 16 16 #include <drm/drm_probe_helper.h> 17 + #include <drm/drm_simple_kms_helper.h> 17 18 #include <drm/drm_vblank.h> 18 19 #include <drm/exynos_drm.h> 19 20 ··· 370 369 .disable = exynos_vidi_disable, 371 370 }; 372 371 373 - static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = { 374 - .destroy = drm_encoder_cleanup, 375 - }; 376 - 377 372 static int vidi_bind(struct device *dev, struct device *master, void *data) 378 373 { 379 374 struct vidi_context *ctx = dev_get_drvdata(dev); ··· 403 406 return PTR_ERR(ctx->crtc); 404 407 } 405 408 406 - drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs, 407 - DRM_MODE_ENCODER_TMDS, NULL); 409 + drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS); 408 410 409 411 drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs); 410 412
+2 -6
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 38 38 #include <drm/drm_edid.h> 39 39 #include <drm/drm_print.h> 40 40 #include <drm/drm_probe_helper.h> 41 + #include <drm/drm_simple_kms_helper.h> 41 42 42 43 #include "exynos_drm_crtc.h" 43 44 #include "regs-hdmi.h" ··· 1560 1559 .disable = hdmi_disable, 1561 1560 }; 1562 1561 1563 - static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = { 1564 - .destroy = drm_encoder_cleanup, 1565 - }; 1566 - 1567 1562 static void hdmi_audio_shutdown(struct device *dev, void *data) 1568 1563 { 1569 1564 struct hdmi_context *hdata = dev_get_drvdata(dev); ··· 1840 1843 1841 1844 hdata->phy_clk.enable = hdmiphy_clk_enable; 1842 1845 1843 - drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs, 1844 - DRM_MODE_ENCODER_TMDS, NULL); 1846 + drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS); 1845 1847 1846 1848 drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs); 1847 1849
+3 -11
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
··· 13 13 #include <drm/drm_of.h> 14 14 #include <drm/drm_panel.h> 15 15 #include <drm/drm_probe_helper.h> 16 + #include <drm/drm_simple_kms_helper.h> 16 17 17 18 #include "fsl_dcu_drm_drv.h" 18 19 #include "fsl_tcon.h" 19 - 20 - static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder) 21 - { 22 - drm_encoder_cleanup(encoder); 23 - } 24 - 25 - static const struct drm_encoder_funcs encoder_funcs = { 26 - .destroy = fsl_dcu_drm_encoder_destroy, 27 - }; 28 20 29 21 int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, 30 22 struct drm_crtc *crtc) ··· 30 38 if (fsl_dev->tcon) 31 39 fsl_tcon_bypass_enable(fsl_dev->tcon); 32 40 33 - ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, 34 - DRM_MODE_ENCODER_LVDS, NULL); 41 + ret = drm_simple_encoder_init(fsl_dev->drm, encoder, 42 + DRM_MODE_ENCODER_LVDS); 35 43 if (ret < 0) 36 44 return ret; 37 45
+3 -11
drivers/gpu/drm/gma500/cdv_intel_crt.c
··· 28 28 #include <linux/i2c.h> 29 29 #include <linux/pm_runtime.h> 30 30 31 + #include <drm/drm_simple_kms_helper.h> 32 + 31 33 #include "cdv_device.h" 32 34 #include "intel_bios.h" 33 35 #include "power.h" ··· 239 237 .best_encoder = gma_best_encoder, 240 238 }; 241 239 242 - static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder) 243 - { 244 - drm_encoder_cleanup(encoder); 245 - } 246 - 247 - static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = { 248 - .destroy = cdv_intel_crt_enc_destroy, 249 - }; 250 - 251 240 void cdv_intel_crt_init(struct drm_device *dev, 252 241 struct psb_intel_mode_device *mode_dev) 253 242 { ··· 264 271 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 265 272 266 273 encoder = &gma_encoder->base; 267 - drm_encoder_init(dev, encoder, 268 - &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL); 274 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC); 269 275 270 276 gma_connector_attach_encoder(gma_connector, gma_encoder); 271 277
+3 -13
drivers/gpu/drm/gma500/cdv_intel_dp.c
··· 32 32 #include <drm/drm_crtc.h> 33 33 #include <drm/drm_crtc_helper.h> 34 34 #include <drm/drm_dp_helper.h> 35 + #include <drm/drm_simple_kms_helper.h> 35 36 36 37 #include "gma_display.h" 37 38 #include "psb_drv.h" ··· 1909 1908 kfree(connector); 1910 1909 } 1911 1910 1912 - static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder) 1913 - { 1914 - drm_encoder_cleanup(encoder); 1915 - } 1916 - 1917 1911 static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = { 1918 1912 .dpms = cdv_intel_dp_dpms, 1919 1913 .mode_fixup = cdv_intel_dp_mode_fixup, ··· 1930 1934 .mode_valid = cdv_intel_dp_mode_valid, 1931 1935 .best_encoder = gma_best_encoder, 1932 1936 }; 1933 - 1934 - static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = { 1935 - .destroy = cdv_intel_dp_encoder_destroy, 1936 - }; 1937 - 1938 1937 1939 1938 static void cdv_intel_dp_add_properties(struct drm_connector *connector) 1940 1939 { ··· 2007 2016 encoder = &gma_encoder->base; 2008 2017 2009 2018 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); 2010 - drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, 2011 - DRM_MODE_ENCODER_TMDS, NULL); 2019 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); 2012 2020 2013 2021 gma_connector_attach_encoder(gma_connector, gma_encoder); 2014 2022 ··· 2110 2120 if (ret == 0) { 2111 2121 /* if this fails, presume the device is a ghost */ 2112 2122 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2113 - cdv_intel_dp_encoder_destroy(encoder); 2123 + drm_encoder_cleanup(encoder); 2114 2124 cdv_intel_dp_destroy(connector); 2115 2125 goto err_priv; 2116 2126 } else {
+2 -2
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
··· 32 32 #include <drm/drm.h> 33 33 #include <drm/drm_crtc.h> 34 34 #include <drm/drm_edid.h> 35 + #include <drm/drm_simple_kms_helper.h> 35 36 36 37 #include "cdv_device.h" 37 38 #include "psb_drv.h" ··· 312 311 &cdv_hdmi_connector_funcs, 313 312 DRM_MODE_CONNECTOR_DVID); 314 313 315 - drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 316 - DRM_MODE_ENCODER_TMDS, NULL); 314 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); 317 315 318 316 gma_connector_attach_encoder(gma_connector, gma_encoder); 319 317 gma_encoder->type = INTEL_OUTPUT_HDMI;
+3 -14
drivers/gpu/drm/gma500/cdv_intel_lvds.c
··· 12 12 #include <linux/i2c.h> 13 13 #include <linux/pm_runtime.h> 14 14 15 + #include <drm/drm_simple_kms_helper.h> 16 + 15 17 #include "cdv_device.h" 16 18 #include "intel_bios.h" 17 19 #include "power.h" ··· 501 499 .destroy = cdv_intel_lvds_destroy, 502 500 }; 503 501 504 - 505 - static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder) 506 - { 507 - drm_encoder_cleanup(encoder); 508 - } 509 - 510 - static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = { 511 - .destroy = cdv_intel_lvds_enc_destroy, 512 - }; 513 - 514 502 /* 515 503 * Enumerate the child dev array parsed from VBT to check whether 516 504 * the LVDS is present. ··· 608 616 &cdv_intel_lvds_connector_funcs, 609 617 DRM_MODE_CONNECTOR_LVDS); 610 618 611 - drm_encoder_init(dev, encoder, 612 - &cdv_intel_lvds_enc_funcs, 613 - DRM_MODE_ENCODER_LVDS, NULL); 614 - 619 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); 615 620 616 621 gma_connector_attach_encoder(gma_connector, gma_encoder); 617 622 gma_encoder->type = INTEL_OUTPUT_LVDS;
+8 -8
drivers/gpu/drm/gma500/framebuffer.c
··· 577 577 break; 578 578 case INTEL_OUTPUT_SDVO: 579 579 crtc_mask = dev_priv->ops->sdvo_mask; 580 - clone_mask = (1 << INTEL_OUTPUT_SDVO); 580 + clone_mask = 0; 581 581 break; 582 582 case INTEL_OUTPUT_LVDS: 583 - crtc_mask = dev_priv->ops->lvds_mask; 584 - clone_mask = (1 << INTEL_OUTPUT_LVDS); 583 + crtc_mask = dev_priv->ops->lvds_mask; 584 + clone_mask = 0; 585 585 break; 586 586 case INTEL_OUTPUT_MIPI: 587 587 crtc_mask = (1 << 0); 588 - clone_mask = (1 << INTEL_OUTPUT_MIPI); 588 + clone_mask = 0; 589 589 break; 590 590 case INTEL_OUTPUT_MIPI2: 591 591 crtc_mask = (1 << 2); 592 - clone_mask = (1 << INTEL_OUTPUT_MIPI2); 592 + clone_mask = 0; 593 593 break; 594 594 case INTEL_OUTPUT_HDMI: 595 - crtc_mask = dev_priv->ops->hdmi_mask; 595 + crtc_mask = dev_priv->ops->hdmi_mask; 596 596 clone_mask = (1 << INTEL_OUTPUT_HDMI); 597 597 break; 598 598 case INTEL_OUTPUT_DISPLAYPORT: 599 599 crtc_mask = (1 << 0) | (1 << 1); 600 - clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); 600 + clone_mask = 0; 601 601 break; 602 602 case INTEL_OUTPUT_EDP: 603 603 crtc_mask = (1 << 1); 604 - clone_mask = (1 << INTEL_OUTPUT_EDP); 604 + clone_mask = 0; 605 605 } 606 606 encoder->possible_crtcs = crtc_mask; 607 607 encoder->possible_clones =
+5 -6
drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
··· 27 27 28 28 #include <linux/delay.h> 29 29 30 + #include <drm/drm_simple_kms_helper.h> 31 + 30 32 #include "mdfld_dsi_dpi.h" 31 33 #include "mdfld_dsi_pkg_sender.h" 32 34 #include "mdfld_output.h" ··· 995 993 /*create drm encoder object*/ 996 994 connector = &dsi_connector->base.base; 997 995 encoder = &dpi_output->base.base.base; 998 - drm_encoder_init(dev, 999 - encoder, 1000 - p_funcs->encoder_funcs, 1001 - DRM_MODE_ENCODER_LVDS, NULL); 996 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); 1002 997 drm_encoder_helper_add(encoder, 1003 998 p_funcs->encoder_helper_funcs); 1004 999 ··· 1005 1006 /*set possible crtcs and clones*/ 1006 1007 if (dsi_connector->pipe) { 1007 1008 encoder->possible_crtcs = (1 << 2); 1008 - encoder->possible_clones = (1 << 1); 1009 + encoder->possible_clones = 0; 1009 1010 } else { 1010 1011 encoder->possible_crtcs = (1 << 0); 1011 - encoder->possible_clones = (1 << 0); 1012 + encoder->possible_clones = 0; 1012 1013 } 1013 1014 1014 1015 dsi_connector->base.encoder = &dpi_output->base.base;
-1
drivers/gpu/drm/gma500/mdfld_output.h
··· 51 51 }; 52 52 53 53 struct panel_funcs { 54 - const struct drm_encoder_funcs *encoder_funcs; 55 54 const struct drm_encoder_helper_funcs *encoder_helper_funcs; 56 55 struct drm_display_mode * (*get_config_mode)(struct drm_device *); 57 56 int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
-6
drivers/gpu/drm/gma500/mdfld_tmd_vid.c
··· 188 188 .commit = mdfld_dsi_dpi_commit, 189 189 }; 190 190 191 - /*TPO DPI encoder funcs*/ 192 - static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = { 193 - .destroy = drm_encoder_cleanup, 194 - }; 195 - 196 191 const struct panel_funcs mdfld_tmd_vid_funcs = { 197 - .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs, 198 192 .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, 199 193 .get_config_mode = &tmd_vid_get_config_mode, 200 194 .get_panel_info = tmd_vid_get_panel_info,
-6
drivers/gpu/drm/gma500/mdfld_tpo_vid.c
··· 76 76 .commit = mdfld_dsi_dpi_commit, 77 77 }; 78 78 79 - /*TPO DPI encoder funcs*/ 80 - static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = { 81 - .destroy = drm_encoder_cleanup, 82 - }; 83 - 84 79 const struct panel_funcs mdfld_tpo_vid_funcs = { 85 - .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs, 86 80 .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, 87 81 .get_config_mode = &tpo_vid_get_config_mode, 88 82 .get_panel_info = tpo_vid_get_panel_info,
+2 -12
drivers/gpu/drm/gma500/oaktrail_hdmi.c
··· 27 27 #include <linux/delay.h> 28 28 29 29 #include <drm/drm.h> 30 + #include <drm/drm_simple_kms_helper.h> 30 31 31 32 #include "psb_drv.h" 32 33 #include "psb_intel_drv.h" ··· 621 620 .destroy = oaktrail_hdmi_destroy, 622 621 }; 623 622 624 - static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder) 625 - { 626 - drm_encoder_cleanup(encoder); 627 - } 628 - 629 - static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = { 630 - .destroy = oaktrail_hdmi_enc_destroy, 631 - }; 632 - 633 623 void oaktrail_hdmi_init(struct drm_device *dev, 634 624 struct psb_intel_mode_device *mode_dev) 635 625 { ··· 643 651 &oaktrail_hdmi_connector_funcs, 644 652 DRM_MODE_CONNECTOR_DVID); 645 653 646 - drm_encoder_init(dev, encoder, 647 - &oaktrail_hdmi_enc_funcs, 648 - DRM_MODE_ENCODER_TMDS, NULL); 654 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); 649 655 650 656 gma_connector_attach_encoder(gma_connector, gma_encoder); 651 657
+3 -2
drivers/gpu/drm/gma500/oaktrail_lvds.c
··· 13 13 14 14 #include <asm/intel-mid.h> 15 15 16 + #include <drm/drm_simple_kms_helper.h> 17 + 16 18 #include "intel_bios.h" 17 19 #include "power.h" 18 20 #include "psb_drv.h" ··· 313 311 &psb_intel_lvds_connector_funcs, 314 312 DRM_MODE_CONNECTOR_LVDS); 315 313 316 - drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 317 - DRM_MODE_ENCODER_LVDS, NULL); 314 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); 318 315 319 316 gma_connector_attach_encoder(gma_connector, gma_encoder); 320 317 gma_encoder->type = INTEL_OUTPUT_LVDS;
-1
drivers/gpu/drm/gma500/psb_intel_drv.h
··· 252 252 struct drm_property *property, 253 253 uint64_t value); 254 254 extern void psb_intel_lvds_destroy(struct drm_connector *connector); 255 - extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs; 256 255 257 256 /* intel_gmbus.c */ 258 257 extern void gma_intel_i2c_reset(struct drm_device *dev);
+3 -15
drivers/gpu/drm/gma500/psb_intel_lvds.c
··· 11 11 #include <linux/i2c.h> 12 12 #include <linux/pm_runtime.h> 13 13 14 + #include <drm/drm_simple_kms_helper.h> 15 + 14 16 #include "intel_bios.h" 15 17 #include "power.h" 16 18 #include "psb_drv.h" ··· 623 621 .destroy = psb_intel_lvds_destroy, 624 622 }; 625 623 626 - 627 - static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder) 628 - { 629 - drm_encoder_cleanup(encoder); 630 - } 631 - 632 - const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = { 633 - .destroy = psb_intel_lvds_enc_destroy, 634 - }; 635 - 636 - 637 - 638 624 /** 639 625 * psb_intel_lvds_init - setup LVDS connectors on this device 640 626 * @dev: drm device ··· 673 683 &psb_intel_lvds_connector_funcs, 674 684 DRM_MODE_CONNECTOR_LVDS); 675 685 676 - drm_encoder_init(dev, encoder, 677 - &psb_intel_lvds_enc_funcs, 678 - DRM_MODE_ENCODER_LVDS, NULL); 686 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); 679 687 680 688 gma_connector_attach_encoder(gma_connector, gma_encoder); 681 689 gma_encoder->type = INTEL_OUTPUT_LVDS;
+4 -9
drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
··· 747 747 return -EINVAL; 748 748 } 749 749 750 - client = i2c_new_device(adapter, &info); 751 - if (!client) { 752 - pr_err("%s: i2c_new_device() failed\n", __func__); 750 + client = i2c_new_client_device(adapter, &info); 751 + if (IS_ERR(client)) { 752 + pr_err("%s: creating I2C device failed\n", __func__); 753 753 i2c_put_adapter(adapter); 754 - return -EINVAL; 754 + return PTR_ERR(client); 755 755 } 756 756 757 757 return 0; ··· 765 765 .commit = mdfld_dsi_dpi_commit, 766 766 }; 767 767 768 - static const struct drm_encoder_funcs tc35876x_encoder_funcs = { 769 - .destroy = drm_encoder_cleanup, 770 - }; 771 - 772 768 const struct panel_funcs mdfld_tc35876x_funcs = { 773 - .encoder_funcs = &tc35876x_encoder_funcs, 774 769 .encoder_helper_funcs = &tc35876x_encoder_helper_funcs, 775 770 .get_config_mode = tc35876x_get_config_mode, 776 771 .get_panel_info = tc35876x_get_panel_info,
+5 -4
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
··· 94 94 return -EINVAL; 95 95 } 96 96 97 + if (state->fb->pitches[0] % 128 != 0) { 98 + DRM_DEBUG_ATOMIC("wrong stride with 128-byte aligned\n"); 99 + return -EINVAL; 100 + } 97 101 return 0; 98 102 } 99 103 ··· 123 119 writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); 124 120 125 121 reg = state->fb->width * (state->fb->format->cpp[0]); 126 - /* now line_pad is 16 */ 127 - reg = PADDING(16, reg); 128 122 129 - line_l = state->fb->width * state->fb->format->cpp[0]; 130 - line_l = PADDING(16, line_l); 123 + line_l = state->fb->pitches[0]; 131 124 writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) | 132 125 HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l), 133 126 priv->mmio + HIBMC_CRT_FB_WIDTH);
+2 -6
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
··· 94 94 priv->dev->mode_config.max_height = 1200; 95 95 96 96 priv->dev->mode_config.fb_base = priv->fb_base; 97 - priv->dev->mode_config.preferred_depth = 24; 97 + priv->dev->mode_config.preferred_depth = 32; 98 98 priv->dev->mode_config.prefer_shadow = 1; 99 99 100 100 priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs; ··· 307 307 /* reset all the states of crtc/plane/encoder/connector */ 308 308 drm_mode_config_reset(dev); 309 309 310 - ret = drm_fbdev_generic_setup(dev, 16); 311 - if (ret) { 312 - DRM_ERROR("failed to initialize fbdev: %d\n", ret); 313 - goto err; 314 - } 310 + drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); 315 311 316 312 return 0; 317 313
+1 -1
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
··· 50 50 int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, 51 51 struct drm_mode_create_dumb *args) 52 52 { 53 - return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args); 53 + return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args); 54 54 } 55 55 56 56 const struct drm_mode_config_funcs hibmc_mode_funcs = {
+2 -7
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
··· 20 20 #include <drm/drm_atomic_helper.h> 21 21 #include <drm/drm_bridge.h> 22 22 #include <drm/drm_device.h> 23 - #include <drm/drm_encoder_slave.h> 24 23 #include <drm/drm_mipi_dsi.h> 25 24 #include <drm/drm_of.h> 26 25 #include <drm/drm_print.h> 27 26 #include <drm/drm_probe_helper.h> 27 + #include <drm/drm_simple_kms_helper.h> 28 28 29 29 #include "dw_dsi_reg.h" 30 30 ··· 696 696 .disable = dsi_encoder_disable 697 697 }; 698 698 699 - static const struct drm_encoder_funcs dw_encoder_funcs = { 700 - .destroy = drm_encoder_cleanup, 701 - }; 702 - 703 699 static int dw_drm_encoder_init(struct device *dev, 704 700 struct drm_device *drm_dev, 705 701 struct drm_encoder *encoder) ··· 709 713 } 710 714 711 715 encoder->possible_crtcs = crtc_mask; 712 - ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs, 713 - DRM_MODE_ENCODER_DSI, NULL); 716 + ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI); 714 717 if (ret) { 715 718 DRM_ERROR("failed to init dsi encoder\n"); 716 719 return ret;
-1
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
··· 940 940 }; 941 941 942 942 struct kirin_drm_data ade_driver_data = { 943 - .register_connects = false, 944 943 .num_planes = ADE_CH_NUM, 945 944 .prim_plane = ADE_CH1, 946 945 .channel_formats = channel_formats,
-43
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
··· 219 219 return 0; 220 220 } 221 221 222 - static int kirin_drm_connectors_register(struct drm_device *dev) 223 - { 224 - struct drm_connector *connector; 225 - struct drm_connector *failed_connector; 226 - struct drm_connector_list_iter conn_iter; 227 - int ret; 228 - 229 - mutex_lock(&dev->mode_config.mutex); 230 - drm_connector_list_iter_begin(dev, &conn_iter); 231 - drm_for_each_connector_iter(connector, &conn_iter) { 232 - ret = drm_connector_register(connector); 233 - if (ret) { 234 - failed_connector = connector; 235 - goto err; 236 - } 237 - } 238 - drm_connector_list_iter_end(&conn_iter); 239 - mutex_unlock(&dev->mode_config.mutex); 240 - 241 - return 0; 242 - 243 - err: 244 - drm_connector_list_iter_begin(dev, &conn_iter); 245 - drm_for_each_connector_iter(connector, &conn_iter) { 246 - if (failed_connector == connector) 247 - break; 248 - drm_connector_unregister(connector); 249 - } 250 - drm_connector_list_iter_end(&conn_iter); 251 - mutex_unlock(&dev->mode_config.mutex); 252 - 253 - return ret; 254 - } 255 - 256 222 static int kirin_drm_bind(struct device *dev) 257 223 { 258 224 struct kirin_drm_data *driver_data; ··· 245 279 246 280 drm_fbdev_generic_setup(drm_dev, 32); 247 281 248 - /* connectors should be registered after drm device register */ 249 - if (driver_data->register_connects) { 250 - ret = kirin_drm_connectors_register(drm_dev); 251 - if (ret) 252 - goto err_drm_dev_unregister; 253 - } 254 - 255 282 return 0; 256 283 257 - err_drm_dev_unregister: 258 - drm_dev_unregister(drm_dev); 259 284 err_kms_cleanup: 260 285 kirin_drm_kms_cleanup(drm_dev); 261 286 err_drm_dev_put:
-1
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
··· 37 37 u32 channel_formats_cnt; 38 38 int config_max_width; 39 39 int config_max_height; 40 - bool register_connects; 41 40 u32 num_planes; 42 41 u32 prim_plane; 43 42
+5 -2
drivers/gpu/drm/i2c/sil164_drv.c
··· 393 393 return NULL; 394 394 } 395 395 396 - return i2c_new_device(adap, &info); 396 + return i2c_new_client_device(adap, &info); 397 397 } 398 398 399 399 static int ··· 402 402 struct drm_encoder_slave *encoder) 403 403 { 404 404 struct sil164_priv *priv; 405 + struct i2c_client *slave_client; 405 406 406 407 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 407 408 if (!priv) ··· 411 410 encoder->slave_priv = priv; 412 411 encoder->slave_funcs = &sil164_encoder_funcs; 413 412 414 - priv->duallink_slave = sil164_detect_slave(client); 413 + slave_client = sil164_detect_slave(client); 414 + if (!IS_ERR(slave_client)) 415 + priv->duallink_slave = slave_client; 415 416 416 417 return 0; 417 418 }
+6 -14
drivers/gpu/drm/i2c/tda998x_drv.c
··· 19 19 #include <drm/drm_of.h> 20 20 #include <drm/drm_print.h> 21 21 #include <drm/drm_probe_helper.h> 22 + #include <drm/drm_simple_kms_helper.h> 22 23 #include <drm/i2c/tda998x.h> 23 24 24 25 #include <media/cec-notifier.h> ··· 1950 1949 cec_info.platform_data = &priv->cec_glue; 1951 1950 cec_info.irq = client->irq; 1952 1951 1953 - priv->cec = i2c_new_device(client->adapter, &cec_info); 1954 - if (!priv->cec) { 1955 - ret = -ENODEV; 1952 + priv->cec = i2c_new_client_device(client->adapter, &cec_info); 1953 + if (IS_ERR(priv->cec)) { 1954 + ret = PTR_ERR(priv->cec); 1956 1955 goto fail; 1957 1956 } 1958 1957 ··· 1998 1997 1999 1998 /* DRM encoder functions */ 2000 1999 2001 - static void tda998x_encoder_destroy(struct drm_encoder *encoder) 2002 - { 2003 - drm_encoder_cleanup(encoder); 2004 - } 2005 - 2006 - static const struct drm_encoder_funcs tda998x_encoder_funcs = { 2007 - .destroy = tda998x_encoder_destroy, 2008 - }; 2009 - 2010 2000 static int tda998x_encoder_init(struct device *dev, struct drm_device *drm) 2011 2001 { 2012 2002 struct tda998x_priv *priv = dev_get_drvdata(dev); ··· 2015 2023 2016 2024 priv->encoder.possible_crtcs = crtcs; 2017 2025 2018 - ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs, 2019 - DRM_MODE_ENCODER_TMDS, NULL); 2026 + ret = drm_simple_encoder_init(drm, &priv->encoder, 2027 + DRM_MODE_ENCODER_TMDS); 2020 2028 if (ret) 2021 2029 goto err_encoder; 2022 2030
+6 -12
drivers/gpu/drm/i915/display/intel_display_debugfs.c
··· 631 631 } 632 632 633 633 static void intel_dp_mst_info(struct seq_file *m, 634 - struct intel_connector *intel_connector) 634 + struct intel_connector *intel_connector) 635 635 { 636 - struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); 637 - struct intel_dp_mst_encoder *intel_mst = 638 - enc_to_mst(intel_encoder); 639 - struct intel_digital_port *intel_dig_port = intel_mst->primary; 640 - struct intel_dp *intel_dp = &intel_dig_port->dp; 641 - bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 642 - intel_connector->port); 636 + bool has_audio = intel_connector->port->has_audio; 643 637 644 638 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 645 639 } ··· 1931 1937 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, 1932 1938 }; 1933 1939 1934 - int intel_display_debugfs_register(struct drm_i915_private *i915) 1940 + void intel_display_debugfs_register(struct drm_i915_private *i915) 1935 1941 { 1936 1942 struct drm_minor *minor = i915->drm.primary; 1937 1943 int i; ··· 1944 1950 intel_display_debugfs_files[i].fops); 1945 1951 } 1946 1952 1947 - return drm_debugfs_create_files(intel_display_debugfs_list, 1948 - ARRAY_SIZE(intel_display_debugfs_list), 1949 - minor->debugfs_root, minor); 1953 + drm_debugfs_create_files(intel_display_debugfs_list, 1954 + ARRAY_SIZE(intel_display_debugfs_list), 1955 + minor->debugfs_root, minor); 1950 1956 } 1951 1957 1952 1958 static int i915_panel_show(struct seq_file *m, void *data)
+2 -2
drivers/gpu/drm/i915/display/intel_display_debugfs.h
··· 10 10 struct drm_i915_private; 11 11 12 12 #ifdef CONFIG_DEBUG_FS 13 - int intel_display_debugfs_register(struct drm_i915_private *i915); 13 + void intel_display_debugfs_register(struct drm_i915_private *i915); 14 14 int intel_connector_debugfs_add(struct drm_connector *connector); 15 15 #else 16 - static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; } 16 + static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {} 17 17 static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; } 18 18 #endif 19 19
+1 -1
drivers/gpu/drm/i915/display/intel_display_types.h
··· 438 438 state of connector->polled in case hotplug storm detection changes it */ 439 439 u8 polled; 440 440 441 - void *port; /* store this opaque as its illegal to dereference it */ 441 + struct drm_dp_mst_port *port; 442 442 443 443 struct intel_dp *mst_port; 444 444
+1 -3
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 113 113 pipe_config->has_pch_encoder = false; 114 114 115 115 if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 116 - pipe_config->has_audio = 117 - drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 118 - connector->port); 116 + pipe_config->has_audio = connector->port->has_audio; 119 117 else 120 118 pipe_config->has_audio = 121 119 intel_conn_state->force_audio == HDMI_AUDIO_ON;
-2
drivers/gpu/drm/i915/gem/i915_gem_phys.c
··· 10 10 11 11 #include <drm/drm.h> /* for drm_legacy.h! */ 12 12 #include <drm/drm_cache.h> 13 - #include <drm/drm_legacy.h> /* for drm_pci.h! */ 14 - #include <drm/drm_pci.h> 15 13 16 14 #include "gt/intel_gt.h" 17 15 #include "i915_drv.h"
+4 -4
drivers/gpu/drm/i915/i915_debugfs.c
··· 1884 1884 #endif 1885 1885 }; 1886 1886 1887 - int i915_debugfs_register(struct drm_i915_private *dev_priv) 1887 + void i915_debugfs_register(struct drm_i915_private *dev_priv) 1888 1888 { 1889 1889 struct drm_minor *minor = dev_priv->drm.primary; 1890 1890 int i; ··· 1901 1901 i915_debugfs_files[i].fops); 1902 1902 } 1903 1903 1904 - return drm_debugfs_create_files(i915_debugfs_list, 1905 - I915_DEBUGFS_ENTRIES, 1906 - minor->debugfs_root, minor); 1904 + drm_debugfs_create_files(i915_debugfs_list, 1905 + I915_DEBUGFS_ENTRIES, 1906 + minor->debugfs_root, minor); 1907 1907 }
+2 -2
drivers/gpu/drm/i915/i915_debugfs.h
··· 12 12 struct seq_file; 13 13 14 14 #ifdef CONFIG_DEBUG_FS 15 - int i915_debugfs_register(struct drm_i915_private *dev_priv); 15 + void i915_debugfs_register(struct drm_i915_private *dev_priv); 16 16 void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj); 17 17 #else 18 - static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; } 18 + static inline void i915_debugfs_register(struct drm_i915_private *dev_priv) {} 19 19 static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {} 20 20 #endif 21 21
+9 -13
drivers/gpu/drm/i915/i915_drv.c
··· 43 43 #include <drm/drm_atomic_helper.h> 44 44 #include <drm/drm_ioctl.h> 45 45 #include <drm/drm_irq.h> 46 + #include <drm/drm_managed.h> 46 47 #include <drm/drm_probe_helper.h> 47 48 48 49 #include "display/intel_acpi.h" ··· 889 888 return ERR_PTR(err); 890 889 } 891 890 891 + drmm_add_final_kfree(&i915->drm, i915); 892 + 892 893 i915->drm.pdev = pdev; 893 894 pci_set_drvdata(pdev, i915); 894 895 ··· 902 899 BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask)); 903 900 904 901 return i915; 905 - } 906 - 907 - static void i915_driver_destroy(struct drm_i915_private *i915) 908 - { 909 - struct pci_dev *pdev = i915->drm.pdev; 910 - 911 - drm_dev_fini(&i915->drm); 912 - kfree(i915); 913 - 914 - /* And make sure we never chase our dangling pointer from pci_dev */ 915 - pci_set_drvdata(pdev, NULL); 916 902 } 917 903 918 904 /** ··· 985 993 986 994 i915_welcome_messages(i915); 987 995 996 + i915->do_release = true; 997 + 988 998 return 0; 989 999 990 1000 out_cleanup_irq: ··· 1006 1012 pci_disable_device(pdev); 1007 1013 out_fini: 1008 1014 i915_probe_error(i915, "Device initialization failed (%d)\n", ret); 1009 - i915_driver_destroy(i915); 1015 + drm_dev_put(&i915->drm); 1010 1016 return ret; 1011 1017 } 1012 1018 ··· 1046 1052 struct drm_i915_private *dev_priv = to_i915(dev); 1047 1053 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1048 1054 1055 + if (!dev_priv->do_release) 1056 + return; 1057 + 1049 1058 disable_rpm_wakeref_asserts(rpm); 1050 1059 1051 1060 i915_gem_driver_release(dev_priv); ··· 1062 1065 intel_runtime_pm_driver_release(rpm); 1063 1066 1064 1067 i915_driver_late_release(dev_priv); 1065 - i915_driver_destroy(dev_priv); 1066 1068 } 1067 1069 1068 1070 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+3
drivers/gpu/drm/i915/i915_drv.h
··· 822 822 struct drm_i915_private { 823 823 struct drm_device drm; 824 824 825 + /* FIXME: Device release actions should all be moved to drmm_ */ 826 + bool do_release; 827 + 825 828 const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ 826 829 struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ 827 830 struct intel_driver_caps caps;
+23 -9
drivers/gpu/drm/i915/selftests/mock_gem_device.c
··· 25 25 #include <linux/pm_domain.h> 26 26 #include <linux/pm_runtime.h> 27 27 28 + #include <drm/drm_managed.h> 29 + 28 30 #include "gt/intel_gt.h" 29 31 #include "gt/intel_gt_requests.h" 30 32 #include "gt/mock_engine.h" ··· 57 55 { 58 56 struct drm_i915_private *i915 = to_i915(dev); 59 57 58 + if (!i915->do_release) 59 + goto out; 60 + 60 61 mock_device_flush(i915); 61 62 intel_gt_driver_remove(&i915->gt); 62 63 ··· 76 71 77 72 drm_mode_config_cleanup(&i915->drm); 78 73 79 - drm_dev_fini(&i915->drm); 74 + out: 80 75 put_device(&i915->drm.pdev->dev); 76 + i915->drm.pdev = NULL; 81 77 } 82 78 83 79 static struct drm_driver mock_driver = { ··· 120 114 struct pci_dev *pdev; 121 115 int err; 122 116 123 - pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL); 117 + pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); 124 118 if (!pdev) 125 - goto err; 119 + return NULL; 120 + i915 = kzalloc(sizeof(*i915), GFP_KERNEL); 121 + if (!i915) { 122 + kfree(pdev); 123 + return NULL; 124 + } 126 125 127 126 device_initialize(&pdev->dev); 128 127 pdev->class = PCI_BASE_CLASS_DISPLAY << 16; ··· 140 129 pdev->dev.archdata.iommu = (void *)-1; 141 130 #endif 142 131 143 - i915 = (struct drm_i915_private *)(pdev + 1); 144 132 pci_set_drvdata(pdev, i915); 145 133 146 134 dev_pm_domain_set(&pdev->dev, &pm_domain); ··· 151 141 err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev); 152 142 if (err) { 153 143 pr_err("Failed to initialise mock GEM device: err=%d\n", err); 154 - goto put_device; 144 + put_device(&pdev->dev); 145 + kfree(i915); 146 + 147 + return NULL; 155 148 } 156 149 i915->drm.pdev = pdev; 150 + drmm_add_final_kfree(&i915->drm, i915); 157 151 158 152 intel_runtime_pm_init_early(&i915->runtime_pm); 159 153 ··· 202 188 __clear_bit(I915_WEDGED, &i915->gt.reset.flags); 203 189 intel_engines_driver_register(i915); 204 190 191 + i915->do_release = true; 192 + 205 193 return i915; 206 194 207 195 err_context: ··· 214 198 intel_gt_driver_late_release(&i915->gt); 215 199 
intel_memory_regions_driver_release(i915); 216 200 drm_mode_config_cleanup(&i915->drm); 217 - drm_dev_fini(&i915->drm); 218 - put_device: 219 - put_device(&pdev->dev); 220 - err: 201 + drm_dev_put(&i915->drm); 202 + 221 203 return NULL; 222 204 }
+2 -6
drivers/gpu/drm/imx/dw_hdmi-imx.c
··· 18 18 #include <drm/drm_edid.h> 19 19 #include <drm/drm_encoder.h> 20 20 #include <drm/drm_of.h> 21 + #include <drm/drm_simple_kms_helper.h> 21 22 22 23 #include "imx-drm.h" 23 24 ··· 144 143 .atomic_check = dw_hdmi_imx_atomic_check, 145 144 }; 146 145 147 - static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { 148 - .destroy = drm_encoder_cleanup, 149 - }; 150 - 151 146 static enum drm_mode_status 152 147 imx6q_hdmi_mode_valid(struct drm_connector *con, 153 148 const struct drm_display_mode *mode) ··· 233 236 return ret; 234 237 235 238 drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs); 236 - drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, 237 - DRM_MODE_ENCODER_TMDS, NULL); 239 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); 238 240 239 241 platform_set_drvdata(pdev, hdmi); 240 242
+2 -8
drivers/gpu/drm/imx/imx-drm-core.c
··· 42 42 } 43 43 EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); 44 44 45 - void imx_drm_encoder_destroy(struct drm_encoder *encoder) 46 - { 47 - drm_encoder_cleanup(encoder); 48 - } 49 - EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy); 50 - 51 45 static int imx_drm_atomic_check(struct drm_device *dev, 52 46 struct drm_atomic_state *state) 53 47 { ··· 133 139 134 140 encoder->possible_crtcs = crtc_mask; 135 141 136 - /* FIXME: this is the mask of outputs which can clone this output. */ 137 - encoder->possible_clones = ~0; 142 + /* FIXME: cloning support not clear, disable it all for now */ 143 + encoder->possible_clones = 0; 138 144 139 145 return 0; 140 146 }
-1
drivers/gpu/drm/imx/imx-drm.h
··· 38 38 struct drm_encoder *encoder, struct device_node *np); 39 39 40 40 void imx_drm_connector_destroy(struct drm_connector *connector); 41 - void imx_drm_encoder_destroy(struct drm_encoder *encoder); 42 41 43 42 int ipu_planes_assign_pre(struct drm_device *dev, 44 43 struct drm_atomic_state *state);
+2 -6
drivers/gpu/drm/imx/imx-ldb.c
··· 26 26 #include <drm/drm_panel.h> 27 27 #include <drm/drm_print.h> 28 28 #include <drm/drm_probe_helper.h> 29 + #include <drm/drm_simple_kms_helper.h> 29 30 30 31 #include "imx-drm.h" 31 32 ··· 394 393 .best_encoder = imx_ldb_connector_best_encoder, 395 394 }; 396 395 397 - static const struct drm_encoder_funcs imx_ldb_encoder_funcs = { 398 - .destroy = imx_drm_encoder_destroy, 399 - }; 400 - 401 396 static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { 402 397 .atomic_mode_set = imx_ldb_encoder_atomic_mode_set, 403 398 .enable = imx_ldb_encoder_enable, ··· 438 441 } 439 442 440 443 drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs); 441 - drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs, 442 - DRM_MODE_ENCODER_LVDS, NULL); 444 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS); 443 445 444 446 if (imx_ldb_ch->bridge) { 445 447 ret = drm_bridge_attach(&imx_ldb_ch->encoder,
+2 -6
drivers/gpu/drm/imx/imx-tve.c
··· 21 21 #include <drm/drm_atomic_helper.h> 22 22 #include <drm/drm_fb_helper.h> 23 23 #include <drm/drm_probe_helper.h> 24 + #include <drm/drm_simple_kms_helper.h> 24 25 25 26 #include "imx-drm.h" 26 27 ··· 349 348 .mode_valid = imx_tve_connector_mode_valid, 350 349 }; 351 350 352 - static const struct drm_encoder_funcs imx_tve_encoder_funcs = { 353 - .destroy = imx_drm_encoder_destroy, 354 - }; 355 - 356 351 static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = { 357 352 .mode_set = imx_tve_encoder_mode_set, 358 353 .enable = imx_tve_encoder_enable, ··· 476 479 return ret; 477 480 478 481 drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs); 479 - drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs, 480 - encoder_type, NULL); 482 + drm_simple_encoder_init(drm, &tve->encoder, encoder_type); 481 483 482 484 drm_connector_helper_add(&tve->connector, 483 485 &imx_tve_connector_helper_funcs);
+2 -6
drivers/gpu/drm/imx/parallel-display.c
··· 18 18 #include <drm/drm_of.h> 19 19 #include <drm/drm_panel.h> 20 20 #include <drm/drm_probe_helper.h> 21 + #include <drm/drm_simple_kms_helper.h> 21 22 22 23 #include "imx-drm.h" 23 24 ··· 257 256 .best_encoder = imx_pd_connector_best_encoder, 258 257 }; 259 258 260 - static const struct drm_encoder_funcs imx_pd_encoder_funcs = { 261 - .destroy = imx_drm_encoder_destroy, 262 - }; 263 - 264 259 static const struct drm_bridge_funcs imx_pd_bridge_funcs = { 265 260 .enable = imx_pd_bridge_enable, 266 261 .disable = imx_pd_bridge_disable, ··· 285 288 */ 286 289 imxpd->connector.dpms = DRM_MODE_DPMS_OFF; 287 290 288 - drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs, 289 - DRM_MODE_ENCODER_NONE, NULL); 291 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE); 290 292 291 293 imxpd->bridge.funcs = &imx_pd_bridge_funcs; 292 294 drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+11 -23
drivers/gpu/drm/ingenic/ingenic-drm.c
··· 23 23 #include <drm/drm_fourcc.h> 24 24 #include <drm/drm_gem_framebuffer_helper.h> 25 25 #include <drm/drm_irq.h> 26 + #include <drm/drm_managed.h> 26 27 #include <drm/drm_of.h> 27 28 #include <drm/drm_panel.h> 28 29 #include <drm/drm_plane.h> 29 30 #include <drm/drm_plane_helper.h> 30 31 #include <drm/drm_probe_helper.h> 32 + #include <drm/drm_simple_kms_helper.h> 31 33 #include <drm/drm_vblank.h> 32 34 33 35 #define JZ_REG_LCD_CFG 0x00 ··· 490 488 return IRQ_HANDLED; 491 489 } 492 490 493 - static void ingenic_drm_release(struct drm_device *drm) 494 - { 495 - struct ingenic_drm *priv = drm_device_get_priv(drm); 496 - 497 - drm_mode_config_cleanup(drm); 498 - drm_dev_fini(drm); 499 - kfree(priv); 500 - } 501 - 502 491 static int ingenic_drm_enable_vblank(struct drm_crtc *crtc) 503 492 { 504 493 struct ingenic_drm *priv = drm_crtc_get_priv(crtc); ··· 533 540 .gem_prime_mmap = drm_gem_cma_prime_mmap, 534 541 535 542 .irq_handler = ingenic_drm_irq_handler, 536 - .release = ingenic_drm_release, 537 543 }; 538 544 539 545 static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = { ··· 584 592 .atomic_commit = drm_atomic_helper_commit, 585 593 }; 586 594 587 - static const struct drm_encoder_funcs ingenic_drm_encoder_funcs = { 588 - .destroy = drm_encoder_cleanup, 589 - }; 590 - 591 595 static void ingenic_drm_free_dma_hwdesc(void *d) 592 596 { 593 597 struct ingenic_drm *priv = d; ··· 627 639 kfree(priv); 628 640 return ret; 629 641 } 642 + drmm_add_final_kfree(drm, priv); 630 643 631 - drm_mode_config_init(drm); 644 + ret = drmm_mode_config_init(drm); 645 + if (ret) 646 + return ret; 647 + 632 648 drm->mode_config.min_width = 0; 633 649 drm->mode_config.min_height = 0; 634 650 drm->mode_config.max_width = soc_info->max_width; ··· 653 661 } 654 662 655 663 irq = platform_get_irq(pdev, 0); 656 - if (irq < 0) { 657 - dev_err(dev, "Failed to get platform irq"); 664 + if (irq < 0) 658 665 return irq; 659 - } 660 666 661 667 if (soc_info->needs_dev_clk) { 
662 668 priv->lcd_clk = devm_clk_get(dev, "lcd"); ··· 720 730 drm_encoder_helper_add(&priv->encoder, 721 731 &ingenic_drm_encoder_helper_funcs); 722 732 723 - ret = drm_encoder_init(drm, &priv->encoder, &ingenic_drm_encoder_funcs, 724 - DRM_MODE_ENCODER_DPI, NULL); 733 + ret = drm_simple_encoder_init(drm, &priv->encoder, 734 + DRM_MODE_ENCODER_DPI); 725 735 if (ret) { 726 736 dev_err(dev, "Failed to init encoder: %i", ret); 727 737 return ret; ··· 781 791 goto err_devclk_disable; 782 792 } 783 793 784 - ret = drm_fbdev_generic_setup(drm, 32); 785 - if (ret) 786 - dev_warn(dev, "Unable to start fbdev emulation: %i", ret); 794 + drm_fbdev_generic_setup(drm, 32); 787 795 788 796 return 0; 789 797
+2
drivers/gpu/drm/lima/Kconfig
··· 10 10 depends on OF 11 11 select DRM_SCHED 12 12 select DRM_GEM_SHMEM_HELPER 13 + select PM_DEVFREQ 14 + select DEVFREQ_GOV_SIMPLE_ONDEMAND 13 15 help 14 16 DRM driver for ARM Mali 400/450 GPUs.
+3 -1
drivers/gpu/drm/lima/Makefile
··· 14 14 lima_sched.o \ 15 15 lima_ctx.o \ 16 16 lima_dlbu.o \ 17 - lima_bcast.o 17 + lima_bcast.o \ 18 + lima_trace.o \ 19 + lima_devfreq.o 18 20 19 21 obj-$(CONFIG_DRM_LIMA) += lima.o
+3
drivers/gpu/drm/lima/lima_ctx.c
··· 27 27 if (err < 0) 28 28 goto err_out0; 29 29 30 + ctx->pid = task_pid_nr(current); 31 + get_task_comm(ctx->pname, current); 32 + 30 33 return 0; 31 34 32 35 err_out0:
+5
drivers/gpu/drm/lima/lima_ctx.h
··· 5 5 #define __LIMA_CTX_H__ 6 6 7 7 #include <linux/xarray.h> 8 + #include <linux/sched.h> 8 9 9 10 #include "lima_device.h" 10 11 ··· 14 13 struct lima_device *dev; 15 14 struct lima_sched_context context[lima_pipe_num]; 16 15 atomic_t guilty; 16 + 17 + /* debug info */ 18 + char pname[TASK_COMM_LEN]; 19 + pid_t pid; 17 20 }; 18 21 19 22 struct lima_ctx_mgr {
+234
drivers/gpu/drm/lima/lima_devfreq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> 4 + * 5 + * Based on panfrost_devfreq.c: 6 + * Copyright 2019 Collabora ltd. 7 + */ 8 + #include <linux/clk.h> 9 + #include <linux/devfreq.h> 10 + #include <linux/devfreq_cooling.h> 11 + #include <linux/device.h> 12 + #include <linux/platform_device.h> 13 + #include <linux/pm_opp.h> 14 + #include <linux/property.h> 15 + 16 + #include "lima_device.h" 17 + #include "lima_devfreq.h" 18 + 19 + static void lima_devfreq_update_utilization(struct lima_devfreq *devfreq) 20 + { 21 + ktime_t now, last; 22 + 23 + now = ktime_get(); 24 + last = devfreq->time_last_update; 25 + 26 + if (devfreq->busy_count > 0) 27 + devfreq->busy_time += ktime_sub(now, last); 28 + else 29 + devfreq->idle_time += ktime_sub(now, last); 30 + 31 + devfreq->time_last_update = now; 32 + } 33 + 34 + static int lima_devfreq_target(struct device *dev, unsigned long *freq, 35 + u32 flags) 36 + { 37 + struct dev_pm_opp *opp; 38 + int err; 39 + 40 + opp = devfreq_recommended_opp(dev, freq, flags); 41 + if (IS_ERR(opp)) 42 + return PTR_ERR(opp); 43 + dev_pm_opp_put(opp); 44 + 45 + err = dev_pm_opp_set_rate(dev, *freq); 46 + if (err) 47 + return err; 48 + 49 + return 0; 50 + } 51 + 52 + static void lima_devfreq_reset(struct lima_devfreq *devfreq) 53 + { 54 + devfreq->busy_time = 0; 55 + devfreq->idle_time = 0; 56 + devfreq->time_last_update = ktime_get(); 57 + } 58 + 59 + static int lima_devfreq_get_dev_status(struct device *dev, 60 + struct devfreq_dev_status *status) 61 + { 62 + struct lima_device *ldev = dev_get_drvdata(dev); 63 + struct lima_devfreq *devfreq = &ldev->devfreq; 64 + unsigned long irqflags; 65 + 66 + status->current_frequency = clk_get_rate(ldev->clk_gpu); 67 + 68 + spin_lock_irqsave(&devfreq->lock, irqflags); 69 + 70 + lima_devfreq_update_utilization(devfreq); 71 + 72 + status->total_time = ktime_to_ns(ktime_add(devfreq->busy_time, 73 + devfreq->idle_time)); 74 + 
status->busy_time = ktime_to_ns(devfreq->busy_time); 75 + 76 + lima_devfreq_reset(devfreq); 77 + 78 + spin_unlock_irqrestore(&devfreq->lock, irqflags); 79 + 80 + dev_dbg(ldev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", 81 + status->busy_time, status->total_time, 82 + status->busy_time / (status->total_time / 100), 83 + status->current_frequency / 1000 / 1000); 84 + 85 + return 0; 86 + } 87 + 88 + static struct devfreq_dev_profile lima_devfreq_profile = { 89 + .polling_ms = 50, /* ~3 frames */ 90 + .target = lima_devfreq_target, 91 + .get_dev_status = lima_devfreq_get_dev_status, 92 + }; 93 + 94 + void lima_devfreq_fini(struct lima_device *ldev) 95 + { 96 + struct lima_devfreq *devfreq = &ldev->devfreq; 97 + 98 + if (devfreq->cooling) { 99 + devfreq_cooling_unregister(devfreq->cooling); 100 + devfreq->cooling = NULL; 101 + } 102 + 103 + if (devfreq->devfreq) { 104 + devm_devfreq_remove_device(&ldev->pdev->dev, 105 + devfreq->devfreq); 106 + devfreq->devfreq = NULL; 107 + } 108 + 109 + if (devfreq->opp_of_table_added) { 110 + dev_pm_opp_of_remove_table(&ldev->pdev->dev); 111 + devfreq->opp_of_table_added = false; 112 + } 113 + 114 + if (devfreq->regulators_opp_table) { 115 + dev_pm_opp_put_regulators(devfreq->regulators_opp_table); 116 + devfreq->regulators_opp_table = NULL; 117 + } 118 + 119 + if (devfreq->clkname_opp_table) { 120 + dev_pm_opp_put_clkname(devfreq->clkname_opp_table); 121 + devfreq->clkname_opp_table = NULL; 122 + } 123 + } 124 + 125 + int lima_devfreq_init(struct lima_device *ldev) 126 + { 127 + struct thermal_cooling_device *cooling; 128 + struct device *dev = &ldev->pdev->dev; 129 + struct opp_table *opp_table; 130 + struct devfreq *devfreq; 131 + struct lima_devfreq *ldevfreq = &ldev->devfreq; 132 + struct dev_pm_opp *opp; 133 + unsigned long cur_freq; 134 + int ret; 135 + 136 + if (!device_property_present(dev, "operating-points-v2")) 137 + /* Optional, continue without devfreq */ 138 + return 0; 139 + 140 + 
spin_lock_init(&ldevfreq->lock); 141 + 142 + opp_table = dev_pm_opp_set_clkname(dev, "core"); 143 + if (IS_ERR(opp_table)) { 144 + ret = PTR_ERR(opp_table); 145 + goto err_fini; 146 + } 147 + 148 + ldevfreq->clkname_opp_table = opp_table; 149 + 150 + opp_table = dev_pm_opp_set_regulators(dev, 151 + (const char *[]){ "mali" }, 152 + 1); 153 + if (IS_ERR(opp_table)) { 154 + ret = PTR_ERR(opp_table); 155 + 156 + /* Continue if the optional regulator is missing */ 157 + if (ret != -ENODEV) 158 + goto err_fini; 159 + } else { 160 + ldevfreq->regulators_opp_table = opp_table; 161 + } 162 + 163 + ret = dev_pm_opp_of_add_table(dev); 164 + if (ret) 165 + goto err_fini; 166 + ldevfreq->opp_of_table_added = true; 167 + 168 + lima_devfreq_reset(ldevfreq); 169 + 170 + cur_freq = clk_get_rate(ldev->clk_gpu); 171 + 172 + opp = devfreq_recommended_opp(dev, &cur_freq, 0); 173 + if (IS_ERR(opp)) { 174 + ret = PTR_ERR(opp); 175 + goto err_fini; 176 + } 177 + 178 + lima_devfreq_profile.initial_freq = cur_freq; 179 + dev_pm_opp_put(opp); 180 + 181 + devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile, 182 + DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL); 183 + if (IS_ERR(devfreq)) { 184 + dev_err(dev, "Couldn't initialize GPU devfreq\n"); 185 + ret = PTR_ERR(devfreq); 186 + goto err_fini; 187 + } 188 + 189 + ldevfreq->devfreq = devfreq; 190 + 191 + cooling = of_devfreq_cooling_register(dev->of_node, devfreq); 192 + if (IS_ERR(cooling)) 193 + dev_info(dev, "Failed to register cooling device\n"); 194 + else 195 + ldevfreq->cooling = cooling; 196 + 197 + return 0; 198 + 199 + err_fini: 200 + lima_devfreq_fini(ldev); 201 + return ret; 202 + } 203 + 204 + void lima_devfreq_record_busy(struct lima_devfreq *devfreq) 205 + { 206 + unsigned long irqflags; 207 + 208 + if (!devfreq->devfreq) 209 + return; 210 + 211 + spin_lock_irqsave(&devfreq->lock, irqflags); 212 + 213 + lima_devfreq_update_utilization(devfreq); 214 + 215 + devfreq->busy_count++; 216 + 217 + spin_unlock_irqrestore(&devfreq->lock, 
irqflags); 218 + } 219 + 220 + void lima_devfreq_record_idle(struct lima_devfreq *devfreq) 221 + { 222 + unsigned long irqflags; 223 + 224 + if (!devfreq->devfreq) 225 + return; 226 + 227 + spin_lock_irqsave(&devfreq->lock, irqflags); 228 + 229 + lima_devfreq_update_utilization(devfreq); 230 + 231 + WARN_ON(--devfreq->busy_count < 0); 232 + 233 + spin_unlock_irqrestore(&devfreq->lock, irqflags); 234 + }
+41
drivers/gpu/drm/lima/lima_devfreq.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> */ 3 + 4 + #ifndef __LIMA_DEVFREQ_H__ 5 + #define __LIMA_DEVFREQ_H__ 6 + 7 + #include <linux/spinlock.h> 8 + #include <linux/ktime.h> 9 + 10 + struct devfreq; 11 + struct opp_table; 12 + struct thermal_cooling_device; 13 + 14 + struct lima_device; 15 + 16 + struct lima_devfreq { 17 + struct devfreq *devfreq; 18 + struct opp_table *clkname_opp_table; 19 + struct opp_table *regulators_opp_table; 20 + struct thermal_cooling_device *cooling; 21 + bool opp_of_table_added; 22 + 23 + ktime_t busy_time; 24 + ktime_t idle_time; 25 + ktime_t time_last_update; 26 + int busy_count; 27 + /* 28 + * Protect busy_time, idle_time, time_last_update and busy_count 29 + * because these can be updated concurrently, for example by the GP 30 + * and PP interrupts. 31 + */ 32 + spinlock_t lock; 33 + }; 34 + 35 + int lima_devfreq_init(struct lima_device *ldev); 36 + void lima_devfreq_fini(struct lima_device *ldev); 37 + 38 + void lima_devfreq_record_busy(struct lima_devfreq *devfreq); 39 + void lima_devfreq_record_idle(struct lima_devfreq *devfreq); 40 + 41 + #endif
+17
drivers/gpu/drm/lima/lima_device.c
··· 214 214 struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp; 215 215 int err; 216 216 217 + pipe->ldev = dev; 218 + 217 219 err = lima_sched_pipe_init(pipe, "gp"); 218 220 if (err) 219 221 return err; ··· 245 243 { 246 244 struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; 247 245 int err, i; 246 + 247 + pipe->ldev = dev; 248 248 249 249 err = lima_sched_pipe_init(pipe, "pp"); 250 250 if (err) ··· 348 344 if (err) 349 345 goto err_out5; 350 346 347 + ldev->dump.magic = LIMA_DUMP_MAGIC; 348 + ldev->dump.version_major = LIMA_DUMP_MAJOR; 349 + ldev->dump.version_minor = LIMA_DUMP_MINOR; 350 + INIT_LIST_HEAD(&ldev->error_task_list); 351 + mutex_init(&ldev->error_task_list_lock); 352 + 351 353 dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus)); 352 354 dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu)); 353 355 ··· 380 370 void lima_device_fini(struct lima_device *ldev) 381 371 { 382 372 int i; 373 + struct lima_sched_error_task *et, *tmp; 374 + 375 + list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) { 376 + list_del(&et->list); 377 + kvfree(et); 378 + } 379 + mutex_destroy(&ldev->error_task_list_lock); 383 380 384 381 lima_fini_pp_pipe(ldev); 385 382 lima_fini_gp_pipe(ldev);
+11
drivers/gpu/drm/lima/lima_device.h
··· 6 6 7 7 #include <drm/drm_device.h> 8 8 #include <linux/delay.h> 9 + #include <linux/list.h> 10 + #include <linux/mutex.h> 9 11 10 12 #include "lima_sched.h" 13 + #include "lima_dump.h" 14 + #include "lima_devfreq.h" 11 15 12 16 enum lima_gpu_id { 13 17 lima_gpu_mali400 = 0, ··· 98 94 99 95 u32 *dlbu_cpu; 100 96 dma_addr_t dlbu_dma; 97 + 98 + struct lima_devfreq devfreq; 99 + 100 + /* debug info */ 101 + struct lima_dump_head dump; 102 + struct list_head error_task_list; 103 + struct mutex error_task_list_lock; 101 104 }; 102 105 103 106 static inline struct lima_device *
+110 -2
drivers/gpu/drm/lima/lima_drv.c
··· 10 10 #include <drm/drm_prime.h> 11 11 #include <drm/lima_drm.h> 12 12 13 + #include "lima_device.h" 13 14 #include "lima_drv.h" 14 15 #include "lima_gem.h" 15 16 #include "lima_vm.h" 16 17 17 18 int lima_sched_timeout_ms; 18 19 uint lima_heap_init_nr_pages = 8; 20 + uint lima_max_error_tasks; 19 21 20 22 MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms"); 21 23 module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444); 22 24 23 25 MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages"); 24 26 module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444); 27 + 28 + MODULE_PARM_DESC(max_error_tasks, "max number of error tasks to save"); 29 + module_param_named(max_error_tasks, lima_max_error_tasks, uint, 0644); 25 30 26 31 static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file) 27 32 { ··· 277 272 .gem_prime_mmap = drm_gem_prime_mmap, 278 273 }; 279 274 275 + struct lima_block_reader { 276 + void *dst; 277 + size_t base; 278 + size_t count; 279 + size_t off; 280 + ssize_t read; 281 + }; 282 + 283 + static bool lima_read_block(struct lima_block_reader *reader, 284 + void *src, size_t src_size) 285 + { 286 + size_t max_off = reader->base + src_size; 287 + 288 + if (reader->off < max_off) { 289 + size_t size = min_t(size_t, max_off - reader->off, 290 + reader->count); 291 + 292 + memcpy(reader->dst, src + (reader->off - reader->base), size); 293 + 294 + reader->dst += size; 295 + reader->off += size; 296 + reader->read += size; 297 + reader->count -= size; 298 + } 299 + 300 + reader->base = max_off; 301 + 302 + return !!reader->count; 303 + } 304 + 305 + static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj, 306 + struct bin_attribute *attr, char *buf, 307 + loff_t off, size_t count) 308 + { 309 + struct device *dev = kobj_to_dev(kobj); 310 + struct lima_device *ldev = dev_get_drvdata(dev); 311 + struct lima_sched_error_task *et; 312 + struct 
lima_block_reader reader = { 313 + .dst = buf, 314 + .count = count, 315 + .off = off, 316 + }; 317 + 318 + mutex_lock(&ldev->error_task_list_lock); 319 + 320 + if (lima_read_block(&reader, &ldev->dump, sizeof(ldev->dump))) { 321 + list_for_each_entry(et, &ldev->error_task_list, list) { 322 + if (!lima_read_block(&reader, et->data, et->size)) 323 + break; 324 + } 325 + } 326 + 327 + mutex_unlock(&ldev->error_task_list_lock); 328 + return reader.read; 329 + } 330 + 331 + static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj, 332 + struct bin_attribute *attr, char *buf, 333 + loff_t off, size_t count) 334 + { 335 + struct device *dev = kobj_to_dev(kobj); 336 + struct lima_device *ldev = dev_get_drvdata(dev); 337 + struct lima_sched_error_task *et, *tmp; 338 + 339 + mutex_lock(&ldev->error_task_list_lock); 340 + 341 + list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) { 342 + list_del(&et->list); 343 + kvfree(et); 344 + } 345 + 346 + ldev->dump.size = 0; 347 + ldev->dump.num_tasks = 0; 348 + 349 + mutex_unlock(&ldev->error_task_list_lock); 350 + 351 + return count; 352 + } 353 + 354 + static const struct bin_attribute lima_error_state_attr = { 355 + .attr.name = "error", 356 + .attr.mode = 0600, 357 + .size = 0, 358 + .read = lima_error_state_read, 359 + .write = lima_error_state_write, 360 + }; 361 + 280 362 static int lima_pdev_probe(struct platform_device *pdev) 281 363 { 282 364 struct lima_device *ldev; ··· 398 306 if (err) 399 307 goto err_out1; 400 308 309 + err = lima_devfreq_init(ldev); 310 + if (err) { 311 + dev_err(&pdev->dev, "Fatal error during devfreq init\n"); 312 + goto err_out2; 313 + } 314 + 401 315 /* 402 316 * Register the DRM device with the core and the connectors with 403 317 * sysfs. 
404 318 */ 405 319 err = drm_dev_register(ddev, 0); 406 320 if (err < 0) 407 - goto err_out2; 321 + goto err_out3; 322 + 323 + platform_set_drvdata(pdev, ldev); 324 + 325 + if (sysfs_create_bin_file(&ldev->dev->kobj, &lima_error_state_attr)) 326 + dev_warn(ldev->dev, "fail to create error state sysfs\n"); 408 327 409 328 return 0; 410 329 411 - err_out2: 330 + err_out3: 412 331 lima_device_fini(ldev); 332 + err_out2: 333 + lima_devfreq_fini(ldev); 413 334 err_out1: 414 335 drm_dev_put(ddev); 415 336 err_out0: ··· 435 330 struct lima_device *ldev = platform_get_drvdata(pdev); 436 331 struct drm_device *ddev = ldev->ddev; 437 332 333 + sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr); 334 + platform_set_drvdata(pdev, NULL); 438 335 drm_dev_unregister(ddev); 336 + lima_devfreq_fini(ldev); 439 337 lima_device_fini(ldev); 440 338 drm_dev_put(ddev); 441 339 lima_sched_slab_fini();
+1
drivers/gpu/drm/lima/lima_drv.h
··· 10 10 11 11 extern int lima_sched_timeout_ms; 12 12 extern uint lima_heap_init_nr_pages; 13 + extern uint lima_max_error_tasks; 13 14 14 15 struct lima_vm; 15 16 struct lima_bo;
+77
drivers/gpu/drm/lima/lima_dump.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2020 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_DUMP_H__ 5 + #define __LIMA_DUMP_H__ 6 + 7 + #include <linux/types.h> 8 + 9 + /** 10 + * dump file format for all the information to start a lima task 11 + * 12 + * top level format 13 + * | magic code "LIMA" | format version | num tasks | data size | 14 + * | reserved | reserved | reserved | reserved | 15 + * | task 1 ID | task 1 size | num chunks | reserved | task 1 data | 16 + * | task 2 ID | task 2 size | num chunks | reserved | task 2 data | 17 + * ... 18 + * 19 + * task data format 20 + * | chunk 1 ID | chunk 1 size | reserved | reserved | chunk 1 data | 21 + * | chunk 2 ID | chunk 2 size | reserved | reserved | chunk 2 data | 22 + * ... 23 + * 24 + */ 25 + 26 + #define LIMA_DUMP_MAJOR 1 27 + #define LIMA_DUMP_MINOR 0 28 + 29 + #define LIMA_DUMP_MAGIC 0x414d494c 30 + 31 + struct lima_dump_head { 32 + __u32 magic; 33 + __u16 version_major; 34 + __u16 version_minor; 35 + __u32 num_tasks; 36 + __u32 size; 37 + __u32 reserved[4]; 38 + }; 39 + 40 + #define LIMA_DUMP_TASK_GP 0 41 + #define LIMA_DUMP_TASK_PP 1 42 + #define LIMA_DUMP_TASK_NUM 2 43 + 44 + struct lima_dump_task { 45 + __u32 id; 46 + __u32 size; 47 + __u32 num_chunks; 48 + __u32 reserved; 49 + }; 50 + 51 + #define LIMA_DUMP_CHUNK_FRAME 0 52 + #define LIMA_DUMP_CHUNK_BUFFER 1 53 + #define LIMA_DUMP_CHUNK_PROCESS_NAME 2 54 + #define LIMA_DUMP_CHUNK_PROCESS_ID 3 55 + #define LIMA_DUMP_CHUNK_NUM 4 56 + 57 + struct lima_dump_chunk { 58 + __u32 id; 59 + __u32 size; 60 + __u32 reserved[2]; 61 + }; 62 + 63 + struct lima_dump_chunk_buffer { 64 + __u32 id; 65 + __u32 size; 66 + __u32 va; 67 + __u32 reserved; 68 + }; 69 + 70 + struct lima_dump_chunk_pid { 71 + __u32 id; 72 + __u32 size; 73 + __u32 pid; 74 + __u32 reserved; 75 + }; 76 + 77 + #endif
+141 -1
drivers/gpu/drm/lima/lima_sched.c
··· 3 3 4 4 #include <linux/kthread.h> 5 5 #include <linux/slab.h> 6 - #include <linux/xarray.h> 6 + #include <linux/vmalloc.h> 7 7 8 + #include "lima_devfreq.h" 8 9 #include "lima_drv.h" 9 10 #include "lima_sched.h" 10 11 #include "lima_vm.h" 11 12 #include "lima_mmu.h" 12 13 #include "lima_l2_cache.h" 13 14 #include "lima_gem.h" 15 + #include "lima_trace.h" 14 16 15 17 struct lima_fence { 16 18 struct dma_fence base; ··· 178 176 { 179 177 struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished); 180 178 179 + trace_lima_task_submit(task); 181 180 drm_sched_entity_push_job(&task->base, &context->base); 182 181 return fence; 183 182 } ··· 217 214 */ 218 215 ret = dma_fence_get(task->fence); 219 216 217 + lima_devfreq_record_busy(&pipe->ldev->devfreq); 218 + 220 219 pipe->current_task = task; 221 220 222 221 /* this is needed for MMU to work correctly, otherwise GP/PP ··· 255 250 if (last_vm) 256 251 lima_vm_put(last_vm); 257 252 253 + trace_lima_task_run(task); 254 + 258 255 pipe->error = false; 259 256 pipe->task_run(pipe, task); 260 257 261 258 return task->fence; 259 + } 260 + 261 + static void lima_sched_build_error_task_list(struct lima_sched_task *task) 262 + { 263 + struct lima_sched_error_task *et; 264 + struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched); 265 + struct lima_ip *ip = pipe->processor[0]; 266 + int pipe_id = ip->id == lima_ip_gp ? 
lima_pipe_gp : lima_pipe_pp; 267 + struct lima_device *dev = ip->dev; 268 + struct lima_sched_context *sched_ctx = 269 + container_of(task->base.entity, 270 + struct lima_sched_context, base); 271 + struct lima_ctx *ctx = 272 + container_of(sched_ctx, struct lima_ctx, context[pipe_id]); 273 + struct lima_dump_task *dt; 274 + struct lima_dump_chunk *chunk; 275 + struct lima_dump_chunk_pid *pid_chunk; 276 + struct lima_dump_chunk_buffer *buffer_chunk; 277 + u32 size, task_size, mem_size; 278 + int i; 279 + 280 + mutex_lock(&dev->error_task_list_lock); 281 + 282 + if (dev->dump.num_tasks >= lima_max_error_tasks) { 283 + dev_info(dev->dev, "fail to save task state: error task list is full\n"); 284 + goto out; 285 + } 286 + 287 + /* frame chunk */ 288 + size = sizeof(struct lima_dump_chunk) + pipe->frame_size; 289 + /* process name chunk */ 290 + size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname); 291 + /* pid chunk */ 292 + size += sizeof(struct lima_dump_chunk); 293 + /* buffer chunks */ 294 + for (i = 0; i < task->num_bos; i++) { 295 + struct lima_bo *bo = task->bos[i]; 296 + 297 + size += sizeof(struct lima_dump_chunk); 298 + size += bo->heap_size ? 
bo->heap_size : lima_bo_size(bo); 299 + } 300 + 301 + task_size = size + sizeof(struct lima_dump_task); 302 + mem_size = task_size + sizeof(*et); 303 + et = kvmalloc(mem_size, GFP_KERNEL); 304 + if (!et) { 305 + dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n", 306 + mem_size); 307 + goto out; 308 + } 309 + 310 + et->data = et + 1; 311 + et->size = task_size; 312 + 313 + dt = et->data; 314 + memset(dt, 0, sizeof(*dt)); 315 + dt->id = pipe_id; 316 + dt->size = size; 317 + 318 + chunk = (struct lima_dump_chunk *)(dt + 1); 319 + memset(chunk, 0, sizeof(*chunk)); 320 + chunk->id = LIMA_DUMP_CHUNK_FRAME; 321 + chunk->size = pipe->frame_size; 322 + memcpy(chunk + 1, task->frame, pipe->frame_size); 323 + dt->num_chunks++; 324 + 325 + chunk = (void *)(chunk + 1) + chunk->size; 326 + memset(chunk, 0, sizeof(*chunk)); 327 + chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME; 328 + chunk->size = sizeof(ctx->pname); 329 + memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname)); 330 + dt->num_chunks++; 331 + 332 + pid_chunk = (void *)(chunk + 1) + chunk->size; 333 + memset(pid_chunk, 0, sizeof(*pid_chunk)); 334 + pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID; 335 + pid_chunk->pid = ctx->pid; 336 + dt->num_chunks++; 337 + 338 + buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size; 339 + for (i = 0; i < task->num_bos; i++) { 340 + struct lima_bo *bo = task->bos[i]; 341 + void *data; 342 + 343 + memset(buffer_chunk, 0, sizeof(*buffer_chunk)); 344 + buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER; 345 + buffer_chunk->va = lima_vm_get_va(task->vm, bo); 346 + 347 + if (bo->heap_size) { 348 + buffer_chunk->size = bo->heap_size; 349 + 350 + data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT, 351 + VM_MAP, pgprot_writecombine(PAGE_KERNEL)); 352 + if (!data) { 353 + kvfree(et); 354 + goto out; 355 + } 356 + 357 + memcpy(buffer_chunk + 1, data, buffer_chunk->size); 358 + 359 + vunmap(data); 360 + } else { 361 + buffer_chunk->size = lima_bo_size(bo); 362 + 363 + data = 
drm_gem_shmem_vmap(&bo->base.base); 364 + if (IS_ERR_OR_NULL(data)) { 365 + kvfree(et); 366 + goto out; 367 + } 368 + 369 + memcpy(buffer_chunk + 1, data, buffer_chunk->size); 370 + 371 + drm_gem_shmem_vunmap(&bo->base.base, data); 372 + } 373 + 374 + buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; 375 + dt->num_chunks++; 376 + } 377 + 378 + list_add(&et->list, &dev->error_task_list); 379 + dev->dump.size += et->size; 380 + dev->dump.num_tasks++; 381 + 382 + dev_info(dev->dev, "save error task state success\n"); 383 + 384 + out: 385 + mutex_unlock(&dev->error_task_list_lock); 262 386 } 263 387 264 388 static void lima_sched_timedout_job(struct drm_sched_job *job) ··· 401 267 drm_sched_stop(&pipe->base, &task->base); 402 268 403 269 drm_sched_increase_karma(&task->base); 270 + 271 + lima_sched_build_error_task_list(task); 404 272 405 273 pipe->task_error(pipe); 406 274 ··· 420 284 421 285 pipe->current_vm = NULL; 422 286 pipe->current_task = NULL; 287 + 288 + lima_devfreq_record_idle(&pipe->ldev->devfreq); 423 289 424 290 drm_sched_resubmit_jobs(&pipe->base); 425 291 drm_sched_start(&pipe->base, true); ··· 502 364 } else { 503 365 pipe->task_fini(pipe); 504 366 dma_fence_signal(task->fence); 367 + 368 + lima_devfreq_record_idle(&pipe->ldev->devfreq); 505 369 } 506 370 }
+11
drivers/gpu/drm/lima/lima_sched.h
··· 5 5 #define __LIMA_SCHED_H__ 6 6 7 7 #include <drm/gpu_scheduler.h> 8 + #include <linux/list.h> 9 + #include <linux/xarray.h> 8 10 11 + struct lima_device; 9 12 struct lima_vm; 13 + 14 + struct lima_sched_error_task { 15 + struct list_head list; 16 + void *data; 17 + u32 size; 18 + }; 10 19 11 20 struct lima_sched_task { 12 21 struct drm_sched_job base; ··· 52 43 u64 fence_context; 53 44 u32 fence_seqno; 54 45 spinlock_t fence_lock; 46 + 47 + struct lima_device *ldev; 55 48 56 49 struct lima_sched_task *current_task; 57 50 struct lima_vm *current_vm;
+7
drivers/gpu/drm/lima/lima_trace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2020 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include "lima_sched.h" 5 + 6 + #define CREATE_TRACE_POINTS 7 + #include "lima_trace.h"
+50
drivers/gpu/drm/lima/lima_trace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2020 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #if !defined(_LIMA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _LIMA_TRACE_H_ 6 + 7 + #include <linux/tracepoint.h> 8 + 9 + #undef TRACE_SYSTEM 10 + #define TRACE_SYSTEM lima 11 + #define TRACE_INCLUDE_FILE lima_trace 12 + 13 + DECLARE_EVENT_CLASS(lima_task, 14 + TP_PROTO(struct lima_sched_task *task), 15 + TP_ARGS(task), 16 + TP_STRUCT__entry( 17 + __field(uint64_t, task_id) 18 + __field(unsigned int, context) 19 + __field(unsigned int, seqno) 20 + __string(pipe, task->base.sched->name) 21 + ), 22 + 23 + TP_fast_assign( 24 + __entry->task_id = task->base.id; 25 + __entry->context = task->base.s_fence->finished.context; 26 + __entry->seqno = task->base.s_fence->finished.seqno; 27 + __assign_str(pipe, task->base.sched->name) 28 + ), 29 + 30 + TP_printk("task=%llu, context=%u seqno=%u pipe=%s", 31 + __entry->task_id, __entry->context, __entry->seqno, 32 + __get_str(pipe)) 33 + ); 34 + 35 + DEFINE_EVENT(lima_task, lima_task_submit, 36 + TP_PROTO(struct lima_sched_task *task), 37 + TP_ARGS(task) 38 + ); 39 + 40 + DEFINE_EVENT(lima_task, lima_task_run, 41 + TP_PROTO(struct lima_sched_task *task), 42 + TP_ARGS(task) 43 + ); 44 + 45 + #endif 46 + 47 + /* This part must be outside protection */ 48 + #undef TRACE_INCLUDE_PATH 49 + #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/lima 50 + #include <trace/define_trace.h>
+11 -24
drivers/gpu/drm/mcde/mcde_drv.c
··· 72 72 #include <drm/drm_gem.h> 73 73 #include <drm/drm_gem_cma_helper.h> 74 74 #include <drm/drm_gem_framebuffer_helper.h> 75 + #include <drm/drm_managed.h> 75 76 #include <drm/drm_of.h> 76 77 #include <drm/drm_probe_helper.h> 77 78 #include <drm/drm_panel.h> ··· 184 183 ret = drm_vblank_init(drm, 1); 185 184 if (ret) { 186 185 dev_err(drm->dev, "failed to init vblank\n"); 187 - goto out_config; 186 + return ret; 188 187 } 189 188 190 189 ret = mcde_display_init(drm); 191 190 if (ret) { 192 191 dev_err(drm->dev, "failed to init display\n"); 193 - goto out_config; 192 + return ret; 194 193 } 195 194 196 195 /* ··· 204 203 mcde->bridge); 205 204 if (ret) { 206 205 dev_err(drm->dev, "failed to attach display output bridge\n"); 207 - goto out_config; 206 + return ret; 208 207 } 209 208 210 209 drm_mode_config_reset(drm); ··· 212 211 drm_fbdev_generic_setup(drm, 32); 213 212 214 213 return 0; 215 - 216 - out_config: 217 - drm_mode_config_cleanup(drm); 218 - return ret; 219 - } 220 - 221 - static void mcde_release(struct drm_device *drm) 222 - { 223 - struct mcde *mcde = drm->dev_private; 224 - 225 - drm_mode_config_cleanup(drm); 226 - drm_dev_fini(drm); 227 - kfree(mcde); 228 214 } 229 215 230 216 DEFINE_DRM_GEM_CMA_FOPS(drm_fops); ··· 219 231 static struct drm_driver mcde_drm_driver = { 220 232 .driver_features = 221 233 DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, 222 - .release = mcde_release, 223 234 .lastclose = drm_fb_helper_lastclose, 224 235 .ioctls = NULL, 225 236 .fops = &drm_fops, ··· 246 259 struct drm_device *drm = dev_get_drvdata(dev); 247 260 int ret; 248 261 249 - drm_mode_config_init(drm); 262 + ret = drmm_mode_config_init(drm); 263 + if (ret) 264 + return ret; 250 265 251 266 ret = component_bind_all(drm->dev, drm); 252 267 if (ret) { ··· 312 323 return -ENOMEM; 313 324 mcde->dev = dev; 314 325 315 - ret = drm_dev_init(&mcde->drm, &mcde_drm_driver, dev); 326 + ret = devm_drm_dev_init(dev, &mcde->drm, &mcde_drm_driver); 316 327 if (ret) { 317 328 
kfree(mcde); 318 329 return ret; 319 330 } 320 331 drm = &mcde->drm; 321 332 drm->dev_private = mcde; 333 + drmm_add_final_kfree(drm, mcde); 322 334 platform_set_drvdata(pdev, drm); 323 335 324 336 /* Enable continuous updates: this is what Linux' framebuffer expects */ ··· 331 341 if (IS_ERR(mcde->epod)) { 332 342 ret = PTR_ERR(mcde->epod); 333 343 dev_err(dev, "can't get EPOD regulator\n"); 334 - goto dev_unref; 344 + return ret; 335 345 } 336 346 ret = regulator_enable(mcde->epod); 337 347 if (ret) { 338 348 dev_err(dev, "can't enable EPOD regulator\n"); 339 - goto dev_unref; 349 + return ret; 340 350 } 341 351 mcde->vana = devm_regulator_get(dev, "vana"); 342 352 if (IS_ERR(mcde->vana)) { ··· 487 497 regulator_disable(mcde->vana); 488 498 regulator_epod_off: 489 499 regulator_disable(mcde->epod); 490 - dev_unref: 491 - drm_dev_put(drm); 492 500 return ret; 493 501 494 502 } ··· 500 512 clk_disable_unprepare(mcde->mcde_clk); 501 513 regulator_disable(mcde->vana); 502 514 regulator_disable(mcde->epod); 503 - drm_dev_put(drm); 504 515 505 516 return 0; 506 517 }
+3 -11
drivers/gpu/drm/mediatek/mtk_dpi.c
··· 20 20 #include <drm/drm_bridge.h> 21 21 #include <drm/drm_crtc.h> 22 22 #include <drm/drm_of.h> 23 + #include <drm/drm_simple_kms_helper.h> 23 24 24 25 #include "mtk_dpi_regs.h" 25 26 #include "mtk_drm_ddp_comp.h" ··· 510 509 return 0; 511 510 } 512 511 513 - static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder) 514 - { 515 - drm_encoder_cleanup(encoder); 516 - } 517 - 518 - static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = { 519 - .destroy = mtk_dpi_encoder_destroy, 520 - }; 521 - 522 512 static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder, 523 513 const struct drm_display_mode *mode, 524 514 struct drm_display_mode *adjusted_mode) ··· 588 596 return ret; 589 597 } 590 598 591 - ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs, 592 - DRM_MODE_ENCODER_TMDS, NULL); 599 + ret = drm_simple_encoder_init(drm_dev, &dpi->encoder, 600 + DRM_MODE_ENCODER_TMDS); 593 601 if (ret) { 594 602 dev_err(dev, "Failed to initialize decoder: %d\n", ret); 595 603 goto err_unregister;
+5 -8
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 162 162 } 163 163 private->mutex_dev = &pdev->dev; 164 164 165 - drm_mode_config_init(drm); 165 + ret = drmm_mode_config_init(drm); 166 + if (ret) 167 + return ret; 166 168 167 169 drm->mode_config.min_width = 64; 168 170 drm->mode_config.min_height = 64; ··· 181 179 182 180 ret = component_bind_all(drm->dev, drm); 183 181 if (ret) 184 - goto err_config_cleanup; 182 + return ret; 185 183 186 184 /* 187 185 * We currently support two fixed data streams, each optional, ··· 257 255 dma_dev->dma_parms = NULL; 258 256 err_component_unbind: 259 257 component_unbind_all(drm->dev, drm); 260 - err_config_cleanup: 261 - drm_mode_config_cleanup(drm); 262 258 263 259 return ret; 264 260 } ··· 272 272 private->dma_dev->dma_parms = NULL; 273 273 274 274 component_unbind_all(drm->dev, drm); 275 - drm_mode_config_cleanup(drm); 276 275 } 277 276 278 277 static const struct file_operations mtk_drm_fops = { ··· 347 348 if (ret < 0) 348 349 goto err_deinit; 349 350 350 - ret = drm_fbdev_generic_setup(drm, 32); 351 - if (ret) 352 - DRM_ERROR("Failed to initialize fbdev: %d\n", ret); 351 + drm_fbdev_generic_setup(drm, 32); 353 352 354 353 return 0; 355 354
+3 -11
drivers/gpu/drm/mediatek/mtk_dsi.c
··· 22 22 #include <drm/drm_panel.h> 23 23 #include <drm/drm_print.h> 24 24 #include <drm/drm_probe_helper.h> 25 + #include <drm/drm_simple_kms_helper.h> 25 26 26 27 #include "mtk_drm_ddp_comp.h" 27 28 ··· 788 787 dsi->enabled = false; 789 788 } 790 789 791 - static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder) 792 - { 793 - drm_encoder_cleanup(encoder); 794 - } 795 - 796 - static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = { 797 - .destroy = mtk_dsi_encoder_destroy, 798 - }; 799 - 800 790 static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder, 801 791 const struct drm_display_mode *mode, 802 792 struct drm_display_mode *adjusted_mode) ··· 880 888 { 881 889 int ret; 882 890 883 - ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs, 884 - DRM_MODE_ENCODER_DSI, NULL); 891 + ret = drm_simple_encoder_init(drm, &dsi->encoder, 892 + DRM_MODE_ENCODER_DSI); 885 893 if (ret) { 886 894 DRM_ERROR("Failed to encoder init to drm\n"); 887 895 return ret;
+3 -2
drivers/gpu/drm/meson/meson_drv.c
··· 284 284 /* Remove early framebuffers (ie. simplefb) */ 285 285 meson_remove_framebuffers(); 286 286 287 - drm_mode_config_init(drm); 287 + ret = drmm_mode_config_init(drm); 288 + if (ret) 289 + goto free_drm; 288 290 drm->mode_config.max_width = 3840; 289 291 drm->mode_config.max_height = 2160; 290 292 drm->mode_config.funcs = &meson_mode_config_funcs; ··· 381 379 drm_dev_unregister(drm); 382 380 drm_irq_uninstall(drm); 383 381 drm_kms_helper_poll_fini(drm); 384 - drm_mode_config_cleanup(drm); 385 382 drm_dev_put(drm); 386 383 } 387 384
+2
drivers/gpu/drm/mgag200/mgag200_drv.c
··· 77 77 if (ret) 78 78 goto err_mgag200_driver_unload; 79 79 80 + drm_fbdev_generic_setup(dev, 0); 81 + 80 82 return 0; 81 83 82 84 err_mgag200_driver_unload:
-4
drivers/gpu/drm/mgag200/mgag200_main.c
··· 181 181 dev_warn(&dev->pdev->dev, 182 182 "Could not initialize cursors. Not doing hardware cursors.\n"); 183 183 184 - r = drm_fbdev_generic_setup(mdev->dev, 0); 185 - if (r) 186 - goto err_modeset; 187 - 188 184 return 0; 189 185 190 186 err_modeset:
+5 -13
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
··· 148 148 DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); 149 149 150 150 151 - int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) 151 + void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) 152 152 { 153 153 struct drm_device *dev; 154 - int ret; 155 154 156 155 if (!minor) 157 - return 0; 156 + return; 158 157 159 158 dev = minor->dev; 160 159 161 - ret = drm_debugfs_create_files(a5xx_debugfs_list, 162 - ARRAY_SIZE(a5xx_debugfs_list), 163 - minor->debugfs_root, minor); 164 - 165 - if (ret) { 166 - DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n"); 167 - return ret; 168 - } 160 + drm_debugfs_create_files(a5xx_debugfs_list, 161 + ARRAY_SIZE(a5xx_debugfs_list), 162 + minor->debugfs_root, minor); 169 163 170 164 debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev, 171 165 &reset_fops); 172 - 173 - return 0; 174 166 }
+1 -1
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
··· 41 41 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) 42 42 43 43 #ifdef CONFIG_DEBUG_FS 44 - int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor); 44 + void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor); 45 45 #endif 46 46 47 47 /*
+3 -11
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 259 259 260 260 static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) 261 261 { 262 - struct drm_device *dev = minor->dev; 263 - int ret; 264 - 265 - ret = drm_debugfs_create_files(mdp5_debugfs_list, 266 - ARRAY_SIZE(mdp5_debugfs_list), 267 - minor->debugfs_root, minor); 268 - 269 - if (ret) { 270 - DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n"); 271 - return ret; 272 - } 262 + drm_debugfs_create_files(mdp5_debugfs_list, 263 + ARRAY_SIZE(mdp5_debugfs_list), 264 + minor->debugfs_root, minor); 273 265 274 266 return 0; 275 267 }
+6 -17
drivers/gpu/drm/msm/msm_debugfs.c
··· 214 214 return ret; 215 215 } 216 216 217 - int msm_debugfs_init(struct drm_minor *minor) 217 + void msm_debugfs_init(struct drm_minor *minor) 218 218 { 219 219 struct drm_device *dev = minor->dev; 220 220 struct msm_drm_private *priv = dev->dev_private; 221 - int ret; 222 221 223 - ret = drm_debugfs_create_files(msm_debugfs_list, 224 - ARRAY_SIZE(msm_debugfs_list), 225 - minor->debugfs_root, minor); 226 - 227 - if (ret) { 228 - DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n"); 229 - return ret; 230 - } 222 + drm_debugfs_create_files(msm_debugfs_list, 223 + ARRAY_SIZE(msm_debugfs_list), 224 + minor->debugfs_root, minor); 231 225 232 226 debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root, 233 227 dev, &msm_gpu_fops); 234 228 235 - if (priv->kms && priv->kms->funcs->debugfs_init) { 236 - ret = priv->kms->funcs->debugfs_init(priv->kms, minor); 237 - if (ret) 238 - return ret; 239 - } 240 - 241 - return ret; 229 + if (priv->kms && priv->kms->funcs->debugfs_init) 230 + priv->kms->funcs->debugfs_init(priv->kms, minor); 242 231 } 243 232 #endif 244 233
+1 -1
drivers/gpu/drm/msm/msm_debugfs.h
··· 8 8 #define __MSM_DEBUGFS_H__ 9 9 10 10 #ifdef CONFIG_DEBUG_FS 11 - int msm_debugfs_init(struct drm_minor *minor); 11 + void msm_debugfs_init(struct drm_minor *minor); 12 12 #endif 13 13 14 14 #endif /* __MSM_DEBUGFS_H__ */
+1 -1
drivers/gpu/drm/msm/msm_gpu.h
··· 57 57 void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, 58 58 struct drm_printer *p); 59 59 /* for generation specific debugfs: */ 60 - int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); 60 + void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); 61 61 #endif 62 62 unsigned long (*gpu_busy)(struct msm_gpu *gpu); 63 63 struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+2 -4
drivers/gpu/drm/nouveau/nouveau_debugfs.c
··· 217 217 {"pstate", &nouveau_pstate_fops}, 218 218 }; 219 219 220 - int 220 + void 221 221 nouveau_drm_debugfs_init(struct drm_minor *minor) 222 222 { 223 223 struct nouveau_drm *drm = nouveau_drm(minor->dev); ··· 240 240 */ 241 241 dentry = debugfs_lookup("vbios.rom", minor->debugfs_root); 242 242 if (!dentry) 243 - return 0; 243 + return; 244 244 245 245 d_inode(dentry)->i_size = drm->vbios.length; 246 246 dput(dentry); 247 - 248 - return 0; 249 247 } 250 248 251 249 int
+3 -5
drivers/gpu/drm/nouveau/nouveau_debugfs.h
··· 18 18 return nouveau_drm(dev)->debugfs; 19 19 } 20 20 21 - extern int nouveau_drm_debugfs_init(struct drm_minor *); 21 + extern void nouveau_drm_debugfs_init(struct drm_minor *); 22 22 extern int nouveau_debugfs_init(struct nouveau_drm *); 23 23 extern void nouveau_debugfs_fini(struct nouveau_drm *); 24 24 #else 25 - static inline int 25 + static inline void 26 26 nouveau_drm_debugfs_init(struct drm_minor *minor) 27 - { 28 - return 0; 29 - } 27 + {} 30 28 31 29 static inline int 32 30 nouveau_debugfs_init(struct nouveau_drm *drm)
+2 -2
drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
··· 36 36 37 37 request_module("%s%s", I2C_MODULE_PREFIX, info->type); 38 38 39 - client = i2c_new_device(&bus->i2c, info); 40 - if (!client) 39 + client = i2c_new_client_device(&bus->i2c, info); 40 + if (IS_ERR(client)) 41 41 return false; 42 42 43 43 if (!client->dev.driver ||
+7 -22
drivers/gpu/drm/omapdrm/omap_debugfs.c
··· 80 80 {"tiler_map", tiler_map_show, 0}, 81 81 }; 82 82 83 - int omap_debugfs_init(struct drm_minor *minor) 83 + void omap_debugfs_init(struct drm_minor *minor) 84 84 { 85 - struct drm_device *dev = minor->dev; 86 - int ret; 87 - 88 - ret = drm_debugfs_create_files(omap_debugfs_list, 89 - ARRAY_SIZE(omap_debugfs_list), 90 - minor->debugfs_root, minor); 91 - 92 - if (ret) { 93 - dev_err(dev->dev, "could not install omap_debugfs_list\n"); 94 - return ret; 95 - } 85 + drm_debugfs_create_files(omap_debugfs_list, 86 + ARRAY_SIZE(omap_debugfs_list), 87 + minor->debugfs_root, minor); 96 88 97 89 if (dmm_is_available()) 98 - ret = drm_debugfs_create_files(omap_dmm_debugfs_list, 99 - ARRAY_SIZE(omap_dmm_debugfs_list), 100 - minor->debugfs_root, minor); 101 - 102 - if (ret) { 103 - dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n"); 104 - return ret; 105 - } 106 - 107 - return ret; 90 + drm_debugfs_create_files(omap_dmm_debugfs_list, 91 + ARRAY_SIZE(omap_dmm_debugfs_list), 92 + minor->debugfs_root, minor); 108 93 } 109 94 110 95 #endif
+1 -1
drivers/gpu/drm/omapdrm/omap_drv.h
··· 82 82 }; 83 83 84 84 85 - int omap_debugfs_init(struct drm_minor *minor); 85 + void omap_debugfs_init(struct drm_minor *minor); 86 86 87 87 #endif /* __OMAPDRM_DRV_H__ */
+11
drivers/gpu/drm/panel/Kconfig
··· 137 137 24 bit RGB per pixel. It provides a MIPI DSI interface to 138 138 the host and has a built-in LED backlight. 139 139 140 + config DRM_PANEL_LEADTEK_LTK050H3146W 141 + tristate "Leadtek LTK050H3146W panel" 142 + depends on OF 143 + depends on DRM_MIPI_DSI 144 + depends on BACKLIGHT_CLASS_DEVICE 145 + help 146 + Say Y here if you want to enable support for Leadtek LTK050H3146W 147 + TFT-LCD modules. The panel has a 720x1280 resolution and uses 148 + 24 bit RGB per pixel. It provides a MIPI DSI interface to 149 + the host and has a built-in LED backlight. 150 + 140 151 config DRM_PANEL_LEADTEK_LTK500HD1829 141 152 tristate "Leadtek LTK500HD1829 panel" 142 153 depends on OF
+1
drivers/gpu/drm/panel/Makefile
··· 12 12 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o 13 13 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o 14 14 obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o 15 + obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o 15 16 obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o 16 17 obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o 17 18 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+31
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
··· 696 696 .init_cmds = auo_b101uan08_3_init_cmd, 697 697 }; 698 698 699 + static const struct drm_display_mode boe_tv105wum_nw0_default_mode = { 700 + .clock = 159260, 701 + .hdisplay = 1200, 702 + .hsync_start = 1200 + 80, 703 + .hsync_end = 1200 + 80 + 24, 704 + .htotal = 1200 + 80 + 24 + 60, 705 + .vdisplay = 1920, 706 + .vsync_start = 1920 + 10, 707 + .vsync_end = 1920 + 10 + 2, 708 + .vtotal = 1920 + 10 + 2 + 14, 709 + .vrefresh = 60, 710 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 711 + }; 712 + 713 + static const struct panel_desc boe_tv105wum_nw0_desc = { 714 + .modes = &boe_tv105wum_nw0_default_mode, 715 + .bpc = 8, 716 + .size = { 717 + .width_mm = 141, 718 + .height_mm = 226, 719 + }, 720 + .lanes = 4, 721 + .format = MIPI_DSI_FMT_RGB888, 722 + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | 723 + MIPI_DSI_MODE_LPM, 724 + .init_cmds = boe_init_cmd, 725 + }; 726 + 699 727 static int boe_panel_get_modes(struct drm_panel *panel, 700 728 struct drm_connector *connector) 701 729 { ··· 861 833 }, 862 834 { .compatible = "auo,b101uan08.3", 863 835 .data = &auo_b101uan08_3_desc 836 + }, 837 + { .compatible = "boe,tv105wum-nw0", 838 + .data = &boe_tv105wum_nw0_desc 864 839 }, 865 840 { /* sentinel */ } 866 841 };
+691
drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH 4 + */ 5 + 6 + #include <linux/delay.h> 7 + #include <linux/gpio/consumer.h> 8 + #include <linux/media-bus-format.h> 9 + #include <linux/module.h> 10 + #include <linux/of.h> 11 + #include <linux/of_device.h> 12 + #include <linux/regulator/consumer.h> 13 + 14 + #include <video/display_timing.h> 15 + #include <video/mipi_display.h> 16 + 17 + #include <drm/drm_mipi_dsi.h> 18 + #include <drm/drm_modes.h> 19 + #include <drm/drm_panel.h> 20 + #include <drm/drm_print.h> 21 + 22 + struct ltk050h3146w_cmd { 23 + char cmd; 24 + char data; 25 + }; 26 + 27 + struct ltk050h3146w; 28 + struct ltk050h3146w_desc { 29 + const struct drm_display_mode *mode; 30 + int (*init)(struct ltk050h3146w *ctx); 31 + }; 32 + 33 + struct ltk050h3146w { 34 + struct device *dev; 35 + struct drm_panel panel; 36 + struct gpio_desc *reset_gpio; 37 + struct regulator *vci; 38 + struct regulator *iovcc; 39 + const struct ltk050h3146w_desc *panel_desc; 40 + bool prepared; 41 + }; 42 + 43 + static const struct ltk050h3146w_cmd page1_cmds[] = { 44 + { 0x22, 0x0A }, /* BGR SS GS */ 45 + { 0x31, 0x00 }, /* column inversion */ 46 + { 0x53, 0xA2 }, /* VCOM1 */ 47 + { 0x55, 0xA2 }, /* VCOM2 */ 48 + { 0x50, 0x81 }, /* VREG1OUT=5V */ 49 + { 0x51, 0x85 }, /* VREG2OUT=-5V */ 50 + { 0x62, 0x0D }, /* EQT Time setting */ 51 + /* 52 + * The vendor init selected page 1 here _again_ 53 + * Is this supposed to be page 2? 
54 + */ 55 + { 0xA0, 0x00 }, 56 + { 0xA1, 0x1A }, 57 + { 0xA2, 0x28 }, 58 + { 0xA3, 0x13 }, 59 + { 0xA4, 0x16 }, 60 + { 0xA5, 0x29 }, 61 + { 0xA6, 0x1D }, 62 + { 0xA7, 0x1E }, 63 + { 0xA8, 0x84 }, 64 + { 0xA9, 0x1C }, 65 + { 0xAA, 0x28 }, 66 + { 0xAB, 0x75 }, 67 + { 0xAC, 0x1A }, 68 + { 0xAD, 0x19 }, 69 + { 0xAE, 0x4D }, 70 + { 0xAF, 0x22 }, 71 + { 0xB0, 0x28 }, 72 + { 0xB1, 0x54 }, 73 + { 0xB2, 0x66 }, 74 + { 0xB3, 0x39 }, 75 + { 0xC0, 0x00 }, 76 + { 0xC1, 0x1A }, 77 + { 0xC2, 0x28 }, 78 + { 0xC3, 0x13 }, 79 + { 0xC4, 0x16 }, 80 + { 0xC5, 0x29 }, 81 + { 0xC6, 0x1D }, 82 + { 0xC7, 0x1E }, 83 + { 0xC8, 0x84 }, 84 + { 0xC9, 0x1C }, 85 + { 0xCA, 0x28 }, 86 + { 0xCB, 0x75 }, 87 + { 0xCC, 0x1A }, 88 + { 0xCD, 0x19 }, 89 + { 0xCE, 0x4D }, 90 + { 0xCF, 0x22 }, 91 + { 0xD0, 0x28 }, 92 + { 0xD1, 0x54 }, 93 + { 0xD2, 0x66 }, 94 + { 0xD3, 0x39 }, 95 + }; 96 + 97 + static const struct ltk050h3146w_cmd page3_cmds[] = { 98 + { 0x01, 0x00 }, 99 + { 0x02, 0x00 }, 100 + { 0x03, 0x73 }, 101 + { 0x04, 0x00 }, 102 + { 0x05, 0x00 }, 103 + { 0x06, 0x0a }, 104 + { 0x07, 0x00 }, 105 + { 0x08, 0x00 }, 106 + { 0x09, 0x01 }, 107 + { 0x0a, 0x00 }, 108 + { 0x0b, 0x00 }, 109 + { 0x0c, 0x01 }, 110 + { 0x0d, 0x00 }, 111 + { 0x0e, 0x00 }, 112 + { 0x0f, 0x1d }, 113 + { 0x10, 0x1d }, 114 + { 0x11, 0x00 }, 115 + { 0x12, 0x00 }, 116 + { 0x13, 0x00 }, 117 + { 0x14, 0x00 }, 118 + { 0x15, 0x00 }, 119 + { 0x16, 0x00 }, 120 + { 0x17, 0x00 }, 121 + { 0x18, 0x00 }, 122 + { 0x19, 0x00 }, 123 + { 0x1a, 0x00 }, 124 + { 0x1b, 0x00 }, 125 + { 0x1c, 0x00 }, 126 + { 0x1d, 0x00 }, 127 + { 0x1e, 0x40 }, 128 + { 0x1f, 0x80 }, 129 + { 0x20, 0x06 }, 130 + { 0x21, 0x02 }, 131 + { 0x22, 0x00 }, 132 + { 0x23, 0x00 }, 133 + { 0x24, 0x00 }, 134 + { 0x25, 0x00 }, 135 + { 0x26, 0x00 }, 136 + { 0x27, 0x00 }, 137 + { 0x28, 0x33 }, 138 + { 0x29, 0x03 }, 139 + { 0x2a, 0x00 }, 140 + { 0x2b, 0x00 }, 141 + { 0x2c, 0x00 }, 142 + { 0x2d, 0x00 }, 143 + { 0x2e, 0x00 }, 144 + { 0x2f, 0x00 }, 145 + { 0x30, 0x00 }, 146 + { 0x31, 0x00 }, 147 
+ { 0x32, 0x00 }, 148 + { 0x33, 0x00 }, 149 + { 0x34, 0x04 }, 150 + { 0x35, 0x00 }, 151 + { 0x36, 0x00 }, 152 + { 0x37, 0x00 }, 153 + { 0x38, 0x3C }, 154 + { 0x39, 0x35 }, 155 + { 0x3A, 0x01 }, 156 + { 0x3B, 0x40 }, 157 + { 0x3C, 0x00 }, 158 + { 0x3D, 0x01 }, 159 + { 0x3E, 0x00 }, 160 + { 0x3F, 0x00 }, 161 + { 0x40, 0x00 }, 162 + { 0x41, 0x88 }, 163 + { 0x42, 0x00 }, 164 + { 0x43, 0x00 }, 165 + { 0x44, 0x1F }, 166 + { 0x50, 0x01 }, 167 + { 0x51, 0x23 }, 168 + { 0x52, 0x45 }, 169 + { 0x53, 0x67 }, 170 + { 0x54, 0x89 }, 171 + { 0x55, 0xab }, 172 + { 0x56, 0x01 }, 173 + { 0x57, 0x23 }, 174 + { 0x58, 0x45 }, 175 + { 0x59, 0x67 }, 176 + { 0x5a, 0x89 }, 177 + { 0x5b, 0xab }, 178 + { 0x5c, 0xcd }, 179 + { 0x5d, 0xef }, 180 + { 0x5e, 0x11 }, 181 + { 0x5f, 0x01 }, 182 + { 0x60, 0x00 }, 183 + { 0x61, 0x15 }, 184 + { 0x62, 0x14 }, 185 + { 0x63, 0x0E }, 186 + { 0x64, 0x0F }, 187 + { 0x65, 0x0C }, 188 + { 0x66, 0x0D }, 189 + { 0x67, 0x06 }, 190 + { 0x68, 0x02 }, 191 + { 0x69, 0x07 }, 192 + { 0x6a, 0x02 }, 193 + { 0x6b, 0x02 }, 194 + { 0x6c, 0x02 }, 195 + { 0x6d, 0x02 }, 196 + { 0x6e, 0x02 }, 197 + { 0x6f, 0x02 }, 198 + { 0x70, 0x02 }, 199 + { 0x71, 0x02 }, 200 + { 0x72, 0x02 }, 201 + { 0x73, 0x02 }, 202 + { 0x74, 0x02 }, 203 + { 0x75, 0x01 }, 204 + { 0x76, 0x00 }, 205 + { 0x77, 0x14 }, 206 + { 0x78, 0x15 }, 207 + { 0x79, 0x0E }, 208 + { 0x7a, 0x0F }, 209 + { 0x7b, 0x0C }, 210 + { 0x7c, 0x0D }, 211 + { 0x7d, 0x06 }, 212 + { 0x7e, 0x02 }, 213 + { 0x7f, 0x07 }, 214 + { 0x80, 0x02 }, 215 + { 0x81, 0x02 }, 216 + { 0x82, 0x02 }, 217 + { 0x83, 0x02 }, 218 + { 0x84, 0x02 }, 219 + { 0x85, 0x02 }, 220 + { 0x86, 0x02 }, 221 + { 0x87, 0x02 }, 222 + { 0x88, 0x02 }, 223 + { 0x89, 0x02 }, 224 + { 0x8A, 0x02 }, 225 + }; 226 + 227 + static const struct ltk050h3146w_cmd page4_cmds[] = { 228 + { 0x70, 0x00 }, 229 + { 0x71, 0x00 }, 230 + { 0x82, 0x0F }, /* VGH_MOD clamp level=15v */ 231 + { 0x84, 0x0F }, /* VGH clamp level 15V */ 232 + { 0x85, 0x0D }, /* VGL clamp level (-10V) */ 233 + { 0x32, 
0xAC }, 234 + { 0x8C, 0x80 }, 235 + { 0x3C, 0xF5 }, 236 + { 0xB5, 0x07 }, /* GAMMA OP */ 237 + { 0x31, 0x45 }, /* SOURCE OP */ 238 + { 0x3A, 0x24 }, /* PS_EN OFF */ 239 + { 0x88, 0x33 }, /* LVD */ 240 + }; 241 + 242 + static inline 243 + struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel) 244 + { 245 + return container_of(panel, struct ltk050h3146w, panel); 246 + } 247 + 248 + #define dsi_dcs_write_seq(dsi, cmd, seq...) do { \ 249 + static const u8 d[] = { seq }; \ 250 + int ret; \ 251 + ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \ 252 + if (ret < 0) \ 253 + return ret; \ 254 + } while (0) 255 + 256 + static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx) 257 + { 258 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 259 + int ret; 260 + 261 + /* 262 + * Init sequence was supplied by the panel vendor without much 263 + * documentation. 264 + */ 265 + dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8); 266 + dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, 267 + 0x01); 268 + dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5); 269 + dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5); 270 + dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); 271 + 272 + dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07); 273 + dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, 274 + 0x28, 0x04, 0xcc, 0xcc, 0xcc); 275 + dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04); 276 + dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2); 277 + dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03); 278 + dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12); 279 + dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80, 280 + 0x80); 281 + dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, 282 + 0x16, 0x00, 0x00); 283 + dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, 284 + 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, 285 + 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, 286 + 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, 287 + 
0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); 288 + dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, 289 + 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, 290 + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); 291 + dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, 292 + 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, 293 + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); 294 + dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, 295 + 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, 296 + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); 297 + dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, 298 + 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, 299 + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); 300 + dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, 301 + 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, 302 + 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); 303 + dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, 304 + 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05, 305 + 0x21, 0x00, 0x60); 306 + dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00); 307 + dsi_dcs_write_seq(dsi, 0xde, 0x02); 308 + dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c); 309 + dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04); 310 + dsi_dcs_write_seq(dsi, 0xc1, 0x11); 311 + dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); 312 + dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84); 313 + dsi_dcs_write_seq(dsi, 0xde, 0x00); 314 + 315 + ret = mipi_dsi_dcs_set_tear_on(dsi, 1); 316 + if (ret < 0) { 317 + DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n", 318 + ret); 319 + return ret; 320 + } 321 + 322 + msleep(60); 323 + 324 + return 0; 325 + } 326 + 327 + static const struct drm_display_mode ltk050h3146w_mode = { 328 + .hdisplay = 720, 329 + .hsync_start = 720 + 42, 330 + .hsync_end = 720 + 42 + 8, 331 + .htotal = 720 + 42 + 8 + 42, 332 + .vdisplay = 1280, 333 + .vsync_start = 1280 + 12, 334 + 
.vsync_end = 1280 + 12 + 4, 335 + .vtotal = 1280 + 12 + 4 + 18, 336 + .clock = 64018, 337 + .width_mm = 62, 338 + .height_mm = 110, 339 + }; 340 + 341 + static const struct ltk050h3146w_desc ltk050h3146w_data = { 342 + .mode = &ltk050h3146w_mode, 343 + .init = ltk050h3146w_init_sequence, 344 + }; 345 + 346 + static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page) 347 + { 348 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 349 + u8 d[3] = { 0x98, 0x81, page }; 350 + 351 + return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d)); 352 + } 353 + 354 + static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page, 355 + const struct ltk050h3146w_cmd *cmds, 356 + int num) 357 + { 358 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 359 + int i, ret; 360 + 361 + ret = ltk050h3146w_a2_select_page(ctx, page); 362 + if (ret < 0) { 363 + DRM_DEV_ERROR(ctx->dev, "failed to select page %d: %d\n", 364 + page, ret); 365 + return ret; 366 + } 367 + 368 + for (i = 0; i < num; i++) { 369 + ret = mipi_dsi_generic_write(dsi, &cmds[i], 370 + sizeof(struct ltk050h3146w_cmd)); 371 + if (ret < 0) { 372 + DRM_DEV_ERROR(ctx->dev, 373 + "failed to write page %d init cmds: %d\n", 374 + page, ret); 375 + return ret; 376 + } 377 + } 378 + 379 + return 0; 380 + } 381 + 382 + static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx) 383 + { 384 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 385 + int ret; 386 + 387 + /* 388 + * Init sequence was supplied by the panel vendor without much 389 + * documentation. 
390 + */ 391 + ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds, 392 + ARRAY_SIZE(page3_cmds)); 393 + if (ret < 0) 394 + return ret; 395 + 396 + ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds, 397 + ARRAY_SIZE(page4_cmds)); 398 + if (ret < 0) 399 + return ret; 400 + 401 + ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds, 402 + ARRAY_SIZE(page1_cmds)); 403 + if (ret < 0) 404 + return ret; 405 + 406 + ret = ltk050h3146w_a2_select_page(ctx, 0); 407 + if (ret < 0) { 408 + DRM_DEV_ERROR(ctx->dev, "failed to select page 0: %d\n", ret); 409 + return ret; 410 + } 411 + 412 + /* vendor code called this without param, where there should be one */ 413 + ret = mipi_dsi_dcs_set_tear_on(dsi, 0); 414 + if (ret < 0) { 415 + DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n", 416 + ret); 417 + return ret; 418 + } 419 + 420 + msleep(60); 421 + 422 + return 0; 423 + } 424 + 425 + static const struct drm_display_mode ltk050h3146w_a2_mode = { 426 + .hdisplay = 720, 427 + .hsync_start = 720 + 42, 428 + .hsync_end = 720 + 42 + 10, 429 + .htotal = 720 + 42 + 10 + 60, 430 + .vdisplay = 1280, 431 + .vsync_start = 1280 + 18, 432 + .vsync_end = 1280 + 18 + 4, 433 + .vtotal = 1280 + 18 + 4 + 12, 434 + .clock = 65595, 435 + .width_mm = 62, 436 + .height_mm = 110, 437 + }; 438 + 439 + static const struct ltk050h3146w_desc ltk050h3146w_a2_data = { 440 + .mode = &ltk050h3146w_a2_mode, 441 + .init = ltk050h3146w_a2_init_sequence, 442 + }; 443 + 444 + static int ltk050h3146w_unprepare(struct drm_panel *panel) 445 + { 446 + struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); 447 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 448 + int ret; 449 + 450 + if (!ctx->prepared) 451 + return 0; 452 + 453 + ret = mipi_dsi_dcs_set_display_off(dsi); 454 + if (ret < 0) { 455 + DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n", 456 + ret); 457 + return ret; 458 + } 459 + 460 + mipi_dsi_dcs_enter_sleep_mode(dsi); 461 + if (ret < 0) { 462 + DRM_DEV_ERROR(ctx->dev, "failed 
to enter sleep mode: %d\n", 463 + ret); 464 + return ret; 465 + } 466 + 467 + regulator_disable(ctx->iovcc); 468 + regulator_disable(ctx->vci); 469 + 470 + ctx->prepared = false; 471 + 472 + return 0; 473 + } 474 + 475 + static int ltk050h3146w_prepare(struct drm_panel *panel) 476 + { 477 + struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); 478 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 479 + int ret; 480 + 481 + if (ctx->prepared) 482 + return 0; 483 + 484 + DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n"); 485 + ret = regulator_enable(ctx->vci); 486 + if (ret < 0) { 487 + DRM_DEV_ERROR(ctx->dev, 488 + "Failed to enable vci supply: %d\n", ret); 489 + return ret; 490 + } 491 + ret = regulator_enable(ctx->iovcc); 492 + if (ret < 0) { 493 + DRM_DEV_ERROR(ctx->dev, 494 + "Failed to enable iovcc supply: %d\n", ret); 495 + goto disable_vci; 496 + } 497 + 498 + gpiod_set_value_cansleep(ctx->reset_gpio, 1); 499 + usleep_range(5000, 6000); 500 + gpiod_set_value_cansleep(ctx->reset_gpio, 0); 501 + msleep(20); 502 + 503 + ret = ctx->panel_desc->init(ctx); 504 + if (ret < 0) { 505 + DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n", 506 + ret); 507 + goto disable_iovcc; 508 + } 509 + 510 + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); 511 + if (ret < 0) { 512 + DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret); 513 + goto disable_iovcc; 514 + } 515 + 516 + /* T9: 120ms */ 517 + msleep(120); 518 + 519 + ret = mipi_dsi_dcs_set_display_on(dsi); 520 + if (ret < 0) { 521 + DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret); 522 + goto disable_iovcc; 523 + } 524 + 525 + msleep(50); 526 + 527 + ctx->prepared = true; 528 + 529 + return 0; 530 + 531 + disable_iovcc: 532 + regulator_disable(ctx->iovcc); 533 + disable_vci: 534 + regulator_disable(ctx->vci); 535 + return ret; 536 + } 537 + 538 + static int ltk050h3146w_get_modes(struct drm_panel *panel, 539 + struct drm_connector *connector) 540 + { 541 + struct ltk050h3146w 
*ctx = panel_to_ltk050h3146w(panel); 542 + struct drm_display_mode *mode; 543 + 544 + mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode); 545 + if (!mode) 546 + return -ENOMEM; 547 + 548 + drm_mode_set_name(mode); 549 + 550 + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 551 + connector->display_info.width_mm = mode->width_mm; 552 + connector->display_info.height_mm = mode->height_mm; 553 + drm_mode_probed_add(connector, mode); 554 + 555 + return 1; 556 + } 557 + 558 + static const struct drm_panel_funcs ltk050h3146w_funcs = { 559 + .unprepare = ltk050h3146w_unprepare, 560 + .prepare = ltk050h3146w_prepare, 561 + .get_modes = ltk050h3146w_get_modes, 562 + }; 563 + 564 + static int ltk050h3146w_probe(struct mipi_dsi_device *dsi) 565 + { 566 + struct device *dev = &dsi->dev; 567 + struct ltk050h3146w *ctx; 568 + int ret; 569 + 570 + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 571 + if (!ctx) 572 + return -ENOMEM; 573 + 574 + ctx->panel_desc = of_device_get_match_data(dev); 575 + if (!ctx->panel_desc) 576 + return -EINVAL; 577 + 578 + ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); 579 + if (IS_ERR(ctx->reset_gpio)) { 580 + DRM_DEV_ERROR(dev, "cannot get reset gpio\n"); 581 + return PTR_ERR(ctx->reset_gpio); 582 + } 583 + 584 + ctx->vci = devm_regulator_get(dev, "vci"); 585 + if (IS_ERR(ctx->vci)) { 586 + ret = PTR_ERR(ctx->vci); 587 + if (ret != -EPROBE_DEFER) 588 + DRM_DEV_ERROR(dev, 589 + "Failed to request vci regulator: %d\n", 590 + ret); 591 + return ret; 592 + } 593 + 594 + ctx->iovcc = devm_regulator_get(dev, "iovcc"); 595 + if (IS_ERR(ctx->iovcc)) { 596 + ret = PTR_ERR(ctx->iovcc); 597 + if (ret != -EPROBE_DEFER) 598 + DRM_DEV_ERROR(dev, 599 + "Failed to request iovcc regulator: %d\n", 600 + ret); 601 + return ret; 602 + } 603 + 604 + mipi_dsi_set_drvdata(dsi, ctx); 605 + 606 + ctx->dev = dev; 607 + 608 + dsi->lanes = 4; 609 + dsi->format = MIPI_DSI_FMT_RGB888; 610 + dsi->mode_flags = 
MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | 611 + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; 612 + 613 + drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs, 614 + DRM_MODE_CONNECTOR_DSI); 615 + 616 + ret = drm_panel_of_backlight(&ctx->panel); 617 + if (ret) 618 + return ret; 619 + 620 + drm_panel_add(&ctx->panel); 621 + 622 + ret = mipi_dsi_attach(dsi); 623 + if (ret < 0) { 624 + DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret); 625 + drm_panel_remove(&ctx->panel); 626 + return ret; 627 + } 628 + 629 + return 0; 630 + } 631 + 632 + static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi) 633 + { 634 + struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi); 635 + int ret; 636 + 637 + ret = drm_panel_unprepare(&ctx->panel); 638 + if (ret < 0) 639 + DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n", 640 + ret); 641 + 642 + ret = drm_panel_disable(&ctx->panel); 643 + if (ret < 0) 644 + DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n", 645 + ret); 646 + } 647 + 648 + static int ltk050h3146w_remove(struct mipi_dsi_device *dsi) 649 + { 650 + struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi); 651 + int ret; 652 + 653 + ltk050h3146w_shutdown(dsi); 654 + 655 + ret = mipi_dsi_detach(dsi); 656 + if (ret < 0) 657 + DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n", 658 + ret); 659 + 660 + drm_panel_remove(&ctx->panel); 661 + 662 + return 0; 663 + } 664 + 665 + static const struct of_device_id ltk050h3146w_of_match[] = { 666 + { 667 + .compatible = "leadtek,ltk050h3146w", 668 + .data = &ltk050h3146w_data, 669 + }, 670 + { 671 + .compatible = "leadtek,ltk050h3146w-a2", 672 + .data = &ltk050h3146w_a2_data, 673 + }, 674 + { /* sentinel */ } 675 + }; 676 + MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match); 677 + 678 + static struct mipi_dsi_driver ltk050h3146w_driver = { 679 + .driver = { 680 + .name = "panel-leadtek-ltk050h3146w", 681 + .of_match_table = ltk050h3146w_of_match, 682 + }, 683 + .probe = ltk050h3146w_probe, 684 
+ .remove = ltk050h3146w_remove, 685 + .shutdown = ltk050h3146w_shutdown, 686 + }; 687 + module_mipi_dsi_driver(ltk050h3146w_driver); 688 + 689 + MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>"); 690 + MODULE_DESCRIPTION("DRM driver for Leadtek LTK050H3146W MIPI DSI panel"); 691 + MODULE_LICENSE("GPL v2");
+1 -1
drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
··· 377 377 .vsync_end = 1280 + 30 + 4, 378 378 .vtotal = 1280 + 30 + 4 + 12, 379 379 .vrefresh = 60, 380 - .clock = 41600, 380 + .clock = 69217, 381 381 .width_mm = 62, 382 382 .height_mm = 110, 383 383 };
+36 -10
drivers/gpu/drm/panel/panel-novatek-nt39016.c
··· 49 49 #define NT39016_SYSTEM_STANDBY BIT(1) 50 50 51 51 struct nt39016_panel_info { 52 - struct drm_display_mode display_mode; 52 + const struct drm_display_mode *display_modes; 53 + unsigned int num_modes; 53 54 u16 width_mm, height_mm; 54 55 u32 bus_format, bus_flags; 55 56 }; ··· 213 212 struct nt39016 *panel = to_nt39016(drm_panel); 214 213 const struct nt39016_panel_info *panel_info = panel->panel_info; 215 214 struct drm_display_mode *mode; 215 + unsigned int i; 216 216 217 - mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode); 218 - if (!mode) 219 - return -ENOMEM; 217 + for (i = 0; i < panel_info->num_modes; i++) { 218 + mode = drm_mode_duplicate(connector->dev, 219 + &panel_info->display_modes[i]); 220 + if (!mode) 221 + return -ENOMEM; 220 222 221 - drm_mode_set_name(mode); 223 + drm_mode_set_name(mode); 222 224 223 - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 224 - drm_mode_probed_add(connector, mode); 225 + mode->type = DRM_MODE_TYPE_DRIVER; 226 + if (panel_info->num_modes == 1) 227 + mode->type |= DRM_MODE_TYPE_PREFERRED; 228 + 229 + drm_mode_probed_add(connector, mode); 230 + } 225 231 226 232 connector->display_info.bpc = 8; 227 233 connector->display_info.width_mm = panel_info->width_mm; ··· 238 230 &panel_info->bus_format, 1); 239 231 connector->display_info.bus_flags = panel_info->bus_flags; 240 232 241 - return 1; 233 + return panel_info->num_modes; 242 234 } 243 235 244 236 static const struct drm_panel_funcs nt39016_funcs = { ··· 324 316 return 0; 325 317 } 326 318 327 - static const struct nt39016_panel_info kd035g6_info = { 328 - .display_mode = { 319 + static const struct drm_display_mode kd035g6_display_modes[] = { 320 + { /* 60 Hz */ 329 321 .clock = 6000, 330 322 .hdisplay = 320, 331 323 .hsync_start = 320 + 10, ··· 338 330 .vrefresh = 60, 339 331 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 340 332 }, 333 + { /* 50 Hz */ 334 + .clock = 5400, 335 + .hdisplay = 320, 336 + .hsync_start = 320 + 
42, 337 + .hsync_end = 320 + 42 + 50, 338 + .htotal = 320 + 42 + 50 + 20, 339 + .vdisplay = 240, 340 + .vsync_start = 240 + 5, 341 + .vsync_end = 240 + 5 + 1, 342 + .vtotal = 240 + 5 + 1 + 4, 343 + .vrefresh = 50, 344 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 345 + }, 346 + }; 347 + 348 + static const struct nt39016_panel_info kd035g6_info = { 349 + .display_modes = kd035g6_display_modes, 350 + .num_modes = ARRAY_SIZE(kd035g6_display_modes), 341 351 .width_mm = 71, 342 352 .height_mm = 53, 343 353 .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+29
drivers/gpu/drm/panel/panel-simple.c
··· 3065 3065 .bus_format = MEDIA_BUS_FMT_RGB666_1X18, 3066 3066 }; 3067 3067 3068 + static const struct drm_display_mode starry_kr070pe2t_mode = { 3069 + .clock = 33000, 3070 + .hdisplay = 800, 3071 + .hsync_start = 800 + 209, 3072 + .hsync_end = 800 + 209 + 1, 3073 + .htotal = 800 + 209 + 1 + 45, 3074 + .vdisplay = 480, 3075 + .vsync_start = 480 + 22, 3076 + .vsync_end = 480 + 22 + 1, 3077 + .vtotal = 480 + 22 + 1 + 22, 3078 + .vrefresh = 60, 3079 + }; 3080 + 3081 + static const struct panel_desc starry_kr070pe2t = { 3082 + .modes = &starry_kr070pe2t_mode, 3083 + .num_modes = 1, 3084 + .bpc = 8, 3085 + .size = { 3086 + .width = 152, 3087 + .height = 86, 3088 + }, 3089 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 3090 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, 3091 + .connector_type = DRM_MODE_CONNECTOR_LVDS, 3092 + }; 3093 + 3068 3094 static const struct drm_display_mode starry_kr122ea0sra_mode = { 3069 3095 .clock = 147000, 3070 3096 .hdisplay = 1920, ··· 3741 3715 }, { 3742 3716 .compatible = "shelly,sca07010-bfn-lnn", 3743 3717 .data = &shelly_sca07010_bfn_lnn, 3718 + }, { 3719 + .compatible = "starry,kr070pe2t", 3720 + .data = &starry_kr070pe2t, 3744 3721 }, { 3745 3722 .compatible = "starry,kr122ea0sra", 3746 3723 .data = &starry_kr122ea0sra,
+4 -4
drivers/gpu/drm/pl111/pl111_debugfs.c
··· 51 51 {"regs", pl111_debugfs_regs, 0}, 52 52 }; 53 53 54 - int 54 + void 55 55 pl111_debugfs_init(struct drm_minor *minor) 56 56 { 57 - return drm_debugfs_create_files(pl111_debugfs_list, 58 - ARRAY_SIZE(pl111_debugfs_list), 59 - minor->debugfs_root, minor); 57 + drm_debugfs_create_files(pl111_debugfs_list, 58 + ARRAY_SIZE(pl111_debugfs_list), 59 + minor->debugfs_root, minor); 60 60 }
+1 -1
drivers/gpu/drm/pl111/pl111_drm.h
··· 84 84 85 85 int pl111_display_init(struct drm_device *dev); 86 86 irqreturn_t pl111_irq(int irq, void *data); 87 - int pl111_debugfs_init(struct drm_minor *minor); 87 + void pl111_debugfs_init(struct drm_minor *minor); 88 88 89 89 #endif /* _PL111_DRM_H_ */
+6 -6
drivers/gpu/drm/pl111/pl111_drv.c
··· 90 90 struct drm_panel *panel = NULL; 91 91 struct drm_bridge *bridge = NULL; 92 92 bool defer = false; 93 - int ret = 0; 93 + int ret; 94 94 int i; 95 95 96 - drm_mode_config_init(dev); 96 + ret = drmm_mode_config_init(dev); 97 + if (ret) 98 + return ret; 99 + 97 100 mode_config = &dev->mode_config; 98 101 mode_config->funcs = &mode_config_funcs; 99 102 mode_config->min_width = 1; ··· 157 154 DRM_MODE_CONNECTOR_Unknown); 158 155 if (IS_ERR(bridge)) { 159 156 ret = PTR_ERR(bridge); 160 - goto out_config; 157 + goto finish; 161 158 } 162 159 } else if (bridge) { 163 160 dev_info(dev->dev, "Using non-panel bridge\n"); ··· 200 197 out_bridge: 201 198 if (panel) 202 199 drm_panel_bridge_remove(bridge); 203 - out_config: 204 - drm_mode_config_cleanup(dev); 205 200 finish: 206 201 return ret; 207 202 } ··· 344 343 drm_dev_unregister(drm); 345 344 if (priv->panel) 346 345 drm_panel_bridge_remove(priv->bridge); 347 - drm_mode_config_cleanup(drm); 348 346 drm_dev_put(drm); 349 347 of_reserved_mem_device_release(dev); 350 348
+7 -14
drivers/gpu/drm/qxl/qxl_debugfs.c
··· 79 79 #define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list) 80 80 #endif 81 81 82 - int 82 + void 83 83 qxl_debugfs_init(struct drm_minor *minor) 84 84 { 85 85 #if defined(CONFIG_DEBUG_FS) 86 - int r; 87 86 struct qxl_device *dev = 88 87 (struct qxl_device *) minor->dev->dev_private; 89 88 90 89 drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES, 91 90 minor->debugfs_root, minor); 92 91 93 - r = qxl_ttm_debugfs_init(dev); 94 - if (r) { 95 - DRM_ERROR("Failed to init TTM debugfs\n"); 96 - return r; 97 - } 92 + qxl_ttm_debugfs_init(dev); 98 93 #endif 99 - return 0; 100 94 } 101 95 102 - int qxl_debugfs_add_files(struct qxl_device *qdev, 103 - struct drm_info_list *files, 104 - unsigned int nfiles) 96 + void qxl_debugfs_add_files(struct qxl_device *qdev, 97 + struct drm_info_list *files, 98 + unsigned int nfiles) 105 99 { 106 100 unsigned int i; 107 101 108 102 for (i = 0; i < qdev->debugfs_count; i++) { 109 103 if (qdev->debugfs[i].files == files) { 110 104 /* Already registered */ 111 - return 0; 105 + return; 112 106 } 113 107 } 114 108 ··· 110 116 if (i > QXL_DEBUGFS_MAX_COMPONENTS) { 111 117 DRM_ERROR("Reached maximum number of debugfs components.\n"); 112 118 DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n"); 113 - return -EINVAL; 119 + return; 114 120 } 115 121 qdev->debugfs[qdev->debugfs_count].files = files; 116 122 qdev->debugfs[qdev->debugfs_count].num_files = nfiles; ··· 120 126 qdev->ddev.primary->debugfs_root, 121 127 qdev->ddev.primary); 122 128 #endif 123 - return 0; 124 129 }
-2
drivers/gpu/drm/qxl/qxl_drv.c
··· 144 144 */ 145 145 qxl_modeset_fini(qdev); 146 146 qxl_device_fini(qdev); 147 - dev->dev_private = NULL; 148 - kfree(qdev); 149 147 } 150 148 151 149 static void
+5 -8
drivers/gpu/drm/qxl/qxl_drv.h
··· 190 190 unsigned int num_files; 191 191 }; 192 192 193 - int qxl_debugfs_add_files(struct qxl_device *rdev, 194 - struct drm_info_list *files, 195 - unsigned int nfiles); 196 193 int qxl_debugfs_fence_init(struct qxl_device *rdev); 197 194 198 195 struct qxl_device; ··· 439 442 440 443 /* debugfs */ 441 444 442 - int qxl_debugfs_init(struct drm_minor *minor); 443 - int qxl_ttm_debugfs_init(struct qxl_device *qdev); 445 + void qxl_debugfs_init(struct drm_minor *minor); 446 + void qxl_ttm_debugfs_init(struct qxl_device *qdev); 444 447 445 448 /* qxl_prime.c */ 446 449 int qxl_gem_prime_pin(struct drm_gem_object *obj); ··· 458 461 int qxl_irq_init(struct qxl_device *qdev); 459 462 irqreturn_t qxl_irq_handler(int irq, void *arg); 460 463 461 - int qxl_debugfs_add_files(struct qxl_device *qdev, 462 - struct drm_info_list *files, 463 - unsigned int nfiles); 464 + void qxl_debugfs_add_files(struct qxl_device *qdev, 465 + struct drm_info_list *files, 466 + unsigned int nfiles); 464 467 465 468 int qxl_surface_id_alloc(struct qxl_device *qdev, 466 469 struct qxl_bo *surf);
+3 -1
drivers/gpu/drm/qxl/qxl_kms.c
··· 27 27 #include <linux/pci.h> 28 28 29 29 #include <drm/drm_drv.h> 30 + #include <drm/drm_managed.h> 30 31 #include <drm/drm_probe_helper.h> 31 32 32 33 #include "qxl_drv.h" ··· 122 121 qdev->ddev.pdev = pdev; 123 122 pci_set_drvdata(pdev, &qdev->ddev); 124 123 qdev->ddev.dev_private = qdev; 124 + drmm_add_final_kfree(&qdev->ddev, qdev); 125 125 126 126 mutex_init(&qdev->gem.mutex); 127 127 mutex_init(&qdev->update_area_mutex); ··· 220 218 &(qdev->ram_header->cursor_ring_hdr), 221 219 sizeof(struct qxl_command), 222 220 QXL_CURSOR_RING_SIZE, 223 - qdev->io_base + QXL_IO_NOTIFY_CMD, 221 + qdev->io_base + QXL_IO_NOTIFY_CURSOR, 224 222 false, 225 223 &qdev->cursor_event); 226 224
+2 -4
drivers/gpu/drm/qxl/qxl_ttm.c
··· 322 322 } 323 323 #endif 324 324 325 - int qxl_ttm_debugfs_init(struct qxl_device *qdev) 325 + void qxl_ttm_debugfs_init(struct qxl_device *qdev) 326 326 { 327 327 #if defined(CONFIG_DEBUG_FS) 328 328 static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES]; ··· 343 343 qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv; 344 344 345 345 } 346 - return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); 347 - #else 348 - return 0; 346 + qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); 349 347 #endif 350 348 }
+2 -1
drivers/gpu/drm/r128/ati_pcigart.c
··· 32 32 */ 33 33 34 34 #include <linux/export.h> 35 + #include <linux/pci.h> 35 36 36 37 #include <drm/drm_device.h> 37 - #include <drm/drm_pci.h> 38 + #include <drm/drm_legacy.h> 38 39 #include <drm/drm_print.h> 39 40 40 41 #include "ati_pcigart.h"
+2 -2
drivers/gpu/drm/radeon/radeon_atombios.c
··· 2111 2111 ucOverdriveThermalController]; 2112 2112 info.addr = power_info->info.ucOverdriveControllerAddress >> 1; 2113 2113 strlcpy(info.type, name, sizeof(info.type)); 2114 - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 2114 + i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); 2115 2115 } 2116 2116 } 2117 2117 num_modes = power_info->info.ucNumOfPowerModeEntries; ··· 2351 2351 const char *name = pp_lib_thermal_controller_names[controller->ucType]; 2352 2352 info.addr = controller->ucI2cAddress >> 1; 2353 2353 strlcpy(info.type, name, sizeof(info.type)); 2354 - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 2354 + i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); 2355 2355 } 2356 2356 } else { 2357 2357 DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+2 -2
drivers/gpu/drm/radeon/radeon_combios.c
··· 2704 2704 const char *name = thermal_controller_names[thermal_controller]; 2705 2705 info.addr = i2c_addr >> 1; 2706 2706 strlcpy(info.type, name, sizeof(info.type)); 2707 - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 2707 + i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); 2708 2708 } 2709 2709 } 2710 2710 } else { ··· 2721 2721 const char *name = "f75375"; 2722 2722 info.addr = 0x28; 2723 2723 strlcpy(info.type, name, sizeof(info.type)); 2724 - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 2724 + i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); 2725 2725 DRM_INFO("Possible %s thermal controller at 0x%02x\n", 2726 2726 name, info.addr); 2727 2727 }
+1 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 36 36 #include <linux/pm_runtime.h> 37 37 #include <linux/vga_switcheroo.h> 38 38 #include <linux/mmu_notifier.h> 39 + #include <linux/pci.h> 39 40 40 41 #include <drm/drm_agpsupport.h> 41 42 #include <drm/drm_crtc_helper.h> ··· 45 44 #include <drm/drm_file.h> 46 45 #include <drm/drm_gem.h> 47 46 #include <drm/drm_ioctl.h> 48 - #include <drm/drm_pci.h> 49 47 #include <drm/drm_pciids.h> 50 48 #include <drm/drm_probe_helper.h> 51 49 #include <drm/drm_vblank.h>
-1
drivers/gpu/drm/rcar-du/rcar_du_drv.c
··· 530 530 drm_dev_unregister(ddev); 531 531 532 532 drm_kms_helper_poll_fini(ddev); 533 - drm_mode_config_cleanup(ddev); 534 533 535 534 drm_dev_put(ddev); 536 535
+3 -11
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
··· 13 13 #include <drm/drm_crtc.h> 14 14 #include <drm/drm_modeset_helper_vtables.h> 15 15 #include <drm/drm_panel.h> 16 + #include <drm/drm_simple_kms_helper.h> 16 17 17 18 #include "rcar_du_drv.h" 18 19 #include "rcar_du_encoder.h" ··· 23 22 /* ----------------------------------------------------------------------------- 24 23 * Encoder 25 24 */ 26 - 27 - static const struct drm_encoder_helper_funcs encoder_helper_funcs = { 28 - }; 29 - 30 - static const struct drm_encoder_funcs encoder_funcs = { 31 - .destroy = drm_encoder_cleanup, 32 - }; 33 25 34 26 static unsigned int rcar_du_encoder_count_ports(struct device_node *node) 35 27 { ··· 104 110 } 105 111 } 106 112 107 - ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, 108 - DRM_MODE_ENCODER_NONE, NULL); 113 + ret = drm_simple_encoder_init(rcdu->ddev, encoder, 114 + DRM_MODE_ENCODER_NONE); 109 115 if (ret < 0) 110 116 goto done; 111 - 112 - drm_encoder_helper_add(encoder, &encoder_helper_funcs); 113 117 114 118 /* 115 119 * Attach the bridge to the encoder. The bridge will create the
+3 -1
drivers/gpu/drm/rcar-du/rcar_du_kms.c
··· 712 712 unsigned int i; 713 713 int ret; 714 714 715 - drm_mode_config_init(dev); 715 + ret = drmm_mode_config_init(dev); 716 + if (ret) 717 + return ret; 716 718 717 719 dev->mode_config.min_width = 0; 718 720 dev->mode_config.min_height = 0;
+3 -6
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
··· 26 26 #include <drm/drm_of.h> 27 27 #include <drm/drm_panel.h> 28 28 #include <drm/drm_probe_helper.h> 29 + #include <drm/drm_simple_kms_helper.h> 29 30 30 31 #include "rockchip_drm_drv.h" 31 32 #include "rockchip_drm_vop.h" ··· 259 258 .atomic_check = rockchip_dp_drm_encoder_atomic_check, 260 259 }; 261 260 262 - static struct drm_encoder_funcs rockchip_dp_encoder_funcs = { 263 - .destroy = drm_encoder_cleanup, 264 - }; 265 - 266 261 static int rockchip_dp_of_probe(struct rockchip_dp_device *dp) 267 262 { 268 263 struct device *dev = dp->dev; ··· 306 309 dev->of_node); 307 310 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 308 311 309 - ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs, 310 - DRM_MODE_ENCODER_TMDS, NULL); 312 + ret = drm_simple_encoder_init(drm_dev, encoder, 313 + DRM_MODE_ENCODER_TMDS); 311 314 if (ret) { 312 315 DRM_ERROR("failed to initialize encoder with drm\n"); 313 316 return ret;
+3 -6
drivers/gpu/drm/rockchip/cdn-dp-core.c
··· 20 20 #include <drm/drm_edid.h> 21 21 #include <drm/drm_of.h> 22 22 #include <drm/drm_probe_helper.h> 23 + #include <drm/drm_simple_kms_helper.h> 23 24 24 25 #include "cdn-dp-core.h" 25 26 #include "cdn-dp-reg.h" ··· 690 689 .atomic_check = cdn_dp_encoder_atomic_check, 691 690 }; 692 691 693 - static const struct drm_encoder_funcs cdn_dp_encoder_funcs = { 694 - .destroy = drm_encoder_cleanup, 695 - }; 696 - 697 692 static int cdn_dp_parse_dt(struct cdn_dp_device *dp) 698 693 { 699 694 struct device *dev = dp->dev; ··· 1027 1030 dev->of_node); 1028 1031 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 1029 1032 1030 - ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs, 1031 - DRM_MODE_ENCODER_TMDS, NULL); 1033 + ret = drm_simple_encoder_init(drm_dev, encoder, 1034 + DRM_MODE_ENCODER_TMDS); 1032 1035 if (ret) { 1033 1036 DRM_ERROR("failed to initialize encoder with drm\n"); 1034 1037 return ret;
+2 -6
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
··· 21 21 #include <drm/bridge/dw_mipi_dsi.h> 22 22 #include <drm/drm_mipi_dsi.h> 23 23 #include <drm/drm_of.h> 24 + #include <drm/drm_simple_kms_helper.h> 24 25 25 26 #include "rockchip_drm_drv.h" 26 27 #include "rockchip_drm_vop.h" ··· 790 789 .disable = dw_mipi_dsi_encoder_disable, 791 790 }; 792 791 793 - static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = { 794 - .destroy = drm_encoder_cleanup, 795 - }; 796 - 797 792 static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi, 798 793 struct drm_device *drm_dev) 799 794 { ··· 799 802 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, 800 803 dsi->dev->of_node); 801 804 802 - ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs, 803 - DRM_MODE_ENCODER_DSI, NULL); 805 + ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI); 804 806 if (ret) { 805 807 DRM_ERROR("Failed to initialize encoder with drm\n"); 806 808 return ret;
+2 -6
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
··· 14 14 #include <drm/drm_edid.h> 15 15 #include <drm/drm_of.h> 16 16 #include <drm/drm_probe_helper.h> 17 + #include <drm/drm_simple_kms_helper.h> 17 18 18 19 #include "rockchip_drm_drv.h" 19 20 #include "rockchip_drm_vop.h" ··· 237 236 238 237 return (valid) ? MODE_OK : MODE_BAD; 239 238 } 240 - 241 - static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = { 242 - .destroy = drm_encoder_cleanup, 243 - }; 244 239 245 240 static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder) 246 241 { ··· 543 546 } 544 547 545 548 drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); 546 - drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, 547 - DRM_MODE_ENCODER_TMDS, NULL); 549 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); 548 550 549 551 platform_set_drvdata(pdev, hdmi); 550 552
+2 -6
drivers/gpu/drm/rockchip/inno_hdmi.c
··· 19 19 #include <drm/drm_edid.h> 20 20 #include <drm/drm_of.h> 21 21 #include <drm/drm_probe_helper.h> 22 + #include <drm/drm_simple_kms_helper.h> 22 23 23 24 #include "rockchip_drm_drv.h" 24 25 #include "rockchip_drm_vop.h" ··· 533 532 .atomic_check = inno_hdmi_encoder_atomic_check, 534 533 }; 535 534 536 - static struct drm_encoder_funcs inno_hdmi_encoder_funcs = { 537 - .destroy = drm_encoder_cleanup, 538 - }; 539 - 540 535 static enum drm_connector_status 541 536 inno_hdmi_connector_detect(struct drm_connector *connector, bool force) 542 537 { ··· 614 617 return -EPROBE_DEFER; 615 618 616 619 drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs); 617 - drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs, 618 - DRM_MODE_ENCODER_TMDS, NULL); 620 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); 619 621 620 622 hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; 621 623
+2 -6
drivers/gpu/drm/rockchip/rk3066_hdmi.c
··· 6 6 7 7 #include <drm/drm_of.h> 8 8 #include <drm/drm_probe_helper.h> 9 + #include <drm/drm_simple_kms_helper.h> 9 10 10 11 #include <linux/clk.h> 11 12 #include <linux/mfd/syscon.h> ··· 452 451 .atomic_check = rk3066_hdmi_encoder_atomic_check, 453 452 }; 454 453 455 - static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = { 456 - .destroy = drm_encoder_cleanup, 457 - }; 458 - 459 454 static enum drm_connector_status 460 455 rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force) 461 456 { ··· 554 557 return -EPROBE_DEFER; 555 558 556 559 drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs); 557 - drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs, 558 - DRM_MODE_ENCODER_TMDS, NULL); 560 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); 559 561 560 562 hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; 561 563
+5 -9
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 135 135 if (ret) 136 136 goto err_free; 137 137 138 - drm_mode_config_init(drm_dev); 138 + ret = drmm_mode_config_init(drm_dev); 139 + if (ret) 140 + goto err_iommu_cleanup; 139 141 140 142 rockchip_drm_mode_config_init(drm_dev); 141 143 142 144 /* Try to bind all sub drivers. */ 143 145 ret = component_bind_all(dev, drm_dev); 144 146 if (ret) 145 - goto err_mode_config_cleanup; 147 + goto err_iommu_cleanup; 146 148 147 149 ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc); 148 150 if (ret) ··· 175 173 rockchip_drm_fbdev_fini(drm_dev); 176 174 err_unbind_all: 177 175 component_unbind_all(dev, drm_dev); 178 - err_mode_config_cleanup: 179 - drm_mode_config_cleanup(drm_dev); 176 + err_iommu_cleanup: 180 177 rockchip_iommu_cleanup(drm_dev); 181 178 err_free: 182 - drm_dev->dev_private = NULL; 183 - dev_set_drvdata(dev, NULL); 184 179 drm_dev_put(drm_dev); 185 180 return ret; 186 181 } ··· 193 194 194 195 drm_atomic_helper_shutdown(drm_dev); 195 196 component_unbind_all(dev, drm_dev); 196 - drm_mode_config_cleanup(drm_dev); 197 197 rockchip_iommu_cleanup(drm_dev); 198 198 199 - drm_dev->dev_private = NULL; 200 - dev_set_drvdata(dev, NULL); 201 199 drm_dev_put(drm_dev); 202 200 } 203 201
+1
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
··· 30 30 int output_mode; 31 31 int output_bpc; 32 32 int output_flags; 33 + bool enable_afbc; 33 34 }; 34 35 #define to_rockchip_crtc_state(s) \ 35 36 container_of(s, struct rockchip_crtc_state, base)
+42 -1
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
··· 57 57 .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, 58 58 }; 59 59 60 + static struct drm_framebuffer * 61 + rockchip_fb_create(struct drm_device *dev, struct drm_file *file, 62 + const struct drm_mode_fb_cmd2 *mode_cmd) 63 + { 64 + struct drm_afbc_framebuffer *afbc_fb; 65 + const struct drm_format_info *info; 66 + int ret; 67 + 68 + info = drm_get_format_info(dev, mode_cmd); 69 + if (!info) 70 + return ERR_PTR(-ENOMEM); 71 + 72 + afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL); 73 + if (!afbc_fb) 74 + return ERR_PTR(-ENOMEM); 75 + 76 + ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd, 77 + &rockchip_drm_fb_funcs); 78 + if (ret) { 79 + kfree(afbc_fb); 80 + return ERR_PTR(ret); 81 + } 82 + 83 + if (drm_is_afbc(mode_cmd->modifier[0])) { 84 + int ret, i; 85 + 86 + ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb); 87 + if (ret) { 88 + struct drm_gem_object **obj = afbc_fb->base.obj; 89 + 90 + for (i = 0; i < info->num_planes; ++i) 91 + drm_gem_object_put_unlocked(obj[i]); 92 + 93 + kfree(afbc_fb); 94 + return ERR_PTR(ret); 95 + } 96 + } 97 + 98 + return &afbc_fb->base; 99 + } 100 + 60 101 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { 61 - .fb_create = drm_gem_fb_create_with_dirty, 102 + .fb_create = rockchip_fb_create, 62 103 .output_poll_changed = drm_fb_helper_output_poll_changed, 63 104 .atomic_check = drm_atomic_helper_check, 64 105 .atomic_commit = drm_atomic_helper_commit,
+135 -2
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 91 91 #define VOP_WIN_TO_INDEX(vop_win) \ 92 92 ((vop_win) - (vop_win)->vop->win) 93 93 94 + #define VOP_AFBC_SET(vop, name, v) \ 95 + do { \ 96 + if ((vop)->data->afbc) \ 97 + vop_reg_set((vop), &(vop)->data->afbc->name, \ 98 + 0, ~0, v, #name); \ 99 + } while (0) 100 + 94 101 #define to_vop(x) container_of(x, struct vop, crtc) 95 102 #define to_vop_win(x) container_of(x, struct vop_win, base) 103 + 104 + #define AFBC_FMT_RGB565 0x0 105 + #define AFBC_FMT_U8U8U8U8 0x5 106 + #define AFBC_FMT_U8U8U8 0x4 107 + 108 + #define AFBC_TILE_16x16 BIT(4) 96 109 97 110 /* 98 111 * The coefficients of the following matrix are all fixed points. ··· 285 272 DRM_ERROR("unsupported format[%08x]\n", format); 286 273 return -EINVAL; 287 274 } 275 + } 276 + 277 + static int vop_convert_afbc_format(uint32_t format) 278 + { 279 + switch (format) { 280 + case DRM_FORMAT_XRGB8888: 281 + case DRM_FORMAT_ARGB8888: 282 + case DRM_FORMAT_XBGR8888: 283 + case DRM_FORMAT_ABGR8888: 284 + return AFBC_FMT_U8U8U8U8; 285 + case DRM_FORMAT_RGB888: 286 + case DRM_FORMAT_BGR888: 287 + return AFBC_FMT_U8U8U8; 288 + case DRM_FORMAT_RGB565: 289 + case DRM_FORMAT_BGR565: 290 + return AFBC_FMT_RGB565; 291 + /* either of the below should not be reachable */ 292 + default: 293 + DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format); 294 + return -EINVAL; 295 + } 296 + 297 + return -EINVAL; 288 298 } 289 299 290 300 static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src, ··· 634 598 vop_win_disable(vop, vop_win); 635 599 } 636 600 } 601 + 602 + if (vop->data->afbc) { 603 + struct rockchip_crtc_state *s; 604 + /* 605 + * Disable AFBC and forget there was a vop window with AFBC 606 + */ 607 + VOP_AFBC_SET(vop, enable, 0); 608 + s = to_rockchip_crtc_state(crtc->state); 609 + s->enable_afbc = false; 610 + } 611 + 637 612 spin_unlock(&vop->reg_lock); 638 613 639 614 vop_cfg_done(vop); ··· 757 710 drm_plane_cleanup(plane); 758 711 } 759 712 713 + static inline bool rockchip_afbc(u64 modifier) 
714 + { 715 + return modifier == ROCKCHIP_AFBC_MOD; 716 + } 717 + 718 + static bool rockchip_mod_supported(struct drm_plane *plane, 719 + u32 format, u64 modifier) 720 + { 721 + if (modifier == DRM_FORMAT_MOD_LINEAR) 722 + return true; 723 + 724 + if (!rockchip_afbc(modifier)) { 725 + DRM_DEBUG_KMS("Unsupported format modifer 0x%llx\n", modifier); 726 + 727 + return false; 728 + } 729 + 730 + return vop_convert_afbc_format(format) >= 0; 731 + } 732 + 760 733 static int vop_plane_atomic_check(struct drm_plane *plane, 761 734 struct drm_plane_state *state) 762 735 { ··· 823 756 if (fb->format->is_yuv && state->rotation & DRM_MODE_REFLECT_Y) { 824 757 DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n"); 825 758 return -EINVAL; 759 + } 760 + 761 + if (rockchip_afbc(fb->modifier)) { 762 + struct vop *vop = to_vop(crtc); 763 + 764 + if (!vop->data->afbc) { 765 + DRM_ERROR("vop does not support AFBC\n"); 766 + return -EINVAL; 767 + } 768 + 769 + ret = vop_convert_afbc_format(fb->format->format); 770 + if (ret < 0) 771 + return ret; 772 + 773 + if (state->src.x1 || state->src.y1) { 774 + DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]); 775 + return -EINVAL; 776 + } 777 + 778 + if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) { 779 + DRM_ERROR("No rotation support in AFBC, rotation=%d\n", 780 + state->rotation); 781 + return -EINVAL; 782 + } 826 783 } 827 784 828 785 return 0; ··· 936 845 format = vop_convert_format(fb->format->format); 937 846 938 847 spin_lock(&vop->reg_lock); 848 + 849 + if (rockchip_afbc(fb->modifier)) { 850 + int afbc_format = vop_convert_afbc_format(fb->format->format); 851 + 852 + VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16); 853 + VOP_AFBC_SET(vop, hreg_block_split, 0); 854 + VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win)); 855 + VOP_AFBC_SET(vop, hdr_ptr, dma_addr); 856 + VOP_AFBC_SET(vop, pic_size, act_info); 857 + } 
939 858 940 859 VOP_WIN_SET(vop, win, format, format); 941 860 VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); ··· 1102 1001 .reset = drm_atomic_helper_plane_reset, 1103 1002 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 1104 1003 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 1004 + .format_mod_supported = rockchip_mod_supported, 1105 1005 }; 1106 1006 1107 1007 static int vop_crtc_enable_vblank(struct drm_crtc *crtc) ··· 1412 1310 struct drm_crtc_state *crtc_state) 1413 1311 { 1414 1312 struct vop *vop = to_vop(crtc); 1313 + struct drm_plane *plane; 1314 + struct drm_plane_state *plane_state; 1315 + struct rockchip_crtc_state *s; 1316 + int afbc_planes = 0; 1415 1317 1416 1318 if (vop->lut_regs && crtc_state->color_mgmt_changed && 1417 1319 crtc_state->gamma_lut) { ··· 1429 1323 } 1430 1324 } 1431 1325 1326 + drm_atomic_crtc_state_for_each_plane(plane, crtc_state) { 1327 + plane_state = 1328 + drm_atomic_get_plane_state(crtc_state->state, plane); 1329 + if (IS_ERR(plane_state)) { 1330 + DRM_DEBUG_KMS("Cannot get plane state for plane %s\n", 1331 + plane->name); 1332 + return PTR_ERR(plane_state); 1333 + } 1334 + 1335 + if (drm_is_afbc(plane_state->fb->modifier)) 1336 + ++afbc_planes; 1337 + } 1338 + 1339 + if (afbc_planes > 1) { 1340 + DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes); 1341 + return -EINVAL; 1342 + } 1343 + 1344 + s = to_rockchip_crtc_state(crtc_state); 1345 + s->enable_afbc = afbc_planes > 0; 1346 + 1432 1347 return 0; 1433 1348 } 1434 1349 ··· 1460 1333 struct drm_plane_state *old_plane_state, *new_plane_state; 1461 1334 struct vop *vop = to_vop(crtc); 1462 1335 struct drm_plane *plane; 1336 + struct rockchip_crtc_state *s; 1463 1337 int i; 1464 1338 1465 1339 if (WARN_ON(!vop->is_enabled)) ··· 1468 1340 1469 1341 spin_lock(&vop->reg_lock); 1470 1342 1343 + /* Enable AFBC if there is some AFBC window, disable otherwise. 
*/ 1344 + s = to_rockchip_crtc_state(crtc->state); 1345 + VOP_AFBC_SET(vop, enable, s->enable_afbc); 1471 1346 vop_cfg_done(vop); 1472 1347 1473 1348 spin_unlock(&vop->reg_lock); ··· 1765 1634 0, &vop_plane_funcs, 1766 1635 win_data->phy->data_formats, 1767 1636 win_data->phy->nformats, 1768 - NULL, win_data->type, NULL); 1637 + win_data->phy->format_modifiers, 1638 + win_data->type, NULL); 1769 1639 if (ret) { 1770 1640 DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n", 1771 1641 ret); ··· 1810 1678 &vop_plane_funcs, 1811 1679 win_data->phy->data_formats, 1812 1680 win_data->phy->nformats, 1813 - NULL, win_data->type, NULL); 1681 + win_data->phy->format_modifiers, 1682 + win_data->type, NULL); 1814 1683 if (ret) { 1815 1684 DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n", 1816 1685 ret);
+17
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
··· 17 17 18 18 #define NUM_YUV2YUV_COEFFICIENTS 12 19 19 20 + #define ROCKCHIP_AFBC_MOD \ 21 + DRM_FORMAT_MOD_ARM_AFBC( \ 22 + AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \ 23 + ) 24 + 20 25 enum vop_data_format { 21 26 VOP_FMT_ARGB8888 = 0, 22 27 VOP_FMT_RGB888, ··· 37 32 uint8_t shift; 38 33 bool write_mask; 39 34 bool relaxed; 35 + }; 36 + 37 + struct vop_afbc { 38 + struct vop_reg enable; 39 + struct vop_reg win_sel; 40 + struct vop_reg format; 41 + struct vop_reg hreg_block_split; 42 + struct vop_reg pic_size; 43 + struct vop_reg hdr_ptr; 44 + struct vop_reg rstn; 40 45 }; 41 46 42 47 struct vop_modeset { ··· 149 134 const struct vop_scl_regs *scl; 150 135 const uint32_t *data_formats; 151 136 uint32_t nformats; 137 + const uint64_t *format_modifiers; 152 138 153 139 struct vop_reg enable; 154 140 struct vop_reg gate; ··· 189 173 const struct vop_misc *misc; 190 174 const struct vop_modeset *modeset; 191 175 const struct vop_output *output; 176 + const struct vop_afbc *afbc; 192 177 const struct vop_win_yuv2yuv_data *win_yuv2yuv; 193 178 const struct vop_win_data *win; 194 179 unsigned int win_size;
+3 -7
drivers/gpu/drm/rockchip/rockchip_lvds.c
··· 16 16 #include <linux/pm_runtime.h> 17 17 #include <linux/regmap.h> 18 18 #include <linux/reset.h> 19 + 19 20 #include <drm/drm_atomic_helper.h> 20 21 #include <drm/drm_bridge.h> 21 - 22 22 #include <drm/drm_dp_helper.h> 23 23 #include <drm/drm_of.h> 24 24 #include <drm/drm_panel.h> 25 25 #include <drm/drm_probe_helper.h> 26 + #include <drm/drm_simple_kms_helper.h> 26 27 27 28 #include "rockchip_drm_drv.h" 28 29 #include "rockchip_drm_vop.h" ··· 436 435 .atomic_check = rockchip_lvds_encoder_atomic_check, 437 436 }; 438 437 439 - static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = { 440 - .destroy = drm_encoder_cleanup, 441 - }; 442 - 443 438 static int rk3288_lvds_probe(struct platform_device *pdev, 444 439 struct rockchip_lvds *lvds) 445 440 { ··· 604 607 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, 605 608 dev->of_node); 606 609 607 - ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs, 608 - DRM_MODE_ENCODER_LVDS, NULL); 610 + ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS); 609 611 if (ret < 0) { 610 612 DRM_DEV_ERROR(drm_dev->dev, 611 613 "failed to initialize encoder: %d\n", ret);
+2 -6
drivers/gpu/drm/rockchip/rockchip_rgb.c
··· 14 14 #include <drm/drm_of.h> 15 15 #include <drm/drm_panel.h> 16 16 #include <drm/drm_probe_helper.h> 17 + #include <drm/drm_simple_kms_helper.h> 17 18 18 19 #include "rockchip_drm_drv.h" 19 20 #include "rockchip_drm_vop.h" ··· 66 65 static const 67 66 struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = { 68 67 .atomic_check = rockchip_rgb_encoder_atomic_check, 69 - }; 70 - 71 - static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = { 72 - .destroy = drm_encoder_cleanup, 73 68 }; 74 69 75 70 struct rockchip_rgb *rockchip_rgb_init(struct device *dev, ··· 123 126 encoder = &rgb->encoder; 124 127 encoder->possible_crtcs = drm_crtc_mask(crtc); 125 128 126 - ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs, 127 - DRM_MODE_ENCODER_NONE, NULL); 129 + ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE); 128 130 if (ret < 0) { 129 131 DRM_DEV_ERROR(drm_dev->dev, 130 132 "failed to initialize encoder: %d\n", ret);
+81 -2
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
··· 50 50 DRM_FORMAT_NV24, 51 51 }; 52 52 53 + static const uint64_t format_modifiers_win_full[] = { 54 + DRM_FORMAT_MOD_LINEAR, 55 + DRM_FORMAT_MOD_INVALID, 56 + }; 57 + 58 + static const uint64_t format_modifiers_win_full_afbc[] = { 59 + ROCKCHIP_AFBC_MOD, 60 + DRM_FORMAT_MOD_LINEAR, 61 + DRM_FORMAT_MOD_INVALID, 62 + }; 63 + 53 64 static const uint32_t formats_win_lite[] = { 54 65 DRM_FORMAT_XRGB8888, 55 66 DRM_FORMAT_ARGB8888, ··· 70 59 DRM_FORMAT_BGR888, 71 60 DRM_FORMAT_RGB565, 72 61 DRM_FORMAT_BGR565, 62 + }; 63 + 64 + static const uint64_t format_modifiers_win_lite[] = { 65 + DRM_FORMAT_MOD_LINEAR, 66 + DRM_FORMAT_MOD_INVALID, 73 67 }; 74 68 75 69 static const struct vop_scl_regs rk3036_win_scl = { ··· 88 72 .scl = &rk3036_win_scl, 89 73 .data_formats = formats_win_full, 90 74 .nformats = ARRAY_SIZE(formats_win_full), 75 + .format_modifiers = format_modifiers_win_full, 91 76 .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0), 92 77 .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3), 93 78 .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15), ··· 104 87 static const struct vop_win_phy rk3036_win1_data = { 105 88 .data_formats = formats_win_lite, 106 89 .nformats = ARRAY_SIZE(formats_win_lite), 90 + .format_modifiers = format_modifiers_win_lite, 107 91 .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1), 108 92 .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6), 109 93 .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19), ··· 171 153 static const struct vop_win_phy rk3126_win1_data = { 172 154 .data_formats = formats_win_lite, 173 155 .nformats = ARRAY_SIZE(formats_win_lite), 156 + .format_modifiers = format_modifiers_win_lite, 174 157 .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1), 175 158 .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6), 176 159 .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19), ··· 253 234 .scl = &px30_win_scl, 254 235 .data_formats = formats_win_full, 255 236 .nformats = ARRAY_SIZE(formats_win_full), 237 + .format_modifiers = format_modifiers_win_full, 256 238 .enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 
0), 257 239 .format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1), 258 240 .rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12), ··· 269 249 static const struct vop_win_phy px30_win1_data = { 270 250 .data_formats = formats_win_lite, 271 251 .nformats = ARRAY_SIZE(formats_win_lite), 252 + .format_modifiers = format_modifiers_win_lite, 272 253 .enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0), 273 254 .format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4), 274 255 .rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12), ··· 282 261 static const struct vop_win_phy px30_win2_data = { 283 262 .data_formats = formats_win_lite, 284 263 .nformats = ARRAY_SIZE(formats_win_lite), 264 + .format_modifiers = format_modifiers_win_lite, 285 265 .gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4), 286 266 .enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0), 287 267 .format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5), ··· 338 316 .scl = &rk3066_win_scl, 339 317 .data_formats = formats_win_full, 340 318 .nformats = ARRAY_SIZE(formats_win_full), 319 + .format_modifiers = format_modifiers_win_full, 341 320 .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0), 342 321 .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4), 343 322 .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19), ··· 355 332 .scl = &rk3066_win_scl, 356 333 .data_formats = formats_win_full, 357 334 .nformats = ARRAY_SIZE(formats_win_full), 335 + .format_modifiers = format_modifiers_win_full, 358 336 .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1), 359 337 .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7), 360 338 .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23), ··· 371 347 static const struct vop_win_phy rk3066_win2_data = { 372 348 .data_formats = formats_win_lite, 373 349 .nformats = ARRAY_SIZE(formats_win_lite), 350 + .format_modifiers = format_modifiers_win_lite, 374 351 .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2), 375 352 .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10), 376 353 .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27), ··· 451 426 .scl = &rk3188_win_scl, 452 427 .data_formats = formats_win_full, 453 428 .nformats = 
ARRAY_SIZE(formats_win_full), 429 + .format_modifiers = format_modifiers_win_full, 454 430 .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0), 455 431 .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3), 456 432 .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15), ··· 466 440 static const struct vop_win_phy rk3188_win1_data = { 467 441 .data_formats = formats_win_lite, 468 442 .nformats = ARRAY_SIZE(formats_win_lite), 443 + .format_modifiers = format_modifiers_win_lite, 469 444 .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1), 470 445 .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6), 471 446 .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19), ··· 572 545 .scl = &rk3288_win_full_scl, 573 546 .data_formats = formats_win_full, 574 547 .nformats = ARRAY_SIZE(formats_win_full), 548 + .format_modifiers = format_modifiers_win_full, 575 549 .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), 576 550 .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), 577 551 .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), ··· 591 563 static const struct vop_win_phy rk3288_win23_data = { 592 564 .data_formats = formats_win_lite, 593 565 .nformats = ARRAY_SIZE(formats_win_lite), 566 + .format_modifiers = format_modifiers_win_lite, 594 567 .enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4), 595 568 .gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0), 596 569 .format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1), ··· 706 677 .scl = &rk3288_win_full_scl, 707 678 .data_formats = formats_win_full, 708 679 .nformats = ARRAY_SIZE(formats_win_full), 680 + .format_modifiers = format_modifiers_win_full, 709 681 .enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0), 710 682 .format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1), 711 683 .rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12), ··· 727 697 static const struct vop_win_phy rk3368_win23_data = { 728 698 .data_formats = formats_win_lite, 729 699 .nformats = ARRAY_SIZE(formats_win_lite), 700 + .format_modifiers = format_modifiers_win_lite, 730 701 .gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0), 731 702 .enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4), 
732 703 .format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5), ··· 848 817 .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) }, 849 818 { .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data }, 850 819 { .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data }, 820 + 821 + }; 822 + 823 + static const struct vop_win_phy rk3399_win01_data = { 824 + .scl = &rk3288_win_full_scl, 825 + .data_formats = formats_win_full, 826 + .nformats = ARRAY_SIZE(formats_win_full), 827 + .format_modifiers = format_modifiers_win_full_afbc, 828 + .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), 829 + .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), 830 + .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), 831 + .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22), 832 + .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0), 833 + .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0), 834 + .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0), 835 + .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0), 836 + .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0), 837 + .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0), 838 + .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16), 839 + .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0), 840 + .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0), 841 + }; 842 + 843 + /* 844 + * rk3399 vop big windows register layout is same as rk3288, but we 845 + * have a separate rk3399 win data array here so that we can advertise 846 + * AFBC on the primary plane. 
847 + */ 848 + static const struct vop_win_data rk3399_vop_win_data[] = { 849 + { .base = 0x00, .phy = &rk3399_win01_data, 850 + .type = DRM_PLANE_TYPE_PRIMARY }, 851 + { .base = 0x40, .phy = &rk3288_win01_data, 852 + .type = DRM_PLANE_TYPE_OVERLAY }, 853 + { .base = 0x00, .phy = &rk3288_win23_data, 854 + .type = DRM_PLANE_TYPE_OVERLAY }, 855 + { .base = 0x50, .phy = &rk3288_win23_data, 856 + .type = DRM_PLANE_TYPE_CURSOR }, 857 + }; 858 + 859 + static const struct vop_afbc rk3399_vop_afbc = { 860 + .rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3), 861 + .enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0), 862 + .win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1), 863 + .format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16), 864 + .hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21), 865 + .hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0), 866 + .pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0), 851 867 }; 852 868 853 869 static const struct vop_data rk3399_vop_big = { ··· 904 826 .common = &rk3288_common, 905 827 .modeset = &rk3288_modeset, 906 828 .output = &rk3399_output, 829 + .afbc = &rk3399_vop_afbc, 907 830 .misc = &rk3368_misc, 908 - .win = rk3368_vop_win_data, 909 - .win_size = ARRAY_SIZE(rk3368_vop_win_data), 831 + .win = rk3399_vop_win_data, 832 + .win_size = ARRAY_SIZE(rk3399_vop_win_data), 910 833 .win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data, 911 834 }; 912 835
+3 -11
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
··· 17 17 #include <drm/drm_gem_cma_helper.h> 18 18 #include <drm/drm_plane_helper.h> 19 19 #include <drm/drm_probe_helper.h> 20 + #include <drm/drm_simple_kms_helper.h> 20 21 #include <drm/drm_vblank.h> 21 22 22 23 #include "shmob_drm_backlight.h" ··· 559 558 .mode_set = shmob_drm_encoder_mode_set, 560 559 }; 561 560 562 - static void shmob_drm_encoder_destroy(struct drm_encoder *encoder) 563 - { 564 - drm_encoder_cleanup(encoder); 565 - } 566 - 567 - static const struct drm_encoder_funcs encoder_funcs = { 568 - .destroy = shmob_drm_encoder_destroy, 569 - }; 570 - 571 561 int shmob_drm_encoder_create(struct shmob_drm_device *sdev) 572 562 { 573 563 struct drm_encoder *encoder = &sdev->encoder.encoder; ··· 568 576 569 577 encoder->possible_crtcs = 1; 570 578 571 - ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs, 572 - DRM_MODE_ENCODER_LVDS, NULL); 579 + ret = drm_simple_encoder_init(sdev->ddev, encoder, 580 + DRM_MODE_ENCODER_LVDS); 573 581 if (ret < 0) 574 582 return ret; 575 583
-2
drivers/gpu/drm/shmobile/shmob_drm_drv.c
··· 192 192 193 193 drm_dev_unregister(ddev); 194 194 drm_kms_helper_poll_fini(ddev); 195 - drm_mode_config_cleanup(ddev); 196 195 drm_irq_uninstall(ddev); 197 196 drm_dev_put(ddev); 198 197 ··· 287 288 drm_irq_uninstall(ddev); 288 289 err_modeset_cleanup: 289 290 drm_kms_helper_poll_fini(ddev); 290 - drm_mode_config_cleanup(ddev); 291 291 err_free_drm_dev: 292 292 drm_dev_put(ddev); 293 293
+5 -1
drivers/gpu/drm/shmobile/shmob_drm_kms.c
··· 126 126 127 127 int shmob_drm_modeset_init(struct shmob_drm_device *sdev) 128 128 { 129 - drm_mode_config_init(sdev->ddev); 129 + int ret; 130 + 131 + ret = drmm_mode_config_init(sdev->ddev); 132 + if (ret) 133 + return ret; 130 134 131 135 shmob_drm_crtc_create(sdev); 132 136 shmob_drm_encoder_create(sdev);
+2 -4
drivers/gpu/drm/sti/sti_compositor.c
··· 42 42 }, 43 43 }; 44 44 45 - int sti_compositor_debugfs_init(struct sti_compositor *compo, 46 - struct drm_minor *minor) 45 + void sti_compositor_debugfs_init(struct sti_compositor *compo, 46 + struct drm_minor *minor) 47 47 { 48 48 unsigned int i; 49 49 ··· 54 54 for (i = 0; i < STI_MAX_MIXER; i++) 55 55 if (compo->mixer[i]) 56 56 sti_mixer_debugfs_init(compo->mixer[i], minor); 57 - 58 - return 0; 59 57 } 60 58 61 59 static int sti_compositor_bind(struct device *dev,
+2 -2
drivers/gpu/drm/sti/sti_compositor.h
··· 79 79 struct notifier_block vtg_vblank_nb[STI_MAX_MIXER]; 80 80 }; 81 81 82 - int sti_compositor_debugfs_init(struct sti_compositor *compo, 83 - struct drm_minor *minor); 82 + void sti_compositor_debugfs_init(struct sti_compositor *compo, 83 + struct drm_minor *minor); 84 84 85 85 #endif
+1 -1
drivers/gpu/drm/sti/sti_crtc.c
··· 319 319 struct sti_compositor *compo = dev_get_drvdata(mixer->dev); 320 320 321 321 if (drm_crtc_index(crtc) == 0) 322 - return sti_compositor_debugfs_init(compo, crtc->dev->primary); 322 + sti_compositor_debugfs_init(compo, crtc->dev->primary); 323 323 324 324 return 0; 325 325 }
+8 -6
drivers/gpu/drm/sti/sti_cursor.c
··· 131 131 { "cursor", cursor_dbg_show, 0, NULL }, 132 132 }; 133 133 134 - static int cursor_debugfs_init(struct sti_cursor *cursor, 135 - struct drm_minor *minor) 134 + static void cursor_debugfs_init(struct sti_cursor *cursor, 135 + struct drm_minor *minor) 136 136 { 137 137 unsigned int i; 138 138 139 139 for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++) 140 140 cursor_debugfs_files[i].data = cursor; 141 141 142 - return drm_debugfs_create_files(cursor_debugfs_files, 143 - ARRAY_SIZE(cursor_debugfs_files), 144 - minor->debugfs_root, minor); 142 + drm_debugfs_create_files(cursor_debugfs_files, 143 + ARRAY_SIZE(cursor_debugfs_files), 144 + minor->debugfs_root, minor); 145 145 } 146 146 147 147 static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src) ··· 342 342 struct sti_plane *plane = to_sti_plane(drm_plane); 343 343 struct sti_cursor *cursor = to_sti_cursor(plane); 344 344 345 - return cursor_debugfs_init(cursor, drm_plane->dev->primary); 345 + cursor_debugfs_init(cursor, drm_plane->dev->primary); 346 + 347 + return 0; 346 348 } 347 349 348 350 static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+4 -12
drivers/gpu/drm/sti/sti_drv.c
··· 92 92 {"fps_get", sti_drm_fps_dbg_show, 0}, 93 93 }; 94 94 95 - static int sti_drm_dbg_init(struct drm_minor *minor) 95 + static void sti_drm_dbg_init(struct drm_minor *minor) 96 96 { 97 - int ret; 98 - 99 - ret = drm_debugfs_create_files(sti_drm_dbg_list, 100 - ARRAY_SIZE(sti_drm_dbg_list), 101 - minor->debugfs_root, minor); 102 - if (ret) 103 - goto err; 97 + drm_debugfs_create_files(sti_drm_dbg_list, 98 + ARRAY_SIZE(sti_drm_dbg_list), 99 + minor->debugfs_root, minor); 104 100 105 101 debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root, 106 102 minor->dev, &sti_drm_fps_fops); 107 103 108 104 DRM_INFO("%s: debugfs installed\n", DRIVER_NAME); 109 - return 0; 110 - err: 111 - DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME); 112 - return ret; 113 105 } 114 106 115 107 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
+5 -8
drivers/gpu/drm/sti/sti_dvo.c
··· 196 196 { "dvo", dvo_dbg_show, 0, NULL }, 197 197 }; 198 198 199 - static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor) 199 + static void dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor) 200 200 { 201 201 unsigned int i; 202 202 203 203 for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++) 204 204 dvo_debugfs_files[i].data = dvo; 205 205 206 - return drm_debugfs_create_files(dvo_debugfs_files, 207 - ARRAY_SIZE(dvo_debugfs_files), 208 - minor->debugfs_root, minor); 206 + drm_debugfs_create_files(dvo_debugfs_files, 207 + ARRAY_SIZE(dvo_debugfs_files), 208 + minor->debugfs_root, minor); 209 209 } 210 210 211 211 static void sti_dvo_disable(struct drm_bridge *bridge) ··· 405 405 = to_sti_dvo_connector(connector); 406 406 struct sti_dvo *dvo = dvo_connector->dvo; 407 407 408 - if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) { 409 - DRM_ERROR("DVO debugfs setup failed\n"); 410 - return -EINVAL; 411 - } 408 + dvo_debugfs_init(dvo, dvo->drm_dev->primary); 412 409 413 410 return 0; 414 411 }
+4 -3
drivers/gpu/drm/sti/sti_gdp.c
··· 343 343 for (i = 0; i < nb_files; i++) 344 344 gdp_debugfs_files[i].data = gdp; 345 345 346 - return drm_debugfs_create_files(gdp_debugfs_files, 347 - nb_files, 348 - minor->debugfs_root, minor); 346 + drm_debugfs_create_files(gdp_debugfs_files, 347 + nb_files, 348 + minor->debugfs_root, minor); 349 + return 0; 349 350 } 350 351 351 352 static int sti_gdp_fourcc2format(int fourcc)
+5 -8
drivers/gpu/drm/sti/sti_hda.c
··· 367 367 { "hda", hda_dbg_show, 0, NULL }, 368 368 }; 369 369 370 - static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor) 370 + static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor) 371 371 { 372 372 unsigned int i; 373 373 374 374 for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++) 375 375 hda_debugfs_files[i].data = hda; 376 376 377 - return drm_debugfs_create_files(hda_debugfs_files, 378 - ARRAY_SIZE(hda_debugfs_files), 379 - minor->debugfs_root, minor); 377 + drm_debugfs_create_files(hda_debugfs_files, 378 + ARRAY_SIZE(hda_debugfs_files), 379 + minor->debugfs_root, minor); 380 380 } 381 381 382 382 /** ··· 643 643 = to_sti_hda_connector(connector); 644 644 struct sti_hda *hda = hda_connector->hda; 645 645 646 - if (hda_debugfs_init(hda, hda->drm_dev->primary)) { 647 - DRM_ERROR("HDA debugfs setup failed\n"); 648 - return -EINVAL; 649 - } 646 + hda_debugfs_init(hda, hda->drm_dev->primary); 650 647 651 648 return 0; 652 649 }
+5 -8
drivers/gpu/drm/sti/sti_hdmi.c
··· 727 727 { "hdmi", hdmi_dbg_show, 0, NULL }, 728 728 }; 729 729 730 - static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor) 730 + static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor) 731 731 { 732 732 unsigned int i; 733 733 734 734 for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++) 735 735 hdmi_debugfs_files[i].data = hdmi; 736 736 737 - return drm_debugfs_create_files(hdmi_debugfs_files, 738 - ARRAY_SIZE(hdmi_debugfs_files), 739 - minor->debugfs_root, minor); 737 + drm_debugfs_create_files(hdmi_debugfs_files, 738 + ARRAY_SIZE(hdmi_debugfs_files), 739 + minor->debugfs_root, minor); 740 740 } 741 741 742 742 static void sti_hdmi_disable(struct drm_bridge *bridge) ··· 1113 1113 = to_sti_hdmi_connector(connector); 1114 1114 struct sti_hdmi *hdmi = hdmi_connector->hdmi; 1115 1115 1116 - if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) { 1117 - DRM_ERROR("HDMI debugfs setup failed\n"); 1118 - return -EINVAL; 1119 - } 1116 + hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary); 1120 1117 1121 1118 return 0; 1122 1119 }
+7 -5
drivers/gpu/drm/sti/sti_hqvdp.c
··· 639 639 { "hqvdp", hqvdp_dbg_show, 0, NULL }, 640 640 }; 641 641 642 - static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor) 642 + static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor) 643 643 { 644 644 unsigned int i; 645 645 646 646 for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++) 647 647 hqvdp_debugfs_files[i].data = hqvdp; 648 648 649 - return drm_debugfs_create_files(hqvdp_debugfs_files, 650 - ARRAY_SIZE(hqvdp_debugfs_files), 651 - minor->debugfs_root, minor); 649 + drm_debugfs_create_files(hqvdp_debugfs_files, 650 + ARRAY_SIZE(hqvdp_debugfs_files), 651 + minor->debugfs_root, minor); 652 652 } 653 653 654 654 /** ··· 1274 1274 struct sti_plane *plane = to_sti_plane(drm_plane); 1275 1275 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); 1276 1276 1277 - return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary); 1277 + hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary); 1278 + 1279 + return 0; 1278 1280 } 1279 1281 1280 1282 static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
+5 -5
drivers/gpu/drm/sti/sti_mixer.c
··· 178 178 { "mixer_aux", mixer_dbg_show, 0, NULL }, 179 179 }; 180 180 181 - int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) 181 + void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) 182 182 { 183 183 unsigned int i; 184 184 struct drm_info_list *mixer_debugfs_files; ··· 194 194 nb_files = ARRAY_SIZE(mixer1_debugfs_files); 195 195 break; 196 196 default: 197 - return -EINVAL; 197 + return; 198 198 } 199 199 200 200 for (i = 0; i < nb_files; i++) 201 201 mixer_debugfs_files[i].data = mixer; 202 202 203 - return drm_debugfs_create_files(mixer_debugfs_files, 204 - nb_files, 205 - minor->debugfs_root, minor); 203 + drm_debugfs_create_files(mixer_debugfs_files, 204 + nb_files, 205 + minor->debugfs_root, minor); 206 206 } 207 207 208 208 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
+1 -1
drivers/gpu/drm/sti/sti_mixer.h
··· 58 58 59 59 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); 60 60 61 - int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor); 61 + void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor); 62 62 63 63 /* depth in Cross-bar control = z order */ 64 64 #define GAM_MIXER_NB_DEPTH_LEVEL 6
+5 -8
drivers/gpu/drm/sti/sti_tvout.c
··· 570 570 { "tvout", tvout_dbg_show, 0, NULL }, 571 571 }; 572 572 573 - static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor) 573 + static void tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor) 574 574 { 575 575 unsigned int i; 576 576 577 577 for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++) 578 578 tvout_debugfs_files[i].data = tvout; 579 579 580 - return drm_debugfs_create_files(tvout_debugfs_files, 581 - ARRAY_SIZE(tvout_debugfs_files), 582 - minor->debugfs_root, minor); 580 + drm_debugfs_create_files(tvout_debugfs_files, 581 + ARRAY_SIZE(tvout_debugfs_files), 582 + minor->debugfs_root, minor); 583 583 } 584 584 585 585 static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode) ··· 603 603 static int sti_tvout_late_register(struct drm_encoder *encoder) 604 604 { 605 605 struct sti_tvout *tvout = to_sti_tvout(encoder); 606 - int ret; 607 606 608 607 if (tvout->debugfs_registered) 609 608 return 0; 610 609 611 - ret = tvout_debugfs_init(tvout, encoder->dev->primary); 612 - if (ret) 613 - return ret; 610 + tvout_debugfs_init(tvout, encoder->dev->primary); 614 611 615 612 tvout->debugfs_registered = true; 616 613 return 0;
+4 -4
drivers/gpu/drm/sti/sti_vid.c
··· 124 124 { "vid", vid_dbg_show, 0, NULL }, 125 125 }; 126 126 127 - int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) 127 + void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) 128 128 { 129 129 unsigned int i; 130 130 131 131 for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++) 132 132 vid_debugfs_files[i].data = vid; 133 133 134 - return drm_debugfs_create_files(vid_debugfs_files, 135 - ARRAY_SIZE(vid_debugfs_files), 136 - minor->debugfs_root, minor); 134 + drm_debugfs_create_files(vid_debugfs_files, 135 + ARRAY_SIZE(vid_debugfs_files), 136 + minor->debugfs_root, minor); 137 137 } 138 138 139 139 void sti_vid_commit(struct sti_vid *vid,
+1 -1
drivers/gpu/drm/sti/sti_vid.h
··· 26 26 struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev, 27 27 int id, void __iomem *baseaddr); 28 28 29 - int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor); 29 + void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor); 30 30 31 31 #endif
+4 -6
drivers/gpu/drm/stm/drv.c
··· 88 88 89 89 ddev->dev_private = (void *)ldev; 90 90 91 - drm_mode_config_init(ddev); 91 + ret = drmm_mode_config_init(ddev); 92 + if (ret) 93 + return ret; 92 94 93 95 /* 94 96 * set max width and height as default value. ··· 105 103 106 104 ret = ltdc_load(ddev); 107 105 if (ret) 108 - goto err; 106 + return ret; 109 107 110 108 drm_mode_config_reset(ddev); 111 109 drm_kms_helper_poll_init(ddev); ··· 113 111 platform_set_drvdata(pdev, ddev); 114 112 115 113 return 0; 116 - err: 117 - drm_mode_config_cleanup(ddev); 118 - return ret; 119 114 } 120 115 121 116 static void drv_unload(struct drm_device *ddev) ··· 121 122 122 123 drm_kms_helper_poll_fini(ddev); 123 124 ltdc_unload(ddev); 124 - drm_mode_config_cleanup(ddev); 125 125 } 126 126 127 127 static __maybe_unused int drv_suspend(struct device *dev)
+3 -9
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
··· 22 22 #include <drm/drm_panel.h> 23 23 #include <drm/drm_print.h> 24 24 #include <drm/drm_probe_helper.h> 25 + #include <drm/drm_simple_kms_helper.h> 25 26 26 27 #include "sun4i_backend.h" 27 28 #include "sun4i_crtc.h" ··· 203 202 .enable = sun4i_hdmi_enable, 204 203 .mode_set = sun4i_hdmi_mode_set, 205 204 .mode_valid = sun4i_hdmi_mode_valid, 206 - }; 207 - 208 - static const struct drm_encoder_funcs sun4i_hdmi_funcs = { 209 - .destroy = drm_encoder_cleanup, 210 205 }; 211 206 212 207 static int sun4i_hdmi_get_modes(struct drm_connector *connector) ··· 608 611 609 612 drm_encoder_helper_add(&hdmi->encoder, 610 613 &sun4i_hdmi_helper_funcs); 611 - ret = drm_encoder_init(drm, 612 - &hdmi->encoder, 613 - &sun4i_hdmi_funcs, 614 - DRM_MODE_ENCODER_TMDS, 615 - NULL); 614 + ret = drm_simple_encoder_init(drm, &hdmi->encoder, 615 + DRM_MODE_ENCODER_TMDS); 616 616 if (ret) { 617 617 dev_err(dev, "Couldn't initialise the HDMI encoder\n"); 618 618 goto err_put_ddc_i2c;
+3 -9
drivers/gpu/drm/sun4i/sun4i_lvds.c
··· 12 12 #include <drm/drm_panel.h> 13 13 #include <drm/drm_print.h> 14 14 #include <drm/drm_probe_helper.h> 15 + #include <drm/drm_simple_kms_helper.h> 15 16 16 17 #include "sun4i_crtc.h" 17 18 #include "sun4i_tcon.h" ··· 97 96 .enable = sun4i_lvds_encoder_enable, 98 97 }; 99 98 100 - static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = { 101 - .destroy = drm_encoder_cleanup, 102 - }; 103 - 104 99 int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) 105 100 { 106 101 struct drm_encoder *encoder; ··· 118 121 119 122 drm_encoder_helper_add(&lvds->encoder, 120 123 &sun4i_lvds_enc_helper_funcs); 121 - ret = drm_encoder_init(drm, 122 - &lvds->encoder, 123 - &sun4i_lvds_enc_funcs, 124 - DRM_MODE_ENCODER_LVDS, 125 - NULL); 124 + ret = drm_simple_encoder_init(drm, &lvds->encoder, 125 + DRM_MODE_ENCODER_LVDS); 126 126 if (ret) { 127 127 dev_err(drm->dev, "Couldn't initialise the lvds encoder\n"); 128 128 goto err_out;
+3 -14
drivers/gpu/drm/sun4i/sun4i_rgb.c
··· 14 14 #include <drm/drm_panel.h> 15 15 #include <drm/drm_print.h> 16 16 #include <drm/drm_probe_helper.h> 17 + #include <drm/drm_simple_kms_helper.h> 17 18 18 19 #include "sun4i_crtc.h" 19 20 #include "sun4i_tcon.h" ··· 189 188 .mode_valid = sun4i_rgb_mode_valid, 190 189 }; 191 190 192 - static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder) 193 - { 194 - drm_encoder_cleanup(encoder); 195 - } 196 - 197 - static struct drm_encoder_funcs sun4i_rgb_enc_funcs = { 198 - .destroy = sun4i_rgb_enc_destroy, 199 - }; 200 - 201 191 int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) 202 192 { 203 193 struct drm_encoder *encoder; ··· 210 218 211 219 drm_encoder_helper_add(&rgb->encoder, 212 220 &sun4i_rgb_enc_helper_funcs); 213 - ret = drm_encoder_init(drm, 214 - &rgb->encoder, 215 - &sun4i_rgb_enc_funcs, 216 - DRM_MODE_ENCODER_NONE, 217 - NULL); 221 + ret = drm_simple_encoder_init(drm, &rgb->encoder, 222 + DRM_MODE_ENCODER_NONE); 218 223 if (ret) { 219 224 dev_err(drm->dev, "Couldn't initialise the rgb encoder\n"); 220 225 goto err_out;
+1 -3
drivers/gpu/drm/sun4i/sun4i_tcon.c
··· 812 812 int irq, ret; 813 813 814 814 irq = platform_get_irq(pdev, 0); 815 - if (irq < 0) { 816 - dev_err(dev, "Couldn't retrieve the TCON interrupt\n"); 815 + if (irq < 0) 817 816 return irq; 818 - } 819 817 820 818 ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0, 821 819 dev_name(dev), tcon);
+3 -14
drivers/gpu/drm/sun4i/sun4i_tv.c
··· 19 19 #include <drm/drm_panel.h> 20 20 #include <drm/drm_print.h> 21 21 #include <drm/drm_probe_helper.h> 22 + #include <drm/drm_simple_kms_helper.h> 22 23 23 24 #include "sun4i_crtc.h" 24 25 #include "sun4i_drv.h" ··· 474 473 .mode_set = sun4i_tv_mode_set, 475 474 }; 476 475 477 - static void sun4i_tv_destroy(struct drm_encoder *encoder) 478 - { 479 - drm_encoder_cleanup(encoder); 480 - } 481 - 482 - static struct drm_encoder_funcs sun4i_tv_funcs = { 483 - .destroy = sun4i_tv_destroy, 484 - }; 485 - 486 476 static int sun4i_tv_comp_get_modes(struct drm_connector *connector) 487 477 { 488 478 int i; ··· 584 592 585 593 drm_encoder_helper_add(&tv->encoder, 586 594 &sun4i_tv_helper_funcs); 587 - ret = drm_encoder_init(drm, 588 - &tv->encoder, 589 - &sun4i_tv_funcs, 590 - DRM_MODE_ENCODER_TVDAC, 591 - NULL); 595 + ret = drm_simple_encoder_init(drm, &tv->encoder, 596 + DRM_MODE_ENCODER_TVDAC); 592 597 if (ret) { 593 598 dev_err(dev, "Couldn't initialise the TV encoder\n"); 594 599 goto err_disable_clk;
+3 -9
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
··· 24 24 #include <drm/drm_panel.h> 25 25 #include <drm/drm_print.h> 26 26 #include <drm/drm_probe_helper.h> 27 + #include <drm/drm_simple_kms_helper.h> 27 28 28 29 #include "sun4i_crtc.h" 29 30 #include "sun4i_tcon.h" ··· 847 846 .enable = sun6i_dsi_encoder_enable, 848 847 }; 849 848 850 - static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = { 851 - .destroy = drm_encoder_cleanup, 852 - }; 853 - 854 849 static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi, 855 850 const struct mipi_dsi_msg *msg) 856 851 { ··· 1059 1062 1060 1063 drm_encoder_helper_add(&dsi->encoder, 1061 1064 &sun6i_dsi_enc_helper_funcs); 1062 - ret = drm_encoder_init(drm, 1063 - &dsi->encoder, 1064 - &sun6i_dsi_enc_funcs, 1065 - DRM_MODE_ENCODER_DSI, 1066 - NULL); 1065 + ret = drm_simple_encoder_init(drm, &dsi->encoder, 1066 + DRM_MODE_ENCODER_DSI); 1067 1067 if (ret) { 1068 1068 dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n"); 1069 1069 return ret;
+2 -6
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
··· 10 10 11 11 #include <drm/drm_crtc_helper.h> 12 12 #include <drm/drm_of.h> 13 + #include <drm/drm_simple_kms_helper.h> 13 14 14 15 #include "sun8i_dw_hdmi.h" 15 16 #include "sun8i_tcon_top.h" ··· 28 27 static const struct drm_encoder_helper_funcs 29 28 sun8i_dw_hdmi_encoder_helper_funcs = { 30 29 .mode_set = sun8i_dw_hdmi_encoder_mode_set, 31 - }; 32 - 33 - static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = { 34 - .destroy = drm_encoder_cleanup, 35 30 }; 36 31 37 32 static enum drm_mode_status ··· 217 220 } 218 221 219 222 drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs); 220 - drm_encoder_init(drm, encoder, &sun8i_dw_hdmi_encoder_funcs, 221 - DRM_MODE_ENCODER_TMDS, NULL); 223 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); 222 224 223 225 sun8i_hdmi_phy_init(hdmi->phy); 224 226
+11 -100
drivers/gpu/drm/sun4i/sun8i_mixer.c
··· 27 27 #include "sun8i_vi_layer.h" 28 28 #include "sunxi_engine.h" 29 29 30 + struct de2_fmt_info { 31 + u32 drm_fmt; 32 + u32 de2_fmt; 33 + }; 34 + 30 35 static const struct de2_fmt_info de2_formats[] = { 31 36 { 32 37 .drm_fmt = DRM_FORMAT_ARGB8888, 33 38 .de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888, 34 - .rgb = true, 35 - .csc = SUN8I_CSC_MODE_OFF, 36 39 }, 37 40 { 38 41 .drm_fmt = DRM_FORMAT_ABGR8888, 39 42 .de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888, 40 - .rgb = true, 41 - .csc = SUN8I_CSC_MODE_OFF, 42 43 }, 43 44 { 44 45 .drm_fmt = DRM_FORMAT_RGBA8888, 45 46 .de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888, 46 - .rgb = true, 47 - .csc = SUN8I_CSC_MODE_OFF, 48 47 }, 49 48 { 50 49 .drm_fmt = DRM_FORMAT_BGRA8888, 51 50 .de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888, 52 - .rgb = true, 53 - .csc = SUN8I_CSC_MODE_OFF, 54 51 }, 55 52 { 56 53 .drm_fmt = DRM_FORMAT_XRGB8888, 57 54 .de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888, 58 - .rgb = true, 59 - .csc = SUN8I_CSC_MODE_OFF, 60 55 }, 61 56 { 62 57 .drm_fmt = DRM_FORMAT_XBGR8888, 63 58 .de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888, 64 - .rgb = true, 65 - .csc = SUN8I_CSC_MODE_OFF, 66 59 }, 67 60 { 68 61 .drm_fmt = DRM_FORMAT_RGBX8888, 69 62 .de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888, 70 - .rgb = true, 71 - .csc = SUN8I_CSC_MODE_OFF, 72 63 }, 73 64 { 74 65 .drm_fmt = DRM_FORMAT_BGRX8888, 75 66 .de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888, 76 - .rgb = true, 77 - .csc = SUN8I_CSC_MODE_OFF, 78 67 }, 79 68 { 80 69 .drm_fmt = DRM_FORMAT_RGB888, 81 70 .de2_fmt = SUN8I_MIXER_FBFMT_RGB888, 82 - .rgb = true, 83 - .csc = SUN8I_CSC_MODE_OFF, 84 71 }, 85 72 { 86 73 .drm_fmt = DRM_FORMAT_BGR888, 87 74 .de2_fmt = SUN8I_MIXER_FBFMT_BGR888, 88 - .rgb = true, 89 - .csc = SUN8I_CSC_MODE_OFF, 90 75 }, 91 76 { 92 77 .drm_fmt = DRM_FORMAT_RGB565, 93 78 .de2_fmt = SUN8I_MIXER_FBFMT_RGB565, 94 - .rgb = true, 95 - .csc = SUN8I_CSC_MODE_OFF, 96 79 }, 97 80 { 98 81 .drm_fmt = DRM_FORMAT_BGR565, 99 82 .de2_fmt = SUN8I_MIXER_FBFMT_BGR565, 100 - .rgb = true, 101 - .csc = SUN8I_CSC_MODE_OFF, 102 83 }, 103 
84 { 104 85 .drm_fmt = DRM_FORMAT_ARGB4444, 105 86 .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444, 106 - .rgb = true, 107 - .csc = SUN8I_CSC_MODE_OFF, 108 87 }, 109 88 { 110 89 /* for DE2 VI layer which ignores alpha */ 111 90 .drm_fmt = DRM_FORMAT_XRGB4444, 112 91 .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444, 113 - .rgb = true, 114 - .csc = SUN8I_CSC_MODE_OFF, 115 92 }, 116 93 { 117 94 .drm_fmt = DRM_FORMAT_ABGR4444, 118 95 .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444, 119 - .rgb = true, 120 - .csc = SUN8I_CSC_MODE_OFF, 121 96 }, 122 97 { 123 98 /* for DE2 VI layer which ignores alpha */ 124 99 .drm_fmt = DRM_FORMAT_XBGR4444, 125 100 .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444, 126 - .rgb = true, 127 - .csc = SUN8I_CSC_MODE_OFF, 128 101 }, 129 102 { 130 103 .drm_fmt = DRM_FORMAT_RGBA4444, 131 104 .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444, 132 - .rgb = true, 133 - .csc = SUN8I_CSC_MODE_OFF, 134 105 }, 135 106 { 136 107 /* for DE2 VI layer which ignores alpha */ 137 108 .drm_fmt = DRM_FORMAT_RGBX4444, 138 109 .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444, 139 - .rgb = true, 140 - .csc = SUN8I_CSC_MODE_OFF, 141 110 }, 142 111 { 143 112 .drm_fmt = DRM_FORMAT_BGRA4444, 144 113 .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444, 145 - .rgb = true, 146 - .csc = SUN8I_CSC_MODE_OFF, 147 114 }, 148 115 { 149 116 /* for DE2 VI layer which ignores alpha */ 150 117 .drm_fmt = DRM_FORMAT_BGRX4444, 151 118 .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444, 152 - .rgb = true, 153 - .csc = SUN8I_CSC_MODE_OFF, 154 119 }, 155 120 { 156 121 .drm_fmt = DRM_FORMAT_ARGB1555, 157 122 .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555, 158 - .rgb = true, 159 - .csc = SUN8I_CSC_MODE_OFF, 160 123 }, 161 124 { 162 125 /* for DE2 VI layer which ignores alpha */ 163 126 .drm_fmt = DRM_FORMAT_XRGB1555, 164 127 .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555, 165 - .rgb = true, 166 - .csc = SUN8I_CSC_MODE_OFF, 167 128 }, 168 129 { 169 130 .drm_fmt = DRM_FORMAT_ABGR1555, 170 131 .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555, 171 - .rgb = true, 172 - .csc = SUN8I_CSC_MODE_OFF, 173 132 }, 174 
133 { 175 134 /* for DE2 VI layer which ignores alpha */ 176 135 .drm_fmt = DRM_FORMAT_XBGR1555, 177 136 .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555, 178 - .rgb = true, 179 - .csc = SUN8I_CSC_MODE_OFF, 180 137 }, 181 138 { 182 139 .drm_fmt = DRM_FORMAT_RGBA5551, 183 140 .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551, 184 - .rgb = true, 185 - .csc = SUN8I_CSC_MODE_OFF, 186 141 }, 187 142 { 188 143 /* for DE2 VI layer which ignores alpha */ 189 144 .drm_fmt = DRM_FORMAT_RGBX5551, 190 145 .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551, 191 - .rgb = true, 192 - .csc = SUN8I_CSC_MODE_OFF, 193 146 }, 194 147 { 195 148 .drm_fmt = DRM_FORMAT_BGRA5551, 196 149 .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551, 197 - .rgb = true, 198 - .csc = SUN8I_CSC_MODE_OFF, 199 150 }, 200 151 { 201 152 /* for DE2 VI layer which ignores alpha */ 202 153 .drm_fmt = DRM_FORMAT_BGRX5551, 203 154 .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551, 204 - .rgb = true, 205 - .csc = SUN8I_CSC_MODE_OFF, 206 155 }, 207 156 { 208 157 .drm_fmt = DRM_FORMAT_ARGB2101010, 209 158 .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010, 210 - .rgb = true, 211 - .csc = SUN8I_CSC_MODE_OFF, 212 159 }, 213 160 { 214 161 .drm_fmt = DRM_FORMAT_ABGR2101010, 215 162 .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010, 216 - .rgb = true, 217 - .csc = SUN8I_CSC_MODE_OFF, 218 163 }, 219 164 { 220 165 .drm_fmt = DRM_FORMAT_RGBA1010102, 221 166 .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102, 222 - .rgb = true, 223 - .csc = SUN8I_CSC_MODE_OFF, 224 167 }, 225 168 { 226 169 .drm_fmt = DRM_FORMAT_BGRA1010102, 227 170 .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102, 228 - .rgb = true, 229 - .csc = SUN8I_CSC_MODE_OFF, 230 171 }, 231 172 { 232 173 .drm_fmt = DRM_FORMAT_UYVY, 233 174 .de2_fmt = SUN8I_MIXER_FBFMT_UYVY, 234 - .rgb = false, 235 - .csc = SUN8I_CSC_MODE_YUV2RGB, 236 175 }, 237 176 { 238 177 .drm_fmt = DRM_FORMAT_VYUY, 239 178 .de2_fmt = SUN8I_MIXER_FBFMT_VYUY, 240 - .rgb = false, 241 - .csc = SUN8I_CSC_MODE_YUV2RGB, 242 179 }, 243 180 { 244 181 .drm_fmt = DRM_FORMAT_YUYV, 245 182 .de2_fmt = 
SUN8I_MIXER_FBFMT_YUYV, 246 - .rgb = false, 247 - .csc = SUN8I_CSC_MODE_YUV2RGB, 248 183 }, 249 184 { 250 185 .drm_fmt = DRM_FORMAT_YVYU, 251 186 .de2_fmt = SUN8I_MIXER_FBFMT_YVYU, 252 - .rgb = false, 253 - .csc = SUN8I_CSC_MODE_YUV2RGB, 254 187 }, 255 188 { 256 189 .drm_fmt = DRM_FORMAT_NV16, 257 190 .de2_fmt = SUN8I_MIXER_FBFMT_NV16, 258 - .rgb = false, 259 - .csc = SUN8I_CSC_MODE_YUV2RGB, 260 191 }, 261 192 { 262 193 .drm_fmt = DRM_FORMAT_NV61, 263 194 .de2_fmt = SUN8I_MIXER_FBFMT_NV61, 264 - .rgb = false, 265 - .csc = SUN8I_CSC_MODE_YUV2RGB, 266 195 }, 267 196 { 268 197 .drm_fmt = DRM_FORMAT_NV12, 269 198 .de2_fmt = SUN8I_MIXER_FBFMT_NV12, 270 - .rgb = false, 271 - .csc = SUN8I_CSC_MODE_YUV2RGB, 272 199 }, 273 200 { 274 201 .drm_fmt = DRM_FORMAT_NV21, 275 202 .de2_fmt = SUN8I_MIXER_FBFMT_NV21, 276 - .rgb = false, 277 - .csc = SUN8I_CSC_MODE_YUV2RGB, 278 203 }, 279 204 { 280 205 .drm_fmt = DRM_FORMAT_YUV422, 281 206 .de2_fmt = SUN8I_MIXER_FBFMT_YUV422, 282 - .rgb = false, 283 - .csc = SUN8I_CSC_MODE_YUV2RGB, 284 207 }, 285 208 { 286 209 .drm_fmt = DRM_FORMAT_YUV420, 287 210 .de2_fmt = SUN8I_MIXER_FBFMT_YUV420, 288 - .rgb = false, 289 - .csc = SUN8I_CSC_MODE_YUV2RGB, 290 211 }, 291 212 { 292 213 .drm_fmt = DRM_FORMAT_YUV411, 293 214 .de2_fmt = SUN8I_MIXER_FBFMT_YUV411, 294 - .rgb = false, 295 - .csc = SUN8I_CSC_MODE_YUV2RGB, 296 215 }, 297 216 { 298 217 .drm_fmt = DRM_FORMAT_YVU422, 299 218 .de2_fmt = SUN8I_MIXER_FBFMT_YUV422, 300 - .rgb = false, 301 - .csc = SUN8I_CSC_MODE_YVU2RGB, 302 219 }, 303 220 { 304 221 .drm_fmt = DRM_FORMAT_YVU420, 305 222 .de2_fmt = SUN8I_MIXER_FBFMT_YUV420, 306 - .rgb = false, 307 - .csc = SUN8I_CSC_MODE_YVU2RGB, 308 223 }, 309 224 { 310 225 .drm_fmt = DRM_FORMAT_YVU411, 311 226 .de2_fmt = SUN8I_MIXER_FBFMT_YUV411, 312 - .rgb = false, 313 - .csc = SUN8I_CSC_MODE_YVU2RGB, 314 227 }, 315 228 { 316 229 .drm_fmt = DRM_FORMAT_P010, 317 230 .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV, 318 - .rgb = false, 319 - .csc = SUN8I_CSC_MODE_YUV2RGB, 320 
231 }, 321 232 { 322 233 .drm_fmt = DRM_FORMAT_P210, 323 234 .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV, 324 - .rgb = false, 325 - .csc = SUN8I_CSC_MODE_YUV2RGB, 326 235 }, 327 236 }; 328 237 329 - const struct de2_fmt_info *sun8i_mixer_format_info(u32 format) 238 + int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format) 330 239 { 331 240 unsigned int i; 332 241 333 242 for (i = 0; i < ARRAY_SIZE(de2_formats); ++i) 334 - if (de2_formats[i].drm_fmt == format) 335 - return &de2_formats[i]; 243 + if (de2_formats[i].drm_fmt == format) { 244 + *hw_format = de2_formats[i].de2_fmt; 245 + return 0; 246 + } 336 247 337 - return NULL; 248 + return -EINVAL; 338 249 } 339 250 340 251 static void sun8i_mixer_commit(struct sunxi_engine *engine)
+1 -9
drivers/gpu/drm/sun4i/sun8i_mixer.h
··· 10 10 #include <linux/regmap.h> 11 11 #include <linux/reset.h> 12 12 13 - #include "sun8i_csc.h" 14 13 #include "sunxi_engine.h" 15 14 16 15 #define SUN8I_MIXER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1)) ··· 143 144 #define SUN50I_MIXER_CDC0_EN 0xd0000 144 145 #define SUN50I_MIXER_CDC1_EN 0xd8000 145 146 146 - struct de2_fmt_info { 147 - u32 drm_fmt; 148 - u32 de2_fmt; 149 - bool rgb; 150 - enum sun8i_csc_mode csc; 151 - }; 152 - 153 147 /** 154 148 * struct sun8i_mixer_cfg - mixer HW configuration 155 149 * @vi_num: number of VI channels ··· 202 210 return DE2_CH_BASE + channel * DE2_CH_SIZE; 203 211 } 204 212 205 - const struct de2_fmt_info *sun8i_mixer_format_info(u32 format); 213 + int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format); 206 214 #endif /* _SUN8I_MIXER_H_ */
+8 -6
drivers/gpu/drm/sun4i/sun8i_ui_layer.c
··· 19 19 #include <drm/drm_plane_helper.h> 20 20 #include <drm/drm_probe_helper.h> 21 21 22 - #include "sun8i_ui_layer.h" 23 22 #include "sun8i_mixer.h" 23 + #include "sun8i_ui_layer.h" 24 24 #include "sun8i_ui_scaler.h" 25 25 26 26 static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel, ··· 174 174 int overlay, struct drm_plane *plane) 175 175 { 176 176 struct drm_plane_state *state = plane->state; 177 - const struct de2_fmt_info *fmt_info; 178 - u32 val, ch_base; 177 + const struct drm_format_info *fmt; 178 + u32 val, ch_base, hw_fmt; 179 + int ret; 179 180 180 181 ch_base = sun8i_channel_base(mixer, channel); 181 182 182 - fmt_info = sun8i_mixer_format_info(state->fb->format->format); 183 - if (!fmt_info || !fmt_info->rgb) { 183 + fmt = state->fb->format; 184 + ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); 185 + if (ret || fmt->is_yuv) { 184 186 DRM_DEBUG_DRIVER("Invalid format\n"); 185 187 return -EINVAL; 186 188 } 187 189 188 - val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET; 190 + val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET; 189 191 regmap_update_bits(mixer->engine.regs, 190 192 SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay), 191 193 SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
+30 -10
drivers/gpu/drm/sun4i/sun8i_vi_layer.c
··· 12 12 #include <drm/drm_plane_helper.h> 13 13 #include <drm/drm_probe_helper.h> 14 14 15 - #include "sun8i_vi_layer.h" 15 + #include "sun8i_csc.h" 16 16 #include "sun8i_mixer.h" 17 + #include "sun8i_vi_layer.h" 17 18 #include "sun8i_vi_scaler.h" 18 19 19 20 static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel, ··· 211 210 return 0; 212 211 } 213 212 213 + static bool sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format) 214 + { 215 + if (!format->is_yuv) 216 + return SUN8I_CSC_MODE_OFF; 217 + 218 + switch (format->format) { 219 + case DRM_FORMAT_YVU411: 220 + case DRM_FORMAT_YVU420: 221 + case DRM_FORMAT_YVU422: 222 + case DRM_FORMAT_YVU444: 223 + return SUN8I_CSC_MODE_YVU2RGB; 224 + default: 225 + return SUN8I_CSC_MODE_YUV2RGB; 226 + } 227 + } 228 + 214 229 static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel, 215 230 int overlay, struct drm_plane *plane) 216 231 { 217 232 struct drm_plane_state *state = plane->state; 218 - const struct de2_fmt_info *fmt_info; 219 - u32 val, ch_base; 233 + u32 val, ch_base, csc_mode, hw_fmt; 234 + const struct drm_format_info *fmt; 235 + int ret; 220 236 221 237 ch_base = sun8i_channel_base(mixer, channel); 222 238 223 - fmt_info = sun8i_mixer_format_info(state->fb->format->format); 224 - if (!fmt_info) { 239 + fmt = state->fb->format; 240 + ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); 241 + if (ret) { 225 242 DRM_DEBUG_DRIVER("Invalid format\n"); 226 - return -EINVAL; 243 + return ret; 227 244 } 228 245 229 - val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET; 246 + val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET; 230 247 regmap_update_bits(mixer->engine.regs, 231 248 SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay), 232 249 SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val); 233 250 234 - if (fmt_info->csc != SUN8I_CSC_MODE_OFF) { 235 - sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc, 251 + csc_mode = 
sun8i_vi_layer_get_csc_mode(fmt); 252 + if (csc_mode != SUN8I_CSC_MODE_OFF) { 253 + sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode, 236 254 state->color_encoding, 237 255 state->color_range); 238 256 sun8i_csc_enable_ccsc(mixer, channel, true); ··· 259 239 sun8i_csc_enable_ccsc(mixer, channel, false); 260 240 } 261 241 262 - if (fmt_info->rgb) 242 + if (!fmt->is_yuv) 263 243 val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE; 264 244 else 265 245 val = 0;
+1 -10
drivers/gpu/drm/tegra/dc.c
··· 1496 1496 struct drm_minor *minor = crtc->dev->primary; 1497 1497 struct dentry *root; 1498 1498 struct tegra_dc *dc = to_tegra_dc(crtc); 1499 - int err; 1500 1499 1501 1500 #ifdef CONFIG_DEBUG_FS 1502 1501 root = crtc->debugfs_entry; ··· 1511 1512 for (i = 0; i < count; i++) 1512 1513 dc->debugfs_files[i].data = dc; 1513 1514 1514 - err = drm_debugfs_create_files(dc->debugfs_files, count, root, minor); 1515 - if (err < 0) 1516 - goto free; 1515 + drm_debugfs_create_files(dc->debugfs_files, count, root, minor); 1517 1516 1518 1517 return 0; 1519 - 1520 - free: 1521 - kfree(dc->debugfs_files); 1522 - dc->debugfs_files = NULL; 1523 - 1524 - return err; 1525 1518 } 1526 1519 1527 1520 static void tegra_dc_early_unregister(struct drm_crtc *crtc)
+4 -4
drivers/gpu/drm/tegra/drm.c
··· 839 839 { "iova", tegra_debugfs_iova, 0 }, 840 840 }; 841 841 842 - static int tegra_debugfs_init(struct drm_minor *minor) 842 + static void tegra_debugfs_init(struct drm_minor *minor) 843 843 { 844 - return drm_debugfs_create_files(tegra_debugfs_list, 845 - ARRAY_SIZE(tegra_debugfs_list), 846 - minor->debugfs_root, minor); 844 + drm_debugfs_create_files(tegra_debugfs_list, 845 + ARRAY_SIZE(tegra_debugfs_list), 846 + minor->debugfs_root, minor); 847 847 } 848 848 #endif 849 849
-2
drivers/gpu/drm/tegra/drm.h
··· 152 152 tegra_output_connector_detect(struct drm_connector *connector, bool force); 153 153 void tegra_output_connector_destroy(struct drm_connector *connector); 154 154 155 - void tegra_output_encoder_destroy(struct drm_encoder *encoder); 156 - 157 155 /* from dpaux.c */ 158 156 struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np); 159 157 enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
+4 -17
drivers/gpu/drm/tegra/dsi.c
··· 22 22 #include <drm/drm_file.h> 23 23 #include <drm/drm_mipi_dsi.h> 24 24 #include <drm/drm_panel.h> 25 + #include <drm/drm_simple_kms_helper.h> 25 26 26 27 #include "dc.h" 27 28 #include "drm.h" ··· 235 234 struct drm_minor *minor = connector->dev->primary; 236 235 struct dentry *root = connector->debugfs_entry; 237 236 struct tegra_dsi *dsi = to_dsi(output); 238 - int err; 239 237 240 238 dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), 241 239 GFP_KERNEL); ··· 244 244 for (i = 0; i < count; i++) 245 245 dsi->debugfs_files[i].data = dsi; 246 246 247 - err = drm_debugfs_create_files(dsi->debugfs_files, count, root, minor); 248 - if (err < 0) 249 - goto free; 247 + drm_debugfs_create_files(dsi->debugfs_files, count, root, minor); 250 248 251 249 return 0; 252 - 253 - free: 254 - kfree(dsi->debugfs_files); 255 - dsi->debugfs_files = NULL; 256 - 257 - return err; 258 250 } 259 251 260 252 static void tegra_dsi_early_unregister(struct drm_connector *connector) ··· 816 824 .mode_valid = tegra_dsi_connector_mode_valid, 817 825 }; 818 826 819 - static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { 820 - .destroy = tegra_output_encoder_destroy, 821 - }; 822 - 823 827 static void tegra_dsi_unprepare(struct tegra_dsi *dsi) 824 828 { 825 829 int err; ··· 1046 1058 &tegra_dsi_connector_helper_funcs); 1047 1059 dsi->output.connector.dpms = DRM_MODE_DPMS_OFF; 1048 1060 1049 - drm_encoder_init(drm, &dsi->output.encoder, 1050 - &tegra_dsi_encoder_funcs, 1051 - DRM_MODE_ENCODER_DSI, NULL); 1061 + drm_simple_encoder_init(drm, &dsi->output.encoder, 1062 + DRM_MODE_ENCODER_DSI); 1052 1063 drm_encoder_helper_add(&dsi->output.encoder, 1053 1064 &tegra_dsi_encoder_helper_funcs); 1054 1065
+4 -16
drivers/gpu/drm/tegra/hdmi.c
··· 22 22 #include <drm/drm_file.h> 23 23 #include <drm/drm_fourcc.h> 24 24 #include <drm/drm_probe_helper.h> 25 + #include <drm/drm_simple_kms_helper.h> 25 26 26 27 #include "hda.h" 27 28 #include "hdmi.h" ··· 1065 1064 struct drm_minor *minor = connector->dev->primary; 1066 1065 struct dentry *root = connector->debugfs_entry; 1067 1066 struct tegra_hdmi *hdmi = to_hdmi(output); 1068 - int err; 1069 1067 1070 1068 hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), 1071 1069 GFP_KERNEL); ··· 1074 1074 for (i = 0; i < count; i++) 1075 1075 hdmi->debugfs_files[i].data = hdmi; 1076 1076 1077 - err = drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor); 1078 - if (err < 0) 1079 - goto free; 1077 + drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor); 1080 1078 1081 1079 return 0; 1082 - 1083 - free: 1084 - kfree(hdmi->debugfs_files); 1085 - hdmi->debugfs_files = NULL; 1086 - 1087 - return err; 1088 1080 } 1089 1081 1090 1082 static void tegra_hdmi_early_unregister(struct drm_connector *connector) ··· 1126 1134 tegra_hdmi_connector_helper_funcs = { 1127 1135 .get_modes = tegra_output_connector_get_modes, 1128 1136 .mode_valid = tegra_hdmi_connector_mode_valid, 1129 - }; 1130 - 1131 - static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { 1132 - .destroy = tegra_output_encoder_destroy, 1133 1137 }; 1134 1138 1135 1139 static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) ··· 1433 1445 &tegra_hdmi_connector_helper_funcs); 1434 1446 hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF; 1435 1447 1436 - drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs, 1437 - DRM_MODE_ENCODER_TMDS, NULL); 1448 + drm_simple_encoder_init(drm, &hdmi->output.encoder, 1449 + DRM_MODE_ENCODER_TMDS); 1438 1450 drm_encoder_helper_add(&hdmi->output.encoder, 1439 1451 &tegra_hdmi_encoder_helper_funcs); 1440 1452
+1 -5
drivers/gpu/drm/tegra/output.c
··· 6 6 7 7 #include <drm/drm_atomic_helper.h> 8 8 #include <drm/drm_panel.h> 9 + #include <drm/drm_simple_kms_helper.h> 9 10 10 11 #include "drm.h" 11 12 #include "dc.h" ··· 78 77 79 78 drm_connector_unregister(connector); 80 79 drm_connector_cleanup(connector); 81 - } 82 - 83 - void tegra_output_encoder_destroy(struct drm_encoder *encoder) 84 - { 85 - drm_encoder_cleanup(encoder); 86 80 } 87 81 88 82 static irqreturn_t hpd_irq(int irq, void *data)
+2 -6
drivers/gpu/drm/tegra/rgb.c
··· 8 8 9 9 #include <drm/drm_atomic_helper.h> 10 10 #include <drm/drm_panel.h> 11 + #include <drm/drm_simple_kms_helper.h> 11 12 12 13 #include "drm.h" 13 14 #include "dc.h" ··· 109 108 static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { 110 109 .get_modes = tegra_output_connector_get_modes, 111 110 .mode_valid = tegra_rgb_connector_mode_valid, 112 - }; 113 - 114 - static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { 115 - .destroy = tegra_output_encoder_destroy, 116 111 }; 117 112 118 113 static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) ··· 278 281 &tegra_rgb_connector_helper_funcs); 279 282 output->connector.dpms = DRM_MODE_DPMS_OFF; 280 283 281 - drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, 282 - DRM_MODE_ENCODER_LVDS, NULL); 284 + drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS); 283 285 drm_encoder_helper_add(&output->encoder, 284 286 &tegra_rgb_encoder_helper_funcs); 285 287
+3 -16
drivers/gpu/drm/tegra/sor.c
··· 23 23 #include <drm/drm_file.h> 24 24 #include <drm/drm_panel.h> 25 25 #include <drm/drm_scdc_helper.h> 26 + #include <drm/drm_simple_kms_helper.h> 26 27 27 28 #include "dc.h" 28 29 #include "dp.h" ··· 1688 1687 struct drm_minor *minor = connector->dev->primary; 1689 1688 struct dentry *root = connector->debugfs_entry; 1690 1689 struct tegra_sor *sor = to_sor(output); 1691 - int err; 1692 1690 1693 1691 sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), 1694 1692 GFP_KERNEL); ··· 1697 1697 for (i = 0; i < count; i++) 1698 1698 sor->debugfs_files[i].data = sor; 1699 1699 1700 - err = drm_debugfs_create_files(sor->debugfs_files, count, root, minor); 1701 - if (err < 0) 1702 - goto free; 1700 + drm_debugfs_create_files(sor->debugfs_files, count, root, minor); 1703 1701 1704 1702 return 0; 1705 - 1706 - free: 1707 - kfree(sor->debugfs_files); 1708 - sor->debugfs_files = NULL; 1709 - 1710 - return err; 1711 1703 } 1712 1704 1713 1705 static void tegra_sor_early_unregister(struct drm_connector *connector) ··· 1795 1803 static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { 1796 1804 .get_modes = tegra_sor_connector_get_modes, 1797 1805 .mode_valid = tegra_sor_connector_mode_valid, 1798 - }; 1799 - 1800 - static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { 1801 - .destroy = tegra_output_encoder_destroy, 1802 1806 }; 1803 1807 1804 1808 static int ··· 3090 3102 &tegra_sor_connector_helper_funcs); 3091 3103 sor->output.connector.dpms = DRM_MODE_DPMS_OFF; 3092 3104 3093 - drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, 3094 - encoder, NULL); 3105 + drm_simple_encoder_init(drm, &sor->output.encoder, encoder); 3095 3106 drm_encoder_helper_add(&sor->output.encoder, helpers); 3096 3107 3097 3108 drm_connector_attach_encoder(&sor->output.connector,
+2 -8
drivers/gpu/drm/tidss/tidss_drv.c
··· 17 17 #include <drm/drm_fb_helper.h> 18 18 #include <drm/drm_gem_cma_helper.h> 19 19 #include <drm/drm_irq.h> 20 + #include <drm/drm_managed.h> 20 21 #include <drm/drm_probe_helper.h> 21 22 22 23 #include "tidss_dispc.h" ··· 103 102 104 103 static void tidss_release(struct drm_device *ddev) 105 104 { 106 - struct tidss_device *tidss = ddev->dev_private; 107 - 108 105 drm_kms_helper_poll_fini(ddev); 109 - 110 - tidss_modeset_cleanup(tidss); 111 - 112 - drm_dev_fini(ddev); 113 - 114 - kfree(tidss); 115 106 } 116 107 117 108 DEFINE_DRM_GEM_CMA_FOPS(tidss_fops); ··· 147 154 kfree(ddev); 148 155 return ret; 149 156 } 157 + drmm_add_final_kfree(ddev, tidss); 150 158 151 159 tidss->dev = dev; 152 160 tidss->feat = of_device_get_match_data(dev);
+3 -7
drivers/gpu/drm/tidss/tidss_encoder.c
··· 8 8 9 9 #include <drm/drm_crtc.h> 10 10 #include <drm/drm_crtc_helper.h> 11 - #include <drm/drm_panel.h> 12 11 #include <drm/drm_of.h> 12 + #include <drm/drm_panel.h> 13 + #include <drm/drm_simple_kms_helper.h> 13 14 14 15 #include "tidss_crtc.h" 15 16 #include "tidss_drv.h" ··· 60 59 .atomic_check = tidss_encoder_atomic_check, 61 60 }; 62 61 63 - static const struct drm_encoder_funcs encoder_funcs = { 64 - .destroy = drm_encoder_cleanup, 65 - }; 66 - 67 62 struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss, 68 63 u32 encoder_type, u32 possible_crtcs) 69 64 { ··· 72 75 73 76 enc->possible_crtcs = possible_crtcs; 74 77 75 - ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs, 76 - encoder_type, NULL); 78 + ret = drm_simple_encoder_init(&tidss->ddev, enc, encoder_type); 77 79 if (ret < 0) 78 80 return ERR_PTR(ret); 79 81
+5 -14
drivers/gpu/drm/tidss/tidss_kms.c
··· 258 258 259 259 dev_dbg(tidss->dev, "%s\n", __func__); 260 260 261 - drm_mode_config_init(ddev); 261 + ret = drmm_mode_config_init(ddev); 262 + if (ret) 263 + return ret; 262 264 263 265 ddev->mode_config.min_width = 8; 264 266 ddev->mode_config.min_height = 8; ··· 272 270 273 271 ret = tidss_dispc_modeset_init(tidss); 274 272 if (ret) 275 - goto err_mode_config_cleanup; 273 + return ret; 276 274 277 275 ret = drm_vblank_init(ddev, tidss->num_crtcs); 278 276 if (ret) 279 - goto err_mode_config_cleanup; 277 + return ret; 280 278 281 279 /* Start with vertical blanking interrupt reporting disabled. */ 282 280 for (i = 0; i < tidss->num_crtcs; ++i) ··· 287 285 dev_dbg(tidss->dev, "%s done\n", __func__); 288 286 289 287 return 0; 290 - 291 - err_mode_config_cleanup: 292 - drm_mode_config_cleanup(ddev); 293 - return ret; 294 - } 295 - 296 - void tidss_modeset_cleanup(struct tidss_device *tidss) 297 - { 298 - struct drm_device *ddev = &tidss->ddev; 299 - 300 - drm_mode_config_cleanup(ddev); 301 288 }
-1
drivers/gpu/drm/tidss/tidss_kms.h
··· 10 10 struct tidss_device; 11 11 12 12 int tidss_modeset_init(struct tidss_device *tidss); 13 - void tidss_modeset_cleanup(struct tidss_device *tidss); 14 13 15 14 #endif
+5 -15
drivers/gpu/drm/tilcdc/tilcdc_drv.c
··· 390 390 ret = drm_dev_register(ddev, 0); 391 391 if (ret) 392 392 goto init_failed; 393 + priv->is_registered = true; 393 394 394 395 drm_fbdev_generic_setup(ddev, bpp); 395 - 396 - priv->is_registered = true; 397 396 return 0; 398 397 399 398 init_failed: ··· 477 478 { "mm", tilcdc_mm_show, 0 }, 478 479 }; 479 480 480 - static int tilcdc_debugfs_init(struct drm_minor *minor) 481 + static void tilcdc_debugfs_init(struct drm_minor *minor) 481 482 { 482 - struct drm_device *dev = minor->dev; 483 483 struct tilcdc_module *mod; 484 - int ret; 485 484 486 - ret = drm_debugfs_create_files(tilcdc_debugfs_list, 487 - ARRAY_SIZE(tilcdc_debugfs_list), 488 - minor->debugfs_root, minor); 485 + drm_debugfs_create_files(tilcdc_debugfs_list, 486 + ARRAY_SIZE(tilcdc_debugfs_list), 487 + minor->debugfs_root, minor); 489 488 490 489 list_for_each_entry(mod, &module_list, list) 491 490 if (mod->funcs->debugfs_init) 492 491 mod->funcs->debugfs_init(mod, minor); 493 - 494 - if (ret) { 495 - dev_err(dev->dev, "could not install tilcdc_debugfs_list\n"); 496 - return ret; 497 - } 498 - 499 - return ret; 500 492 } 501 493 #endif 502 494
+3 -7
drivers/gpu/drm/tilcdc/tilcdc_external.c
··· 10 10 #include <drm/drm_atomic_helper.h> 11 11 #include <drm/drm_bridge.h> 12 12 #include <drm/drm_of.h> 13 + #include <drm/drm_simple_kms_helper.h> 13 14 14 15 #include "tilcdc_drv.h" 15 16 #include "tilcdc_external.h" ··· 84 83 return 0; 85 84 } 86 85 87 - static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = { 88 - .destroy = drm_encoder_cleanup, 89 - }; 90 - 91 86 static 92 87 int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge) 93 88 { ··· 128 131 if (!priv->external_encoder) 129 132 return -ENOMEM; 130 133 131 - ret = drm_encoder_init(ddev, priv->external_encoder, 132 - &tilcdc_external_encoder_funcs, 133 - DRM_MODE_ENCODER_NONE, NULL); 134 + ret = drm_simple_encoder_init(ddev, priv->external_encoder, 135 + DRM_MODE_ENCODER_NONE); 134 136 if (ret) { 135 137 dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret); 136 138 return ret;
+2 -6
drivers/gpu/drm/tilcdc/tilcdc_panel.c
··· 16 16 #include <drm/drm_connector.h> 17 17 #include <drm/drm_modeset_helper_vtables.h> 18 18 #include <drm/drm_probe_helper.h> 19 + #include <drm/drm_simple_kms_helper.h> 19 20 20 21 #include "tilcdc_drv.h" 21 22 #include "tilcdc_panel.h" ··· 75 74 /* nothing needed */ 76 75 } 77 76 78 - static const struct drm_encoder_funcs panel_encoder_funcs = { 79 - .destroy = drm_encoder_cleanup, 80 - }; 81 - 82 77 static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = { 83 78 .dpms = panel_encoder_dpms, 84 79 .prepare = panel_encoder_prepare, ··· 99 102 encoder = &panel_encoder->base; 100 103 encoder->possible_crtcs = 1; 101 104 102 - ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs, 103 - DRM_MODE_ENCODER_LVDS, NULL); 105 + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); 104 106 if (ret < 0) 105 107 goto fail; 106 108
+84 -138
drivers/gpu/drm/tiny/gm12u320.c
··· 19 19 #include <drm/drm_gem_shmem_helper.h> 20 20 #include <drm/drm_gem_framebuffer_helper.h> 21 21 #include <drm/drm_ioctl.h> 22 + #include <drm/drm_managed.h> 22 23 #include <drm/drm_modeset_helper_vtables.h> 23 24 #include <drm/drm_probe_helper.h> 24 25 #include <drm/drm_simple_kms_helper.h> ··· 88 87 struct usb_device *udev; 89 88 unsigned char *cmd_buf; 90 89 unsigned char *data_buf[GM12U320_BLOCK_COUNT]; 91 - bool pipe_enabled; 92 90 struct { 93 - bool run; 94 - struct workqueue_struct *workq; 95 - struct work_struct work; 96 - wait_queue_head_t waitq; 91 + struct delayed_work work; 97 92 struct mutex lock; 98 93 struct drm_framebuffer *fb; 99 94 struct drm_rect rect; 95 + int frame; 96 + int draw_status_timeout; 100 97 } fb_update; 101 98 }; 102 99 ··· 158 159 int i, block_size; 159 160 const char *hdr; 160 161 161 - gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL); 162 + gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL); 162 163 if (!gm12u320->cmd_buf) 163 164 return -ENOMEM; 164 165 ··· 171 172 hdr = data_block_header; 172 173 } 173 174 174 - gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL); 175 + gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev, 176 + block_size, GFP_KERNEL); 175 177 if (!gm12u320->data_buf[i]) 176 178 return -ENOMEM; 177 179 ··· 182 182 data_block_footer, DATA_BLOCK_FOOTER_SIZE); 183 183 } 184 184 185 - gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME); 186 - if (!gm12u320->fb_update.workq) 187 - return -ENOMEM; 188 - 189 185 return 0; 190 - } 191 - 192 - static void gm12u320_usb_free(struct gm12u320_device *gm12u320) 193 - { 194 - int i; 195 - 196 - if (gm12u320->fb_update.workq) 197 - destroy_workqueue(gm12u320->fb_update.workq); 198 - 199 - for (i = 0; i < GM12U320_BLOCK_COUNT; i++) 200 - kfree(gm12u320->data_buf[i]); 201 - 202 - kfree(gm12u320->cmd_buf); 203 186 } 204 187 205 188 static int gm12u320_misc_request(struct gm12u320_device *gm12u320, ··· 327 344 static void 
gm12u320_fb_update_work(struct work_struct *work) 328 345 { 329 346 struct gm12u320_device *gm12u320 = 330 - container_of(work, struct gm12u320_device, fb_update.work); 331 - int draw_status_timeout = FIRST_FRAME_TIMEOUT; 347 + container_of(to_delayed_work(work), struct gm12u320_device, 348 + fb_update.work); 332 349 int block, block_size, len; 333 - int frame = 0; 334 350 int ret = 0; 335 351 336 - while (gm12u320->fb_update.run) { 337 - gm12u320_copy_fb_to_blocks(gm12u320); 352 + gm12u320_copy_fb_to_blocks(gm12u320); 338 353 339 - for (block = 0; block < GM12U320_BLOCK_COUNT; block++) { 340 - if (block == GM12U320_BLOCK_COUNT - 1) 341 - block_size = DATA_LAST_BLOCK_SIZE; 342 - else 343 - block_size = DATA_BLOCK_SIZE; 354 + for (block = 0; block < GM12U320_BLOCK_COUNT; block++) { 355 + if (block == GM12U320_BLOCK_COUNT - 1) 356 + block_size = DATA_LAST_BLOCK_SIZE; 357 + else 358 + block_size = DATA_BLOCK_SIZE; 344 359 345 - /* Send data command to device */ 346 - memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE); 347 - gm12u320->cmd_buf[8] = block_size & 0xff; 348 - gm12u320->cmd_buf[9] = block_size >> 8; 349 - gm12u320->cmd_buf[20] = 0xfc - block * 4; 350 - gm12u320->cmd_buf[21] = block | (frame << 7); 360 + /* Send data command to device */ 361 + memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE); 362 + gm12u320->cmd_buf[8] = block_size & 0xff; 363 + gm12u320->cmd_buf[9] = block_size >> 8; 364 + gm12u320->cmd_buf[20] = 0xfc - block * 4; 365 + gm12u320->cmd_buf[21] = 366 + block | (gm12u320->fb_update.frame << 7); 351 367 352 - ret = usb_bulk_msg(gm12u320->udev, 353 - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), 354 - gm12u320->cmd_buf, CMD_SIZE, &len, 355 - CMD_TIMEOUT); 356 - if (ret || len != CMD_SIZE) 357 - goto err; 358 - 359 - /* Send data block to device */ 360 - ret = usb_bulk_msg(gm12u320->udev, 361 - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), 362 - gm12u320->data_buf[block], block_size, 363 - &len, DATA_TIMEOUT); 364 - if (ret || len != block_size) 365 - 
goto err; 366 - 367 - /* Read status */ 368 - ret = usb_bulk_msg(gm12u320->udev, 369 - usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), 370 - gm12u320->cmd_buf, READ_STATUS_SIZE, &len, 371 - CMD_TIMEOUT); 372 - if (ret || len != READ_STATUS_SIZE) 373 - goto err; 374 - } 375 - 376 - /* Send draw command to device */ 377 - memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE); 378 368 ret = usb_bulk_msg(gm12u320->udev, 379 369 usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), 380 - gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); 370 + gm12u320->cmd_buf, CMD_SIZE, &len, 371 + CMD_TIMEOUT); 381 372 if (ret || len != CMD_SIZE) 373 + goto err; 374 + 375 + /* Send data block to device */ 376 + ret = usb_bulk_msg(gm12u320->udev, 377 + usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), 378 + gm12u320->data_buf[block], block_size, 379 + &len, DATA_TIMEOUT); 380 + if (ret || len != block_size) 382 381 goto err; 383 382 384 383 /* Read status */ 385 384 ret = usb_bulk_msg(gm12u320->udev, 386 385 usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), 387 386 gm12u320->cmd_buf, READ_STATUS_SIZE, &len, 388 - draw_status_timeout); 387 + CMD_TIMEOUT); 389 388 if (ret || len != READ_STATUS_SIZE) 390 389 goto err; 391 - 392 - draw_status_timeout = CMD_TIMEOUT; 393 - frame = !frame; 394 - 395 - /* 396 - * We must draw a frame every 2s otherwise the projector 397 - * switches back to showing its logo. 
398 - */ 399 - wait_event_timeout(gm12u320->fb_update.waitq, 400 - !gm12u320->fb_update.run || 401 - gm12u320->fb_update.fb != NULL, 402 - IDLE_TIMEOUT); 403 390 } 391 + 392 + /* Send draw command to device */ 393 + memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE); 394 + ret = usb_bulk_msg(gm12u320->udev, 395 + usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), 396 + gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); 397 + if (ret || len != CMD_SIZE) 398 + goto err; 399 + 400 + /* Read status */ 401 + ret = usb_bulk_msg(gm12u320->udev, 402 + usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), 403 + gm12u320->cmd_buf, READ_STATUS_SIZE, &len, 404 + gm12u320->fb_update.draw_status_timeout); 405 + if (ret || len != READ_STATUS_SIZE) 406 + goto err; 407 + 408 + gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT; 409 + gm12u320->fb_update.frame = !gm12u320->fb_update.frame; 410 + 411 + /* 412 + * We must draw a frame every 2s otherwise the projector 413 + * switches back to showing its logo. 414 + */ 415 + queue_delayed_work(system_long_wq, &gm12u320->fb_update.work, 416 + IDLE_TIMEOUT); 417 + 404 418 return; 405 419 err: 406 420 /* Do not log errors caused by module unload or device unplug */ ··· 432 452 mutex_unlock(&gm12u320->fb_update.lock); 433 453 434 454 if (wakeup) 435 - wake_up(&gm12u320->fb_update.waitq); 455 + mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0); 436 456 437 457 if (old_fb) 438 458 drm_framebuffer_put(old_fb); 439 459 } 440 460 441 - static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320) 442 - { 443 - mutex_lock(&gm12u320->fb_update.lock); 444 - gm12u320->fb_update.run = true; 445 - mutex_unlock(&gm12u320->fb_update.lock); 446 - 447 - queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work); 448 - } 449 - 450 461 static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320) 451 462 { 452 - mutex_lock(&gm12u320->fb_update.lock); 453 - gm12u320->fb_update.run = false; 454 - mutex_unlock(&gm12u320->fb_update.lock); 
463 + struct drm_framebuffer *old_fb; 455 464 456 - wake_up(&gm12u320->fb_update.waitq); 457 - cancel_work_sync(&gm12u320->fb_update.work); 465 + cancel_delayed_work_sync(&gm12u320->fb_update.work); 458 466 459 467 mutex_lock(&gm12u320->fb_update.lock); 460 - if (gm12u320->fb_update.fb) { 461 - drm_framebuffer_put(gm12u320->fb_update.fb); 462 - gm12u320->fb_update.fb = NULL; 463 - } 468 + old_fb = gm12u320->fb_update.fb; 469 + gm12u320->fb_update.fb = NULL; 464 470 mutex_unlock(&gm12u320->fb_update.lock); 471 + 472 + drm_framebuffer_put(old_fb); 465 473 } 466 474 467 475 static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320) ··· 557 589 struct drm_crtc_state *crtc_state, 558 590 struct drm_plane_state *plane_state) 559 591 { 560 - struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private; 561 592 struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT }; 593 + struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private; 562 594 595 + gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT; 563 596 gm12u320_fb_mark_dirty(plane_state->fb, &rect); 564 - gm12u320_start_fb_update(gm12u320); 565 - gm12u320->pipe_enabled = true; 566 597 } 567 598 568 599 static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe) ··· 569 602 struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private; 570 603 571 604 gm12u320_stop_fb_update(gm12u320); 572 - gm12u320->pipe_enabled = false; 573 605 } 574 606 575 607 static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe, ··· 596 630 DRM_FORMAT_MOD_INVALID 597 631 }; 598 632 599 - static void gm12u320_driver_release(struct drm_device *dev) 600 - { 601 - struct gm12u320_device *gm12u320 = dev->dev_private; 602 - 603 - gm12u320_usb_free(gm12u320); 604 - drm_mode_config_cleanup(dev); 605 - drm_dev_fini(dev); 606 - kfree(gm12u320); 607 - } 608 - 609 633 DEFINE_DRM_GEM_FOPS(gm12u320_fops); 610 634 611 635 static struct drm_driver gm12u320_drm_driver = { ··· 607 651 .major = 
DRIVER_MAJOR, 608 652 .minor = DRIVER_MINOR, 609 653 610 - .release = gm12u320_driver_release, 611 654 .fops = &gm12u320_fops, 612 655 DRM_GEM_SHMEM_DRIVER_OPS, 613 656 }; ··· 636 681 return -ENOMEM; 637 682 638 683 gm12u320->udev = interface_to_usbdev(interface); 639 - INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); 684 + INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); 640 685 mutex_init(&gm12u320->fb_update.lock); 641 - init_waitqueue_head(&gm12u320->fb_update.waitq); 642 686 643 687 dev = &gm12u320->dev; 644 - ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev); 688 + ret = devm_drm_dev_init(&interface->dev, dev, &gm12u320_drm_driver); 645 689 if (ret) { 646 690 kfree(gm12u320); 647 691 return ret; 648 692 } 649 693 dev->dev_private = gm12u320; 694 + drmm_add_final_kfree(dev, gm12u320); 650 695 651 - drm_mode_config_init(dev); 696 + ret = drmm_mode_config_init(dev); 697 + if (ret) 698 + return ret; 699 + 652 700 dev->mode_config.min_width = GM12U320_USER_WIDTH; 653 701 dev->mode_config.max_width = GM12U320_USER_WIDTH; 654 702 dev->mode_config.min_height = GM12U320_HEIGHT; ··· 660 702 661 703 ret = gm12u320_usb_alloc(gm12u320); 662 704 if (ret) 663 - goto err_put; 705 + return ret; 664 706 665 707 ret = gm12u320_set_ecomode(gm12u320); 666 708 if (ret) 667 - goto err_put; 709 + return ret; 668 710 669 711 ret = gm12u320_conn_init(gm12u320); 670 712 if (ret) 671 - goto err_put; 713 + return ret; 672 714 673 715 ret = drm_simple_display_pipe_init(&gm12u320->dev, 674 716 &gm12u320->pipe, ··· 678 720 gm12u320_pipe_modifiers, 679 721 &gm12u320->conn); 680 722 if (ret) 681 - goto err_put; 723 + return ret; 682 724 683 725 drm_mode_config_reset(dev); 684 726 685 727 usb_set_intfdata(interface, dev); 686 728 ret = drm_dev_register(dev, 0); 687 729 if (ret) 688 - goto err_put; 730 + return ret; 689 731 690 732 drm_fbdev_generic_setup(dev, 0); 691 733 692 734 return 0; 693 - 694 - err_put: 695 - drm_dev_put(dev); 696 - return 
ret; 697 735 } 698 736 699 737 static void gm12u320_usb_disconnect(struct usb_interface *interface) 700 738 { 701 739 struct drm_device *dev = usb_get_intfdata(interface); 702 - struct gm12u320_device *gm12u320 = dev->dev_private; 703 740 704 - gm12u320_stop_fb_update(gm12u320); 705 741 drm_dev_unplug(dev); 706 - drm_dev_put(dev); 742 + drm_atomic_helper_shutdown(dev); 707 743 } 708 744 709 745 static __maybe_unused int gm12u320_suspend(struct usb_interface *interface, 710 746 pm_message_t message) 711 747 { 712 748 struct drm_device *dev = usb_get_intfdata(interface); 713 - struct gm12u320_device *gm12u320 = dev->dev_private; 714 749 715 - if (gm12u320->pipe_enabled) 716 - gm12u320_stop_fb_update(gm12u320); 717 - 718 - return 0; 750 + return drm_mode_config_helper_suspend(dev); 719 751 } 720 752 721 753 static __maybe_unused int gm12u320_resume(struct usb_interface *interface) ··· 714 766 struct gm12u320_device *gm12u320 = dev->dev_private; 715 767 716 768 gm12u320_set_ecomode(gm12u320); 717 - if (gm12u320->pipe_enabled) 718 - gm12u320_start_fb_update(gm12u320); 719 769 720 - return 0; 770 + return drm_mode_config_helper_resume(dev); 721 771 } 722 772 723 773 static const struct usb_device_id id_table[] = {
+2 -3
drivers/gpu/drm/tiny/hx8357d.c
··· 21 21 #include <drm/drm_fb_helper.h> 22 22 #include <drm/drm_gem_cma_helper.h> 23 23 #include <drm/drm_gem_framebuffer_helper.h> 24 + #include <drm/drm_managed.h> 24 25 #include <drm/drm_mipi_dbi.h> 25 26 #include <drm/drm_modeset_helper.h> 26 27 #include <video/mipi_display.h> ··· 196 195 static struct drm_driver hx8357d_driver = { 197 196 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 198 197 .fops = &hx8357d_fops, 199 - .release = mipi_dbi_release, 200 198 DRM_GEM_CMA_VMAP_DRIVER_OPS, 201 199 .debugfs_init = mipi_dbi_debugfs_init, 202 200 .name = "hx8357d", ··· 236 236 kfree(dbidev); 237 237 return ret; 238 238 } 239 - 240 - drm_mode_config_init(drm); 239 + drmm_add_final_kfree(drm, dbidev); 241 240 242 241 dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW); 243 242 if (IS_ERR(dc)) {
+2 -3
drivers/gpu/drm/tiny/ili9225.c
··· 24 24 #include <drm/drm_fourcc.h> 25 25 #include <drm/drm_gem_cma_helper.h> 26 26 #include <drm/drm_gem_framebuffer_helper.h> 27 + #include <drm/drm_managed.h> 27 28 #include <drm/drm_mipi_dbi.h> 28 29 #include <drm/drm_rect.h> 29 30 ··· 346 345 static struct drm_driver ili9225_driver = { 347 346 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 348 347 .fops = &ili9225_fops, 349 - .release = mipi_dbi_release, 350 348 DRM_GEM_CMA_VMAP_DRIVER_OPS, 351 349 .name = "ili9225", 352 350 .desc = "Ilitek ILI9225", ··· 387 387 kfree(dbidev); 388 388 return ret; 389 389 } 390 - 391 - drm_mode_config_init(drm); 390 + drmm_add_final_kfree(drm, dbidev); 392 391 393 392 dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 394 393 if (IS_ERR(dbi->reset)) {
+2 -3
drivers/gpu/drm/tiny/ili9341.c
··· 20 20 #include <drm/drm_fb_helper.h> 21 21 #include <drm/drm_gem_cma_helper.h> 22 22 #include <drm/drm_gem_framebuffer_helper.h> 23 + #include <drm/drm_managed.h> 23 24 #include <drm/drm_mipi_dbi.h> 24 25 #include <drm/drm_modeset_helper.h> 25 26 #include <video/mipi_display.h> ··· 152 151 static struct drm_driver ili9341_driver = { 153 152 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 154 153 .fops = &ili9341_fops, 155 - .release = mipi_dbi_release, 156 154 DRM_GEM_CMA_VMAP_DRIVER_OPS, 157 155 .debugfs_init = mipi_dbi_debugfs_init, 158 156 .name = "ili9341", ··· 194 194 kfree(dbidev); 195 195 return ret; 196 196 } 197 - 198 - drm_mode_config_init(drm); 197 + drmm_add_final_kfree(drm, dbidev); 199 198 200 199 dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 201 200 if (IS_ERR(dbi->reset)) {
+2 -3
drivers/gpu/drm/tiny/ili9486.c
··· 19 19 #include <drm/drm_fb_helper.h> 20 20 #include <drm/drm_gem_cma_helper.h> 21 21 #include <drm/drm_gem_framebuffer_helper.h> 22 + #include <drm/drm_managed.h> 22 23 #include <drm/drm_mipi_dbi.h> 23 24 #include <drm/drm_modeset_helper.h> 24 25 ··· 165 164 static struct drm_driver ili9486_driver = { 166 165 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 167 166 .fops = &ili9486_fops, 168 - .release = mipi_dbi_release, 169 167 DRM_GEM_CMA_VMAP_DRIVER_OPS, 170 168 .debugfs_init = mipi_dbi_debugfs_init, 171 169 .name = "ili9486", ··· 208 208 kfree(dbidev); 209 209 return ret; 210 210 } 211 - 212 - drm_mode_config_init(drm); 211 + drmm_add_final_kfree(drm, dbidev); 213 212 214 213 dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 215 214 if (IS_ERR(dbi->reset)) {
+2 -3
drivers/gpu/drm/tiny/mi0283qt.c
··· 18 18 #include <drm/drm_fb_helper.h> 19 19 #include <drm/drm_gem_cma_helper.h> 20 20 #include <drm/drm_gem_framebuffer_helper.h> 21 + #include <drm/drm_managed.h> 21 22 #include <drm/drm_mipi_dbi.h> 22 23 #include <drm/drm_modeset_helper.h> 23 24 #include <video/mipi_display.h> ··· 156 155 static struct drm_driver mi0283qt_driver = { 157 156 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 158 157 .fops = &mi0283qt_fops, 159 - .release = mipi_dbi_release, 160 158 DRM_GEM_CMA_VMAP_DRIVER_OPS, 161 159 .debugfs_init = mipi_dbi_debugfs_init, 162 160 .name = "mi0283qt", ··· 198 198 kfree(dbidev); 199 199 return ret; 200 200 } 201 - 202 - drm_mode_config_init(drm); 201 + drmm_add_final_kfree(drm, dbidev); 203 202 204 203 dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 205 204 if (IS_ERR(dbi->reset)) {
+5 -13
drivers/gpu/drm/tiny/repaper.c
··· 31 31 #include <drm/drm_format_helper.h> 32 32 #include <drm/drm_gem_cma_helper.h> 33 33 #include <drm/drm_gem_framebuffer_helper.h> 34 + #include <drm/drm_managed.h> 34 35 #include <drm/drm_modes.h> 35 36 #include <drm/drm_rect.h> 36 37 #include <drm/drm_probe_helper.h> ··· 909 908 .atomic_commit = drm_atomic_helper_commit, 910 909 }; 911 910 912 - static void repaper_release(struct drm_device *drm) 913 - { 914 - struct repaper_epd *epd = drm_to_epd(drm); 915 - 916 - DRM_DEBUG_DRIVER("\n"); 917 - 918 - drm_mode_config_cleanup(drm); 919 - drm_dev_fini(drm); 920 - kfree(epd); 921 - } 922 - 923 911 static const uint32_t repaper_formats[] = { 924 912 DRM_FORMAT_XRGB8888, 925 913 }; ··· 946 956 static struct drm_driver repaper_driver = { 947 957 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 948 958 .fops = &repaper_fops, 949 - .release = repaper_release, 950 959 DRM_GEM_CMA_VMAP_DRIVER_OPS, 951 960 .name = "repaper", 952 961 .desc = "Pervasive Displays RePaper e-ink panels", ··· 1013 1024 kfree(epd); 1014 1025 return ret; 1015 1026 } 1027 + drmm_add_final_kfree(drm, epd); 1016 1028 1017 - drm_mode_config_init(drm); 1029 + ret = drmm_mode_config_init(drm); 1030 + if (ret) 1031 + return ret; 1018 1032 drm->mode_config.funcs = &repaper_mode_config_funcs; 1019 1033 1020 1034 epd->spi = spi;
+2 -3
drivers/gpu/drm/tiny/st7586.c
··· 21 21 #include <drm/drm_format_helper.h> 22 22 #include <drm/drm_gem_cma_helper.h> 23 23 #include <drm/drm_gem_framebuffer_helper.h> 24 + #include <drm/drm_managed.h> 24 25 #include <drm/drm_mipi_dbi.h> 25 26 #include <drm/drm_rect.h> 26 27 ··· 285 284 static struct drm_driver st7586_driver = { 286 285 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 287 286 .fops = &st7586_fops, 288 - .release = mipi_dbi_release, 289 287 DRM_GEM_CMA_VMAP_DRIVER_OPS, 290 288 .debugfs_init = mipi_dbi_debugfs_init, 291 289 .name = "st7586", ··· 328 328 kfree(dbidev); 329 329 return ret; 330 330 } 331 - 332 - drm_mode_config_init(drm); 331 + drmm_add_final_kfree(drm, dbidev); 333 332 334 333 bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay; 335 334
+2 -3
drivers/gpu/drm/tiny/st7735r.c
··· 21 21 #include <drm/drm_fb_helper.h> 22 22 #include <drm/drm_gem_cma_helper.h> 23 23 #include <drm/drm_gem_framebuffer_helper.h> 24 + #include <drm/drm_managed.h> 24 25 #include <drm/drm_mipi_dbi.h> 25 26 26 27 #define ST7735R_FRMCTR1 0xb1 ··· 157 156 static struct drm_driver st7735r_driver = { 158 157 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 159 158 .fops = &st7735r_fops, 160 - .release = mipi_dbi_release, 161 159 DRM_GEM_CMA_VMAP_DRIVER_OPS, 162 160 .debugfs_init = mipi_dbi_debugfs_init, 163 161 .name = "st7735r", ··· 209 209 kfree(dbidev); 210 210 return ret; 211 211 } 212 - 213 - drm_mode_config_init(drm); 212 + drmm_add_final_kfree(drm, dbidev); 214 213 215 214 dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 216 215 if (IS_ERR(dbi->reset)) {
+3 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 588 588 ttm_mem_io_unlock(man); 589 589 } 590 590 591 - if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) { 591 + if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || 592 + !dma_resv_trylock(bo->base.resv)) { 592 593 /* The BO is not idle, resurrect it for delayed destroy */ 593 594 ttm_bo_flush_all_fences(bo); 594 595 bo->deleted = true; ··· 622 621 spin_unlock(&ttm_bo_glob.lru_lock); 623 622 624 623 ttm_bo_cleanup_memtype_use(bo); 624 + dma_resv_unlock(bo->base.resv); 625 625 626 626 BUG_ON(bo->mem.mm_node != NULL); 627 627 atomic_dec(&ttm_bo_glob.bo_count);
+5 -17
drivers/gpu/drm/udl/udl_drv.c
··· 10 10 #include <drm/drm_fb_helper.h> 11 11 #include <drm/drm_file.h> 12 12 #include <drm/drm_gem_shmem_helper.h> 13 + #include <drm/drm_managed.h> 13 14 #include <drm/drm_ioctl.h> 14 15 #include <drm/drm_probe_helper.h> 15 16 #include <drm/drm_print.h> ··· 34 33 35 34 DEFINE_DRM_GEM_FOPS(udl_driver_fops); 36 35 37 - static void udl_driver_release(struct drm_device *dev) 38 - { 39 - udl_fini(dev); 40 - udl_modeset_cleanup(dev); 41 - drm_dev_fini(dev); 42 - kfree(dev); 43 - } 44 - 45 36 static struct drm_driver driver = { 46 37 .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, 47 - .release = udl_driver_release, 48 38 49 39 /* gem hooks */ 50 40 .gem_create_object = udl_driver_gem_create_object, ··· 69 77 70 78 udl->udev = udev; 71 79 udl->drm.dev_private = udl; 80 + drmm_add_final_kfree(&udl->drm, udl); 72 81 73 82 r = udl_init(udl); 74 83 if (r) { 75 - drm_dev_fini(&udl->drm); 76 - kfree(udl); 84 + drm_dev_put(&udl->drm); 77 85 return ERR_PTR(r); 78 86 } 79 87 ··· 97 105 98 106 DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index); 99 107 100 - r = drm_fbdev_generic_setup(&udl->drm, 0); 101 - if (r) 102 - goto err_drm_dev_unregister; 108 + drm_fbdev_generic_setup(&udl->drm, 0); 103 109 104 110 return 0; 105 111 106 - err_drm_dev_unregister: 107 - drm_dev_unregister(&udl->drm); 108 112 err_free: 109 113 drm_dev_put(&udl->drm); 110 114 return r; ··· 110 122 { 111 123 struct drm_device *dev = usb_get_intfdata(interface); 112 124 113 - drm_kms_helper_poll_disable(dev); 125 + drm_kms_helper_poll_fini(dev); 114 126 udl_drop_usb(dev); 115 127 drm_dev_unplug(dev); 116 128 drm_dev_put(dev);
-2
drivers/gpu/drm/udl/udl_drv.h
··· 68 68 69 69 /* modeset */ 70 70 int udl_modeset_init(struct drm_device *dev); 71 - void udl_modeset_cleanup(struct drm_device *dev); 72 71 struct drm_connector *udl_connector_init(struct drm_device *dev); 73 72 74 73 struct urb *udl_get_urb(struct drm_device *dev); ··· 76 77 void udl_urb_completion(struct urb *urb); 77 78 78 79 int udl_init(struct udl_device *udl); 79 - void udl_fini(struct drm_device *dev); 80 80 81 81 int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, 82 82 const char *front, char **urb_buf_ptr,
-10
drivers/gpu/drm/udl/udl_main.c
··· 351 351 udl_free_urb_list(dev); 352 352 return 0; 353 353 } 354 - 355 - void udl_fini(struct drm_device *dev) 356 - { 357 - struct udl_device *udl = to_udl(dev); 358 - 359 - drm_kms_helper_poll_fini(dev); 360 - 361 - if (udl->urbs.count) 362 - udl_free_urb_list(dev); 363 - }
+6 -15
drivers/gpu/drm/udl/udl_modeset.c
··· 468 468 struct drm_connector *connector; 469 469 int ret; 470 470 471 - drm_mode_config_init(dev); 471 + ret = drmm_mode_config_init(dev); 472 + if (ret) 473 + return ret; 472 474 473 475 dev->mode_config.min_width = 640; 474 476 dev->mode_config.min_height = 480; ··· 484 482 dev->mode_config.funcs = &udl_mode_funcs; 485 483 486 484 connector = udl_connector_init(dev); 487 - if (IS_ERR(connector)) { 488 - ret = PTR_ERR(connector); 489 - goto err_drm_mode_config_cleanup; 490 - } 485 + if (IS_ERR(connector)) 486 + return PTR_ERR(connector); 491 487 492 488 format_count = ARRAY_SIZE(udl_simple_display_pipe_formats); 493 489 ··· 494 494 udl_simple_display_pipe_formats, 495 495 format_count, NULL, connector); 496 496 if (ret) 497 - goto err_drm_mode_config_cleanup; 497 + return ret; 498 498 499 499 drm_mode_config_reset(dev); 500 500 501 501 return 0; 502 - 503 - err_drm_mode_config_cleanup: 504 - drm_mode_config_cleanup(dev); 505 - return ret; 506 - } 507 - 508 - void udl_modeset_cleanup(struct drm_device *dev) 509 - { 510 - drm_mode_config_cleanup(dev); 511 502 }
+4 -4
drivers/gpu/drm/v3d/v3d_debugfs.c
··· 258 258 {"bo_stats", v3d_debugfs_bo_stats, 0}, 259 259 }; 260 260 261 - int 261 + void 262 262 v3d_debugfs_init(struct drm_minor *minor) 263 263 { 264 - return drm_debugfs_create_files(v3d_debugfs_list, 265 - ARRAY_SIZE(v3d_debugfs_list), 266 - minor->debugfs_root, minor); 264 + drm_debugfs_create_files(v3d_debugfs_list, 265 + ARRAY_SIZE(v3d_debugfs_list), 266 + minor->debugfs_root, minor); 267 267 }
+20 -18
drivers/gpu/drm/v3d/v3d_drv.c
··· 25 25 #include <drm/drm_drv.h> 26 26 #include <drm/drm_fb_cma_helper.h> 27 27 #include <drm/drm_fb_helper.h> 28 + #include <drm/drm_managed.h> 28 29 #include <uapi/drm/v3d_drm.h> 29 30 30 31 #include "v3d_drv.h" ··· 258 257 v3d->pdev = pdev; 259 258 drm = &v3d->drm; 260 259 260 + ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev); 261 + if (ret) { 262 + kfree(v3d); 263 + return ret; 264 + } 265 + 266 + platform_set_drvdata(pdev, drm); 267 + drm->dev_private = v3d; 268 + drmm_add_final_kfree(drm, v3d); 269 + 261 270 ret = map_regs(v3d, &v3d->hub_regs, "hub"); 262 271 if (ret) 263 - goto dev_free; 272 + goto dev_destroy; 264 273 265 274 ret = map_regs(v3d, &v3d->core_regs[0], "core0"); 266 275 if (ret) 267 - goto dev_free; 276 + goto dev_destroy; 268 277 269 278 mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); 270 279 dev->coherent_dma_mask = ··· 292 281 ret = PTR_ERR(v3d->reset); 293 282 294 283 if (ret == -EPROBE_DEFER) 295 - goto dev_free; 284 + goto dev_destroy; 296 285 297 286 v3d->reset = NULL; 298 287 ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); 299 288 if (ret) { 300 289 dev_err(dev, 301 290 "Failed to get reset control or bridge regs\n"); 302 - goto dev_free; 291 + goto dev_destroy; 303 292 } 304 293 } 305 294 306 295 if (v3d->ver < 41) { 307 296 ret = map_regs(v3d, &v3d->gca_regs, "gca"); 308 297 if (ret) 309 - goto dev_free; 298 + goto dev_destroy; 310 299 } 311 300 312 301 v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, ··· 314 303 if (!v3d->mmu_scratch) { 315 304 dev_err(dev, "Failed to allocate MMU scratch page\n"); 316 305 ret = -ENOMEM; 317 - goto dev_free; 306 + goto dev_destroy; 318 307 } 319 308 320 309 pm_runtime_use_autosuspend(dev); 321 310 pm_runtime_set_autosuspend_delay(dev, 50); 322 311 pm_runtime_enable(dev); 323 312 324 - ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev); 325 - if (ret) 326 - goto dma_free; 327 - 328 - platform_set_drvdata(pdev, drm); 329 - drm->dev_private = v3d; 330 - 331 313 ret = 
v3d_gem_init(drm); 332 314 if (ret) 333 - goto dev_destroy; 315 + goto dma_free; 334 316 335 317 ret = v3d_irq_init(v3d); 336 318 if (ret) ··· 339 335 v3d_irq_disable(v3d); 340 336 gem_destroy: 341 337 v3d_gem_destroy(drm); 342 - dev_destroy: 343 - drm_dev_put(drm); 344 338 dma_free: 345 339 dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); 346 - dev_free: 347 - kfree(v3d); 340 + dev_destroy: 341 + drm_dev_put(drm); 348 342 return ret; 349 343 } 350 344
+1 -1
drivers/gpu/drm/v3d/v3d_drv.h
··· 316 316 struct sg_table *sgt); 317 317 318 318 /* v3d_debugfs.c */ 319 - int v3d_debugfs_init(struct drm_minor *minor); 319 + void v3d_debugfs_init(struct drm_minor *minor); 320 320 321 321 /* v3d_fence.c */ 322 322 extern const struct dma_fence_ops v3d_fence_ops;
+4 -4
drivers/gpu/drm/vboxvideo/vbox_drv.c
··· 17 17 #include <drm/drm_fb_helper.h> 18 18 #include <drm/drm_file.h> 19 19 #include <drm/drm_ioctl.h> 20 + #include <drm/drm_managed.h> 20 21 21 22 #include "vbox_drv.h" 22 23 ··· 59 58 vbox->ddev.pdev = pdev; 60 59 vbox->ddev.dev_private = vbox; 61 60 pci_set_drvdata(pdev, vbox); 61 + drmm_add_final_kfree(&vbox->ddev, vbox); 62 62 mutex_init(&vbox->hw_mutex); 63 63 64 64 ret = pci_enable_device(pdev); ··· 82 80 if (ret) 83 81 goto err_mode_fini; 84 82 85 - ret = drm_fbdev_generic_setup(&vbox->ddev, 32); 86 - if (ret) 87 - goto err_irq_fini; 88 - 89 83 ret = drm_dev_register(&vbox->ddev, 0); 90 84 if (ret) 91 85 goto err_irq_fini; 86 + 87 + drm_fbdev_generic_setup(&vbox->ddev, 32); 92 88 93 89 return 0; 94 90
+3 -8
drivers/gpu/drm/vc4/vc4_debugfs.c
··· 20 20 * Called at drm_dev_register() time on each of the minors registered 21 21 * by the DRM device, to attach the debugfs files. 22 22 */ 23 - int 23 + void 24 24 vc4_debugfs_init(struct drm_minor *minor) 25 25 { 26 26 struct vc4_dev *vc4 = to_vc4_dev(minor->dev); ··· 30 30 minor->debugfs_root, &vc4->load_tracker_enabled); 31 31 32 32 list_for_each_entry(entry, &vc4->debugfs_list, link) { 33 - int ret = drm_debugfs_create_files(&entry->info, 1, 34 - minor->debugfs_root, minor); 35 - 36 - if (ret) 37 - return ret; 33 + drm_debugfs_create_files(&entry->info, 1, 34 + minor->debugfs_root, minor); 38 35 } 39 - 40 - return 0; 41 36 } 42 37 43 38 static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
+2 -6
drivers/gpu/drm/vc4/vc4_dpi.c
··· 17 17 #include <drm/drm_of.h> 18 18 #include <drm/drm_panel.h> 19 19 #include <drm/drm_probe_helper.h> 20 + #include <drm/drm_simple_kms_helper.h> 20 21 #include <linux/clk.h> 21 22 #include <linux/component.h> 22 23 #include <linux/of_graph.h> ··· 113 112 static const struct debugfs_reg32 dpi_regs[] = { 114 113 VC4_REG32(DPI_C), 115 114 VC4_REG32(DPI_ID), 116 - }; 117 - 118 - static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = { 119 - .destroy = drm_encoder_cleanup, 120 115 }; 121 116 122 117 static void vc4_dpi_encoder_disable(struct drm_encoder *encoder) ··· 306 309 if (ret) 307 310 DRM_ERROR("Failed to turn on core clock: %d\n", ret); 308 311 309 - drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs, 310 - DRM_MODE_ENCODER_DPI, NULL); 312 + drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI); 311 313 drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs); 312 314 313 315 ret = vc4_dpi_init_bridge(dpi);
+1 -1
drivers/gpu/drm/vc4/vc4_drv.h
··· 759 759 unsigned int *top, unsigned int *bottom); 760 760 761 761 /* vc4_debugfs.c */ 762 - int vc4_debugfs_init(struct drm_minor *minor); 762 + void vc4_debugfs_init(struct drm_minor *minor); 763 763 #ifdef CONFIG_DEBUG_FS 764 764 void vc4_debugfs_add_file(struct drm_device *drm, 765 765 const char *filename,
+3 -12
drivers/gpu/drm/vc4/vc4_dsi.c
··· 37 37 #include <drm/drm_of.h> 38 38 #include <drm/drm_panel.h> 39 39 #include <drm/drm_probe_helper.h> 40 + #include <drm/drm_simple_kms_helper.h> 40 41 41 42 #include "vc4_drv.h" 42 43 #include "vc4_regs.h" ··· 651 650 VC4_REG32(DSI1_PHY_AFEC0), 652 651 VC4_REG32(DSI1_PHY_AFEC1), 653 652 VC4_REG32(DSI1_ID), 654 - }; 655 - 656 - static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder) 657 - { 658 - drm_encoder_cleanup(encoder); 659 - } 660 - 661 - static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = { 662 - .destroy = vc4_dsi_encoder_destroy, 663 653 }; 664 654 665 655 static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch) ··· 1607 1615 if (dsi->port == 1) 1608 1616 vc4->dsi1 = dsi; 1609 1617 1610 - drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs, 1611 - DRM_MODE_ENCODER_DSI, NULL); 1618 + drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI); 1612 1619 drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs); 1613 1620 1614 1621 ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0); ··· 1647 1656 * normally. 1648 1657 */ 1649 1658 list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain); 1650 - vc4_dsi_encoder_destroy(dsi->encoder); 1659 + drm_encoder_cleanup(dsi->encoder); 1651 1660 1652 1661 if (dsi->port == 1) 1653 1662 vc4->dsi1 = NULL;
+4 -13
drivers/gpu/drm/vc4/vc4_hdmi.c
··· 34 34 #include <drm/drm_atomic_helper.h> 35 35 #include <drm/drm_edid.h> 36 36 #include <drm/drm_probe_helper.h> 37 + #include <drm/drm_simple_kms_helper.h> 37 38 #include <linux/clk.h> 38 39 #include <linux/component.h> 39 40 #include <linux/i2c.h> ··· 306 305 307 306 return connector; 308 307 } 309 - 310 - static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder) 311 - { 312 - drm_encoder_cleanup(encoder); 313 - } 314 - 315 - static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = { 316 - .destroy = vc4_hdmi_encoder_destroy, 317 - }; 318 308 319 309 static int vc4_hdmi_stop_packet(struct drm_encoder *encoder, 320 310 enum hdmi_infoframe_type type) ··· 1398 1406 } 1399 1407 pm_runtime_enable(dev); 1400 1408 1401 - drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs, 1402 - DRM_MODE_ENCODER_TMDS, NULL); 1409 + drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS); 1403 1410 drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs); 1404 1411 1405 1412 hdmi->connector = ··· 1456 1465 vc4_hdmi_connector_destroy(hdmi->connector); 1457 1466 #endif 1458 1467 err_destroy_encoder: 1459 - vc4_hdmi_encoder_destroy(hdmi->encoder); 1468 + drm_encoder_cleanup(hdmi->encoder); 1460 1469 err_unprepare_hsm: 1461 1470 clk_disable_unprepare(hdmi->hsm_clock); 1462 1471 pm_runtime_disable(dev); ··· 1475 1484 1476 1485 cec_unregister_adapter(hdmi->cec_adap); 1477 1486 vc4_hdmi_connector_destroy(hdmi->connector); 1478 - vc4_hdmi_encoder_destroy(hdmi->encoder); 1487 + drm_encoder_cleanup(hdmi->encoder); 1479 1488 1480 1489 clk_disable_unprepare(hdmi->hsm_clock); 1481 1490 pm_runtime_disable(dev);
+2 -6
drivers/gpu/drm/vc4/vc4_vec.c
··· 17 17 #include <drm/drm_edid.h> 18 18 #include <drm/drm_panel.h> 19 19 #include <drm/drm_probe_helper.h> 20 + #include <drm/drm_simple_kms_helper.h> 20 21 #include <linux/clk.h> 21 22 #include <linux/component.h> 22 23 #include <linux/of_graph.h> ··· 375 374 return connector; 376 375 } 377 376 378 - static const struct drm_encoder_funcs vc4_vec_encoder_funcs = { 379 - .destroy = drm_encoder_cleanup, 380 - }; 381 - 382 377 static void vc4_vec_encoder_disable(struct drm_encoder *encoder) 383 378 { 384 379 struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder); ··· 563 566 564 567 pm_runtime_enable(dev); 565 568 566 - drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs, 567 - DRM_MODE_ENCODER_TVDAC, NULL); 569 + drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC); 568 570 drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs); 569 571 570 572 vec->connector = vc4_vec_connector_init(drm, vec);
+8 -7
drivers/gpu/drm/vgem/vgem_drv.c
··· 39 39 #include <drm/drm_drv.h> 40 40 #include <drm/drm_file.h> 41 41 #include <drm/drm_ioctl.h> 42 + #include <drm/drm_managed.h> 42 43 #include <drm/drm_prime.h> 43 44 44 45 #include "vgem_drv.h" ··· 432 431 struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm); 433 432 434 433 platform_device_unregister(vgem->platform); 435 - drm_dev_fini(&vgem->drm); 436 - 437 - kfree(vgem); 438 434 } 439 435 440 436 static struct drm_driver vgem_driver = { ··· 487 489 &vgem_device->platform->dev); 488 490 if (ret) 489 491 goto out_unregister; 492 + drmm_add_final_kfree(&vgem_device->drm, vgem_device); 490 493 491 494 /* Final step: expose the device/driver to userspace */ 492 - ret = drm_dev_register(&vgem_device->drm, 0); 495 + ret = drm_dev_register(&vgem_device->drm, 0); 493 496 if (ret) 494 - goto out_fini; 497 + goto out_put; 495 498 496 499 return 0; 497 500 498 - out_fini: 499 - drm_dev_fini(&vgem_device->drm); 501 + out_put: 502 + drm_dev_put(&vgem_device->drm); 503 + return ret; 504 + 500 505 out_unregister: 501 506 platform_device_unregister(vgem_device->platform); 502 507 out_free:
+1 -2
drivers/gpu/drm/virtio/virtgpu_debugfs.c
··· 72 72 73 73 #define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list) 74 74 75 - int 75 + void 76 76 virtio_gpu_debugfs_init(struct drm_minor *minor) 77 77 { 78 78 drm_debugfs_create_files(virtio_gpu_debugfs_list, 79 79 VIRTIO_GPU_DEBUGFS_ENTRIES, 80 80 minor->debugfs_root, minor); 81 - return 0; 82 81 }
+2 -6
drivers/gpu/drm/virtio/virtgpu_display.c
··· 30 30 #include <drm/drm_fourcc.h> 31 31 #include <drm/drm_gem_framebuffer_helper.h> 32 32 #include <drm/drm_probe_helper.h> 33 + #include <drm/drm_simple_kms_helper.h> 33 34 34 35 #include "virtgpu_drv.h" 35 36 ··· 241 240 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 242 241 }; 243 242 244 - static const struct drm_encoder_funcs virtio_gpu_enc_funcs = { 245 - .destroy = drm_encoder_cleanup, 246 - }; 247 - 248 243 static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) 249 244 { 250 245 struct drm_device *dev = vgdev->ddev; ··· 273 276 if (vgdev->has_edid) 274 277 drm_connector_attach_edid_property(connector); 275 278 276 - drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs, 277 - DRM_MODE_ENCODER_VIRTUAL, NULL); 279 + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL); 278 280 drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs); 279 281 encoder->possible_crtcs = 1 << index; 280 282
+14 -22
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 218 218 struct mutex context_lock; 219 219 }; 220 220 221 - /* virtio_ioctl.c */ 221 + /* virtgpu_ioctl.c */ 222 222 #define DRM_VIRTIO_NUM_IOCTLS 10 223 223 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; 224 224 225 - /* virtio_kms.c */ 225 + /* virtgpu_kms.c */ 226 226 int virtio_gpu_init(struct drm_device *dev); 227 227 void virtio_gpu_deinit(struct drm_device *dev); 228 228 void virtio_gpu_release(struct drm_device *dev); 229 229 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file); 230 230 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file); 231 231 232 - /* virtio_gem.c */ 233 - void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj); 234 - int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev); 235 - void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev); 236 - int virtio_gpu_gem_create(struct drm_file *file, 237 - struct drm_device *dev, 238 - struct virtio_gpu_object_params *params, 239 - struct drm_gem_object **obj_p, 240 - uint32_t *handle_p); 232 + /* virtgpu_gem.c */ 241 233 int virtio_gpu_gem_object_open(struct drm_gem_object *obj, 242 234 struct drm_file *file); 243 235 void virtio_gpu_gem_object_close(struct drm_gem_object *obj, ··· 255 263 struct virtio_gpu_object_array *objs); 256 264 void virtio_gpu_array_put_free_work(struct work_struct *work); 257 265 258 - /* virtio vg */ 266 + /* virtgpu_vq.c */ 259 267 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev); 260 268 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev); 261 269 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, ··· 279 287 uint32_t scanout_id, uint32_t resource_id, 280 288 uint32_t width, uint32_t height, 281 289 uint32_t x, uint32_t y); 282 - int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, 283 - struct virtio_gpu_object *obj, 284 - struct virtio_gpu_mem_entry *ents, 285 - unsigned int nents); 290 + void virtio_gpu_object_attach(struct 
virtio_gpu_device *vgdev, 291 + struct virtio_gpu_object *obj, 292 + struct virtio_gpu_mem_entry *ents, 293 + unsigned int nents); 286 294 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev); 287 295 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev); 288 296 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, ··· 335 343 336 344 void virtio_gpu_notify(struct virtio_gpu_device *vgdev); 337 345 338 - /* virtio_gpu_display.c */ 346 + /* virtgpu_display.c */ 339 347 void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); 340 348 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); 341 349 342 - /* virtio_gpu_plane.c */ 350 + /* virtgpu_plane.c */ 343 351 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc); 344 352 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, 345 353 enum drm_plane_type type, 346 354 int index); 347 355 348 - /* virtio_gpu_fence.c */ 356 + /* virtgpu_fence.c */ 349 357 struct virtio_gpu_fence *virtio_gpu_fence_alloc( 350 358 struct virtio_gpu_device *vgdev); 351 359 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, ··· 354 362 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, 355 363 u64 last_seq); 356 364 357 - /* virtio_gpu_object */ 365 + /* virtgpu_object.c */ 358 366 void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo); 359 367 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, 360 368 size_t size); ··· 370 378 struct drm_device *dev, struct dma_buf_attachment *attach, 371 379 struct sg_table *sgt); 372 380 373 - /* virgl debugfs */ 374 - int virtio_gpu_debugfs_init(struct drm_minor *minor); 381 + /* virtgpu_debugfs.c */ 382 + void virtio_gpu_debugfs_init(struct drm_minor *minor); 375 383 376 384 #endif
+7 -6
drivers/gpu/drm/virtio/virtgpu_gem.c
··· 28 28 29 29 #include "virtgpu_drv.h" 30 30 31 - int virtio_gpu_gem_create(struct drm_file *file, 32 - struct drm_device *dev, 33 - struct virtio_gpu_object_params *params, 34 - struct drm_gem_object **obj_p, 35 - uint32_t *handle_p) 31 + static int virtio_gpu_gem_create(struct drm_file *file, 32 + struct drm_device *dev, 33 + struct virtio_gpu_object_params *params, 34 + struct drm_gem_object **obj_p, 35 + uint32_t *handle_p) 36 36 { 37 37 struct virtio_gpu_device *vgdev = dev->dev_private; 38 38 struct virtio_gpu_object *obj; ··· 114 114 struct virtio_gpu_object_array *objs; 115 115 116 116 if (!vgdev->has_virgl_3d) 117 - return 0; 117 + goto out_notify; 118 118 119 119 objs = virtio_gpu_array_alloc(1); 120 120 if (!objs) ··· 123 123 124 124 virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, 125 125 objs); 126 + out_notify: 126 127 virtio_gpu_notify(vgdev); 127 128 return 0; 128 129 }
-1
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 47 47 get_task_comm(dbgname, current); 48 48 virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id, 49 49 strlen(dbgname), dbgname); 50 - virtio_gpu_notify(vgdev); 51 50 vfpriv->context_created = true; 52 51 53 52 out_unlock:
+1 -6
drivers/gpu/drm/virtio/virtgpu_object.c
··· 235 235 return ret; 236 236 } 237 237 238 - ret = virtio_gpu_object_attach(vgdev, bo, ents, nents); 239 - if (ret != 0) { 240 - virtio_gpu_free_object(&shmem_obj->base); 241 - return ret; 242 - } 238 + virtio_gpu_object_attach(vgdev, bo, ents, nents); 243 239 244 - virtio_gpu_notify(vgdev); 245 240 *bo_ptr = bo; 246 241 return 0; 247 242
+4 -5
drivers/gpu/drm/virtio/virtgpu_vq.c
··· 1087 1087 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); 1088 1088 } 1089 1089 1090 - int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, 1091 - struct virtio_gpu_object *obj, 1092 - struct virtio_gpu_mem_entry *ents, 1093 - unsigned int nents) 1090 + void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, 1091 + struct virtio_gpu_object *obj, 1092 + struct virtio_gpu_mem_entry *ents, 1093 + unsigned int nents) 1094 1094 { 1095 1095 virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle, 1096 1096 ents, nents, NULL); 1097 - return 0; 1098 1097 } 1099 1098 1100 1099 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
+9 -10
drivers/gpu/drm/vkms/vkms_drv.c
··· 21 21 #include <drm/drm_file.h> 22 22 #include <drm/drm_gem_framebuffer_helper.h> 23 23 #include <drm/drm_ioctl.h> 24 + #include <drm/drm_managed.h> 24 25 #include <drm/drm_probe_helper.h> 25 26 #include <drm/drm_vblank.h> 26 27 ··· 64 63 platform_device_unregister(vkms->platform); 65 64 drm_atomic_helper_shutdown(&vkms->drm); 66 65 drm_mode_config_cleanup(&vkms->drm); 67 - drm_dev_fini(&vkms->drm); 68 66 destroy_workqueue(vkms->output.composer_workq); 69 67 } 70 68 ··· 158 158 &vkms_device->platform->dev); 159 159 if (ret) 160 160 goto out_unregister; 161 + drmm_add_final_kfree(&vkms_device->drm, vkms_device); 161 162 162 163 ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev, 163 164 DMA_BIT_MASK(64)); 164 165 165 166 if (ret) { 166 167 DRM_ERROR("Could not initialize DMA support\n"); 167 - goto out_fini; 168 + goto out_put; 168 169 } 169 170 170 171 vkms_device->drm.irq_enabled = true; ··· 173 172 ret = drm_vblank_init(&vkms_device->drm, 1); 174 173 if (ret) { 175 174 DRM_ERROR("Failed to vblank\n"); 176 - goto out_fini; 175 + goto out_put; 177 176 } 178 177 179 178 ret = vkms_modeset_init(vkms_device); 180 179 if (ret) 181 - goto out_fini; 180 + goto out_put; 182 181 183 182 ret = drm_dev_register(&vkms_device->drm, 0); 184 183 if (ret) 185 - goto out_fini; 184 + goto out_put; 186 185 187 186 return 0; 188 187 189 - out_fini: 190 - drm_dev_fini(&vkms_device->drm); 188 + out_put: 189 + drm_dev_put(&vkms_device->drm); 190 + return ret; 191 191 192 192 out_unregister: 193 193 platform_device_unregister(vkms_device->platform); 194 - 195 194 out_free: 196 195 kfree(vkms_device); 197 196 return ret; ··· 206 205 207 206 drm_dev_unregister(&vkms_device->drm); 208 207 drm_dev_put(&vkms_device->drm); 209 - 210 - kfree(vkms_device); 211 208 } 212 209 213 210 module_init(vkms_init);
+2 -6
drivers/gpu/drm/vkms/vkms_output.c
··· 3 3 #include "vkms_drv.h" 4 4 #include <drm/drm_atomic_helper.h> 5 5 #include <drm/drm_probe_helper.h> 6 + #include <drm/drm_simple_kms_helper.h> 6 7 7 8 static void vkms_connector_destroy(struct drm_connector *connector) 8 9 { ··· 16 15 .reset = drm_atomic_helper_connector_reset, 17 16 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 18 17 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 19 - }; 20 - 21 - static const struct drm_encoder_funcs vkms_encoder_funcs = { 22 - .destroy = drm_encoder_cleanup, 23 18 }; 24 19 25 20 static int vkms_conn_get_modes(struct drm_connector *connector) ··· 67 70 68 71 drm_connector_helper_add(connector, &vkms_conn_helper_funcs); 69 72 70 - ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs, 71 - DRM_MODE_ENCODER_VIRTUAL, NULL); 73 + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL); 72 74 if (ret) { 73 75 DRM_ERROR("Failed to init encoder\n"); 74 76 goto err_encoder;
+1 -3
drivers/gpu/drm/xen/xen_drm_front.c
··· 460 460 drm_atomic_helper_shutdown(dev); 461 461 drm_mode_config_cleanup(dev); 462 462 463 - drm_dev_fini(dev); 464 - kfree(dev); 465 - 466 463 if (front_info->cfg.be_alloc) 467 464 xenbus_switch_state(front_info->xb_dev, 468 465 XenbusStateInitialising); ··· 558 561 fail_modeset: 559 562 drm_kms_helper_poll_fini(drm_dev); 560 563 drm_mode_config_cleanup(drm_dev); 564 + drm_dev_put(drm_dev); 561 565 fail: 562 566 kfree(drm_info); 563 567 return ret;
+2 -6
drivers/gpu/drm/zte/zx_hdmi.c
··· 20 20 #include <drm/drm_of.h> 21 21 #include <drm/drm_probe_helper.h> 22 22 #include <drm/drm_print.h> 23 + #include <drm/drm_simple_kms_helper.h> 23 24 24 25 #include <sound/hdmi-codec.h> 25 26 ··· 255 254 .mode_set = zx_hdmi_encoder_mode_set, 256 255 }; 257 256 258 - static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = { 259 - .destroy = drm_encoder_cleanup, 260 - }; 261 - 262 257 static int zx_hdmi_connector_get_modes(struct drm_connector *connector) 263 258 { 264 259 struct zx_hdmi *hdmi = to_zx_hdmi(connector); ··· 310 313 311 314 encoder->possible_crtcs = VOU_CRTC_MASK; 312 315 313 - drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs, 314 - DRM_MODE_ENCODER_TMDS, NULL); 316 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); 315 317 drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs); 316 318 317 319 hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+2 -6
drivers/gpu/drm/zte/zx_tvenc.c
··· 14 14 #include <drm/drm_atomic_helper.h> 15 15 #include <drm/drm_print.h> 16 16 #include <drm/drm_probe_helper.h> 17 + #include <drm/drm_simple_kms_helper.h> 17 18 18 19 #include "zx_drm_drv.h" 19 20 #include "zx_tvenc_regs.h" ··· 219 218 .mode_set = zx_tvenc_encoder_mode_set, 220 219 }; 221 220 222 - static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = { 223 - .destroy = drm_encoder_cleanup, 224 - }; 225 - 226 221 static int zx_tvenc_connector_get_modes(struct drm_connector *connector) 227 222 { 228 223 struct zx_tvenc *tvenc = to_zx_tvenc(connector); ··· 282 285 */ 283 286 encoder->possible_crtcs = BIT(1); 284 287 285 - drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs, 286 - DRM_MODE_ENCODER_TVDAC, NULL); 288 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC); 287 289 drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs); 288 290 289 291 connector->interlace_allowed = true;
+2 -6
drivers/gpu/drm/zte/zx_vga.c
··· 14 14 #include <drm/drm_atomic_helper.h> 15 15 #include <drm/drm_print.h> 16 16 #include <drm/drm_probe_helper.h> 17 + #include <drm/drm_simple_kms_helper.h> 17 18 18 19 #include "zx_drm_drv.h" 19 20 #include "zx_vga_regs.h" ··· 71 70 static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = { 72 71 .enable = zx_vga_encoder_enable, 73 72 .disable = zx_vga_encoder_disable, 74 - }; 75 - 76 - static const struct drm_encoder_funcs zx_vga_encoder_funcs = { 77 - .destroy = drm_encoder_cleanup, 78 73 }; 79 74 80 75 static int zx_vga_connector_get_modes(struct drm_connector *connector) ··· 151 154 152 155 encoder->possible_crtcs = VOU_CRTC_MASK; 153 156 154 - ret = drm_encoder_init(drm, encoder, &zx_vga_encoder_funcs, 155 - DRM_MODE_ENCODER_DAC, NULL); 157 + ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC); 156 158 if (ret) { 157 159 DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret); 158 160 return ret;
-1
drivers/video/fbdev/atmel_lcdfb.c
··· 1114 1114 1115 1115 sinfo->irq_base = platform_get_irq(pdev, 0); 1116 1116 if (sinfo->irq_base < 0) { 1117 - dev_err(dev, "unable to get irq\n"); 1118 1117 ret = sinfo->irq_base; 1119 1118 goto stop_clk; 1120 1119 }
+1 -1
drivers/video/fbdev/aty/atyfb_base.c
··· 126 126 #ifdef DEBUG 127 127 #define DPRINTK(fmt, args...) printk(KERN_DEBUG "atyfb: " fmt, ## args) 128 128 #else 129 - #define DPRINTK(fmt, args...) 129 + #define DPRINTK(fmt, args...) no_printk(fmt, ##args) 130 130 #endif 131 131 132 132 #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
+1 -1
drivers/video/fbdev/core/fbmon.c
··· 44 44 #ifdef DEBUG 45 45 #define DPRINTK(fmt, args...) printk(fmt,## args) 46 46 #else 47 - #define DPRINTK(fmt, args...) 47 + #define DPRINTK(fmt, args...) no_printk(fmt, ##args) 48 48 #endif 49 49 50 50 #define FBMON_FIX_HEADER 1
+2
drivers/video/fbdev/cyber2000fb.c
··· 1160 1160 #define DDC_SDA_IN (1 << 6) 1161 1161 1162 1162 static void cyber2000fb_enable_ddc(struct cfb_info *cfb) 1163 + __acquires(&cfb->reg_b0_lock) 1163 1164 { 1164 1165 spin_lock(&cfb->reg_b0_lock); 1165 1166 cyber2000fb_writew(0x1bf, 0x3ce, cfb); 1166 1167 } 1167 1168 1168 1169 static void cyber2000fb_disable_ddc(struct cfb_info *cfb) 1170 + __releases(&cfb->reg_b0_lock) 1169 1171 { 1170 1172 cyber2000fb_writew(0x0bf, 0x3ce, cfb); 1171 1173 spin_unlock(&cfb->reg_b0_lock);
-22
drivers/video/fbdev/matrox/g450_pll.c
··· 333 333 unsigned int *deltaarray) 334 334 { 335 335 unsigned int mnpcount; 336 - unsigned int pixel_vco; 337 336 const struct matrox_pll_limits* pi; 338 337 struct matrox_pll_cache* ci; 339 338 340 - pixel_vco = 0; 341 339 switch (pll) { 342 340 case M_PIXEL_PLL_A: 343 341 case M_PIXEL_PLL_B: ··· 418 420 419 421 mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16; 420 422 mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8; 421 - pixel_vco = g450_mnp2vco(minfo, mnp); 422 423 matroxfb_DAC_unlock_irqrestore(flags); 423 424 } 424 425 pi = &minfo->limits.video; ··· 438 441 unsigned int delta; 439 442 440 443 vco = g450_mnp2vco(minfo, mnp); 441 - #if 0 442 - if (pll == M_VIDEO_PLL) { 443 - unsigned int big, small; 444 - 445 - if (vco < pixel_vco) { 446 - small = vco; 447 - big = pixel_vco; 448 - } else { 449 - small = pixel_vco; 450 - big = vco; 451 - } 452 - while (big > small) { 453 - big >>= 1; 454 - } 455 - if (big == small) { 456 - continue; 457 - } 458 - } 459 - #endif 460 444 delta = pll_freq_delta(fout, g450_vco2f(mnp, vco)); 461 445 for (idx = mnpcount; idx > 0; idx--) { 462 446 /* == is important; due to nextpll algorithm we get
+1 -1
drivers/video/fbdev/matrox/matroxfb_base.h
··· 86 86 #ifdef DEBUG 87 87 #define dprintk(X...) printk(X) 88 88 #else 89 - #define dprintk(X...) 89 + #define dprintk(X...) no_printk(X) 90 90 #endif 91 91 92 92 #ifndef PCI_SS_VENDOR_ID_SIEMENS_NIXDORF
-2
drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
··· 184 184 static void mb86290fb_imageblit(struct fb_info *info, 185 185 const struct fb_image *image) 186 186 { 187 - int mdr; 188 187 u32 *cmd = NULL; 189 188 void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32, 190 189 const struct fb_image *, struct fb_info *) = NULL; ··· 195 196 u16 dx = image->dx, dy = image->dy; 196 197 int x2, y2, vxres, vyres; 197 198 198 - mdr = (GDC_ROP_COPY << 9); 199 199 x2 = image->dx + image->width; 200 200 y2 = image->dy + image->height; 201 201 vxres = info->var.xres_virtual;
+10 -10
drivers/video/fbdev/mx3fb.c
··· 509 509 uint16_t h_start_width, uint16_t h_sync_width, 510 510 uint16_t h_end_width, uint16_t v_start_width, 511 511 uint16_t v_sync_width, uint16_t v_end_width, 512 - struct ipu_di_signal_cfg sig) 512 + const struct ipu_di_signal_cfg *sig) 513 513 { 514 514 unsigned long lock_flags; 515 515 uint32_t reg; ··· 591 591 592 592 /* DI settings */ 593 593 old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF; 594 - old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT | 595 - sig.clksel_en << DI_D3_CLK_SEL_SHIFT | 596 - sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT; 594 + old_conf |= sig->datamask_en << DI_D3_DATAMSK_SHIFT | 595 + sig->clksel_en << DI_D3_CLK_SEL_SHIFT | 596 + sig->clkidle_en << DI_D3_CLK_IDLE_SHIFT; 597 597 mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF); 598 598 599 599 old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF; 600 - old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT | 601 - sig.clk_pol << DI_D3_CLK_POL_SHIFT | 602 - sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT | 603 - sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT | 604 - sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT; 600 + old_conf |= sig->data_pol << DI_D3_DATA_POL_SHIFT | 601 + sig->clk_pol << DI_D3_CLK_POL_SHIFT | 602 + sig->enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT | 603 + sig->Hsync_pol << DI_D3_HSYNC_POL_SHIFT | 604 + sig->Vsync_pol << DI_D3_VSYNC_POL_SHIFT; 605 605 mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL); 606 606 607 607 map = &di_mappings[mx3fb->disp_data_fmt]; ··· 855 855 fbi->var.upper_margin, 856 856 fbi->var.vsync_len, 857 857 fbi->var.lower_margin + 858 - fbi->var.vsync_len, sig_cfg) != 0) { 858 + fbi->var.vsync_len, &sig_cfg) != 0) { 859 859 dev_err(fbi->device, 860 860 "mx3fb: Error initializing panel.\n"); 861 861 return -EINVAL;
+7 -7
drivers/video/fbdev/omap/omapfb_main.c
··· 1247 1247 size = 0; 1248 1248 while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) { 1249 1249 omapfb_get_caps(fbdev, plane, &caps); 1250 - size += snprintf(&buf[size], PAGE_SIZE - size, 1250 + size += scnprintf(&buf[size], PAGE_SIZE - size, 1251 1251 "plane#%d %#010x %#010x %#010x\n", 1252 1252 plane, caps.ctrl, caps.plane_color, caps.wnd_color); 1253 1253 plane++; ··· 1268 1268 size = 0; 1269 1269 while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) { 1270 1270 omapfb_get_caps(fbdev, plane, &caps); 1271 - size += snprintf(&buf[size], PAGE_SIZE - size, 1271 + size += scnprintf(&buf[size], PAGE_SIZE - size, 1272 1272 "plane#%d:\n", plane); 1273 1273 for (i = 0; i < ARRAY_SIZE(ctrl_caps) && 1274 1274 size < PAGE_SIZE; i++) { 1275 1275 if (ctrl_caps[i].flag & caps.ctrl) 1276 - size += snprintf(&buf[size], PAGE_SIZE - size, 1276 + size += scnprintf(&buf[size], PAGE_SIZE - size, 1277 1277 " %s\n", ctrl_caps[i].name); 1278 1278 } 1279 - size += snprintf(&buf[size], PAGE_SIZE - size, 1279 + size += scnprintf(&buf[size], PAGE_SIZE - size, 1280 1280 " plane colors:\n"); 1281 1281 for (i = 0; i < ARRAY_SIZE(color_caps) && 1282 1282 size < PAGE_SIZE; i++) { 1283 1283 if (color_caps[i].flag & caps.plane_color) 1284 - size += snprintf(&buf[size], PAGE_SIZE - size, 1284 + size += scnprintf(&buf[size], PAGE_SIZE - size, 1285 1285 " %s\n", color_caps[i].name); 1286 1286 } 1287 - size += snprintf(&buf[size], PAGE_SIZE - size, 1287 + size += scnprintf(&buf[size], PAGE_SIZE - size, 1288 1288 " window colors:\n"); 1289 1289 for (i = 0; i < ARRAY_SIZE(color_caps) && 1290 1290 size < PAGE_SIZE; i++) { 1291 1291 if (color_caps[i].flag & caps.wnd_color) 1292 - size += snprintf(&buf[size], PAGE_SIZE - size, 1292 + size += scnprintf(&buf[size], PAGE_SIZE - size, 1293 1293 " %s\n", color_caps[i].name); 1294 1294 } 1295 1295
-114
drivers/video/fbdev/omap2/omapfb/dss/dispc.c
··· 557 557 } 558 558 EXPORT_SYMBOL(dispc_mgr_get_sync_lost_irq); 559 559 560 - u32 dispc_wb_get_framedone_irq(void) 561 - { 562 - return DISPC_IRQ_FRAMEDONEWB; 563 - } 564 - 565 560 bool dispc_mgr_go_busy(enum omap_channel channel) 566 561 { 567 562 return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; ··· 573 578 mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1); 574 579 } 575 580 EXPORT_SYMBOL(dispc_mgr_go); 576 - 577 - bool dispc_wb_go_busy(void) 578 - { 579 - return REG_GET(DISPC_CONTROL2, 6, 6) == 1; 580 - } 581 - 582 - void dispc_wb_go(void) 583 - { 584 - enum omap_plane plane = OMAP_DSS_WB; 585 - bool enable, go; 586 - 587 - enable = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1; 588 - 589 - if (!enable) 590 - return; 591 - 592 - go = REG_GET(DISPC_CONTROL2, 6, 6) == 1; 593 - if (go) { 594 - DSSERR("GO bit not down for WB\n"); 595 - return; 596 - } 597 - 598 - REG_FLD_MOD(DISPC_CONTROL2, 1, 6, 6); 599 - } 600 581 601 582 static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value) 602 583 { ··· 997 1026 case 3: 998 1027 return OMAP_DSS_CHANNEL_WB; 999 1028 } 1000 - } 1001 - 1002 - void dispc_wb_set_channel_in(enum dss_writeback_channel channel) 1003 - { 1004 - enum omap_plane plane = OMAP_DSS_WB; 1005 - 1006 - REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), channel, 18, 16); 1007 1029 } 1008 1030 1009 1031 static void dispc_ovl_set_burst_size(enum omap_plane plane, ··· 2769 2805 } 2770 2806 EXPORT_SYMBOL(dispc_ovl_setup); 2771 2807 2772 - int dispc_wb_setup(const struct omap_dss_writeback_info *wi, 2773 - bool mem_to_mem, const struct omap_video_timings *mgr_timings) 2774 - { 2775 - int r; 2776 - u32 l; 2777 - enum omap_plane plane = OMAP_DSS_WB; 2778 - const int pos_x = 0, pos_y = 0; 2779 - const u8 zorder = 0, global_alpha = 0; 2780 - const bool replication = false; 2781 - bool truncation; 2782 - int in_width = mgr_timings->x_res; 2783 - int in_height = mgr_timings->y_res; 2784 - enum omap_overlay_caps caps = 2785 - OMAP_DSS_OVL_CAP_SCALE | 
OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA; 2786 - 2787 - DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, " 2788 - "rot %d, mir %d\n", wi->paddr, wi->p_uv_addr, in_width, 2789 - in_height, wi->width, wi->height, wi->color_mode, wi->rotation, 2790 - wi->mirror); 2791 - 2792 - r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr, 2793 - wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width, 2794 - wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder, 2795 - wi->pre_mult_alpha, global_alpha, wi->rotation_type, 2796 - replication, mgr_timings, mem_to_mem); 2797 - 2798 - switch (wi->color_mode) { 2799 - case OMAP_DSS_COLOR_RGB16: 2800 - case OMAP_DSS_COLOR_RGB24P: 2801 - case OMAP_DSS_COLOR_ARGB16: 2802 - case OMAP_DSS_COLOR_RGBA16: 2803 - case OMAP_DSS_COLOR_RGB12U: 2804 - case OMAP_DSS_COLOR_ARGB16_1555: 2805 - case OMAP_DSS_COLOR_XRGB16_1555: 2806 - case OMAP_DSS_COLOR_RGBX16: 2807 - truncation = true; 2808 - break; 2809 - default: 2810 - truncation = false; 2811 - break; 2812 - } 2813 - 2814 - /* setup extra DISPC_WB_ATTRIBUTES */ 2815 - l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); 2816 - l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */ 2817 - l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */ 2818 - if (mem_to_mem) 2819 - l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */ 2820 - else 2821 - l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */ 2822 - dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l); 2823 - 2824 - if (mem_to_mem) { 2825 - /* WBDELAYCOUNT */ 2826 - REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0); 2827 - } else { 2828 - int wbdelay; 2829 - 2830 - wbdelay = min(mgr_timings->vfp + mgr_timings->vsw + 2831 - mgr_timings->vbp, 255); 2832 - 2833 - /* WBDELAYCOUNT */ 2834 - REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0); 2835 - } 2836 - 2837 - return r; 2838 - } 2839 - 2840 2808 int dispc_ovl_enable(enum omap_plane plane, bool enable) 2841 2809 { 2842 2810 DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); 
··· 2798 2902 return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); 2799 2903 } 2800 2904 EXPORT_SYMBOL(dispc_mgr_is_enabled); 2801 - 2802 - void dispc_wb_enable(bool enable) 2803 - { 2804 - dispc_ovl_enable(OMAP_DSS_WB, enable); 2805 - } 2806 - 2807 - bool dispc_wb_is_enabled(void) 2808 - { 2809 - return dispc_ovl_enabled(OMAP_DSS_WB); 2810 - } 2811 2905 2812 2906 static void dispc_lcd_enable_signal_polarity(bool act_high) 2813 2907 {
-20
drivers/video/fbdev/omap2/omapfb/dss/dss.h
··· 89 89 DSS_DSI_CONTENT_GENERIC, 90 90 }; 91 91 92 - enum dss_writeback_channel { 93 - DSS_WB_LCD1_MGR = 0, 94 - DSS_WB_LCD2_MGR = 1, 95 - DSS_WB_TV_MGR = 2, 96 - DSS_WB_OVL0 = 3, 97 - DSS_WB_OVL1 = 4, 98 - DSS_WB_OVL2 = 5, 99 - DSS_WB_OVL3 = 6, 100 - DSS_WB_LCD3_MGR = 7, 101 - }; 102 - 103 92 enum dss_pll_id { 104 93 DSS_PLL_DSI1, 105 94 DSS_PLL_DSI2, ··· 391 402 int dispc_mgr_get_clock_div(enum omap_channel channel, 392 403 struct dispc_clock_info *cinfo); 393 404 void dispc_set_tv_pclk(unsigned long pclk); 394 - 395 - u32 dispc_wb_get_framedone_irq(void); 396 - bool dispc_wb_go_busy(void); 397 - void dispc_wb_go(void); 398 - void dispc_wb_enable(bool enable); 399 - bool dispc_wb_is_enabled(void); 400 - void dispc_wb_set_channel_in(enum dss_writeback_channel channel); 401 - int dispc_wb_setup(const struct omap_dss_writeback_info *wi, 402 - bool mem_to_mem, const struct omap_video_timings *timings); 403 405 404 406 u32 dispc_read_irqstatus(void); 405 407 void dispc_clear_irqstatus(u32 mask);
+4 -4
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
··· 147 147 if (ovl == fbdev->overlays[ovlnum]) 148 148 break; 149 149 150 - l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", 150 + l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d", 151 151 t == 0 ? "" : ",", ovlnum); 152 152 } 153 153 154 - l += snprintf(buf + l, PAGE_SIZE - l, "\n"); 154 + l += scnprintf(buf + l, PAGE_SIZE - l, "\n"); 155 155 156 156 omapfb_unlock(fbdev); 157 157 unlock_fb_info(fbi); ··· 328 328 lock_fb_info(fbi); 329 329 330 330 for (t = 0; t < ofbi->num_overlays; t++) { 331 - l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", 331 + l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d", 332 332 t == 0 ? "" : ",", ofbi->rotation[t]); 333 333 } 334 334 335 - l += snprintf(buf + l, PAGE_SIZE - l, "\n"); 335 + l += scnprintf(buf + l, PAGE_SIZE - l, "\n"); 336 336 337 337 unlock_fb_info(fbi); 338 338
+1 -1
drivers/video/fbdev/pm2fb.c
··· 54 54 #define DPRINTK(a, b...) \ 55 55 printk(KERN_DEBUG "pm2fb: %s: " a, __func__ , ## b) 56 56 #else 57 - #define DPRINTK(a, b...) 57 + #define DPRINTK(a, b...) no_printk(a, ##b) 58 58 #endif 59 59 60 60 #define PM2_PIXMAP_SIZE (1600 * 4)
+4 -4
drivers/video/fbdev/pm3fb.c
··· 44 44 #define DPRINTK(a, b...) \ 45 45 printk(KERN_DEBUG "pm3fb: %s: " a, __func__ , ## b) 46 46 #else 47 - #define DPRINTK(a, b...) 47 + #define DPRINTK(a, b...) no_printk(a, ##b) 48 48 #endif 49 49 50 50 #define PM3_PIXMAP_SIZE (2048 * 4) ··· 306 306 PM3PixelSize_GLOBAL_32BIT); 307 307 break; 308 308 default: 309 - DPRINTK(1, "Unsupported depth %d\n", 309 + DPRINTK("Unsupported depth %d\n", 310 310 info->var.bits_per_pixel); 311 311 break; 312 312 } ··· 349 349 (1 << 10) | (0 << 3)); 350 350 break; 351 351 default: 352 - DPRINTK(1, "Unsupported depth %d\n", 353 - info->current_par->depth); 352 + DPRINTK("Unsupported depth %d\n", 353 + info->var.bits_per_pixel); 354 354 break; 355 355 } 356 356 }
+1 -1
drivers/video/fbdev/savage/savagefb.h
··· 21 21 #ifdef SAVAGEFB_DEBUG 22 22 # define DBG(x) printk (KERN_DEBUG "savagefb: %s\n", (x)); 23 23 #else 24 - # define DBG(x) 24 + # define DBG(x) no_printk(x) 25 25 # define SavagePrintRegs(...) 26 26 #endif 27 27
+1 -1
drivers/video/fbdev/uvesafb.c
··· 1560 1560 int ret = 0, i; 1561 1561 1562 1562 for (i = 0; i < par->vbe_modes_cnt && ret < PAGE_SIZE; i++) { 1563 - ret += snprintf(buf + ret, PAGE_SIZE - ret, 1563 + ret += scnprintf(buf + ret, PAGE_SIZE - ret, 1564 1564 "%dx%d-%d, 0x%.4x\n", 1565 1565 par->vbe_modes[i].x_res, par->vbe_modes[i].y_res, 1566 1566 par->vbe_modes[i].depth, par->vbe_modes[i].mode_id);
+4 -2
drivers/video/fbdev/via/debug.h
··· 7 7 #ifndef __DEBUG_H__ 8 8 #define __DEBUG_H__ 9 9 10 + #include <linux/printk.h> 11 + 10 12 #ifndef VIAFB_DEBUG 11 13 #define VIAFB_DEBUG 0 12 14 #endif ··· 16 14 #if VIAFB_DEBUG 17 15 #define DEBUG_MSG(f, a...) printk(f, ## a) 18 16 #else 19 - #define DEBUG_MSG(f, a...) 17 + #define DEBUG_MSG(f, a...) no_printk(f, ## a) 20 18 #endif 21 19 22 20 #define VIAFB_WARN 0 23 21 #if VIAFB_WARN 24 22 #define WARN_MSG(f, a...) printk(f, ## a) 25 23 #else 26 - #define WARN_MSG(f, a...) 24 + #define WARN_MSG(f, a...) no_printk(f, ## a) 27 25 #endif 28 26 29 27 #endif /* __DEBUG_H__ */
+1 -1
drivers/video/fbdev/via/viafbdev.c
··· 1144 1144 if (value != NULL) { 1145 1145 if (kstrtou8(value, 0, &reg_val) < 0) 1146 1146 return -EINVAL; 1147 - DEBUG_MSG(KERN_INFO "DVP0:reg_val[%l]=:%x\n", i, 1147 + DEBUG_MSG(KERN_INFO "DVP0:reg_val[%lu]=:%x\n", i, 1148 1148 reg_val); 1149 1149 switch (i) { 1150 1150 case 0:
+1 -1
include/drm/drm_client.h
··· 188 188 drm_for_each_connector_iter(connector, iter) \ 189 189 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 190 190 191 - int drm_client_debugfs_init(struct drm_minor *minor); 191 + void drm_client_debugfs_init(struct drm_minor *minor); 192 192 193 193 #endif
+2 -2
include/drm/drm_connector.h
··· 1617 1617 }; 1618 1618 1619 1619 struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, 1620 - char topology[8]); 1620 + const char topology[8]); 1621 1621 struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, 1622 - char topology[8]); 1622 + const char topology[8]); 1623 1623 void drm_mode_put_tile_group(struct drm_device *dev, 1624 1624 struct drm_tile_group *tg); 1625 1625
+7 -9
include/drm/drm_debugfs.h
··· 80 80 }; 81 81 82 82 #if defined(CONFIG_DEBUG_FS) 83 - int drm_debugfs_create_files(const struct drm_info_list *files, 84 - int count, struct dentry *root, 85 - struct drm_minor *minor); 83 + void drm_debugfs_create_files(const struct drm_info_list *files, 84 + int count, struct dentry *root, 85 + struct drm_minor *minor); 86 86 int drm_debugfs_remove_files(const struct drm_info_list *files, 87 87 int count, struct drm_minor *minor); 88 88 #else 89 - static inline int drm_debugfs_create_files(const struct drm_info_list *files, 90 - int count, struct dentry *root, 91 - struct drm_minor *minor) 92 - { 93 - return 0; 94 - } 89 + static inline void drm_debugfs_create_files(const struct drm_info_list *files, 90 + int count, struct dentry *root, 91 + struct drm_minor *minor) 92 + {} 95 93 96 94 static inline int drm_debugfs_remove_files(const struct drm_info_list *files, 97 95 int count, struct drm_minor *minor)
+15
include/drm/drm_device.h
··· 67 67 /** @dev: Device structure of bus-device */ 68 68 struct device *dev; 69 69 70 + /** 71 + * @managed: 72 + * 73 + * Managed resources linked to the lifetime of this &drm_device as 74 + * tracked by @ref. 75 + */ 76 + struct { 77 + /** @managed.resources: managed resources list */ 78 + struct list_head resources; 79 + /** @managed.final_kfree: pointer for final kfree() call */ 80 + void *final_kfree; 81 + /** @managed.lock: protects @managed.resources */ 82 + spinlock_t lock; 83 + } managed; 84 + 70 85 /** @driver: DRM driver managing the device */ 71 86 struct drm_driver *driver; 72 87
+1 -1
include/drm/drm_displayid.h
··· 97 97 (idx) + sizeof(struct displayid_block) <= (length) && \ 98 98 (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \ 99 99 (block)->num_bytes > 0; \ 100 - (idx) += (block)->num_bytes + sizeof(struct displayid_block), \ 100 + (idx) += sizeof(struct displayid_block) + (block)->num_bytes, \ 101 101 (block) = (struct displayid_block *)&(displayid)[idx]) 102 102 103 103 #endif
+34 -38
include/drm/drm_dp_mst_helper.h
··· 157 157 */ 158 158 bool has_audio; 159 159 160 + /** 161 + * @fec_capable: bool indicating if FEC can be supported up to that 162 + * point in the MST topology. 163 + */ 160 164 bool fec_capable; 165 + }; 166 + 167 + /* sideband msg header - not bit struct */ 168 + struct drm_dp_sideband_msg_hdr { 169 + u8 lct; 170 + u8 lcr; 171 + u8 rad[8]; 172 + bool broadcast; 173 + bool path_msg; 174 + u8 msg_len; 175 + bool somt; 176 + bool eomt; 177 + bool seqno; 178 + }; 179 + 180 + struct drm_dp_sideband_msg_rx { 181 + u8 chunk[48]; 182 + u8 msg[256]; 183 + u8 curchunk_len; 184 + u8 curchunk_idx; /* chunk we are parsing now */ 185 + u8 curchunk_hdrlen; 186 + u8 curlen; /* total length of the msg */ 187 + bool have_somt; 188 + bool have_eomt; 189 + struct drm_dp_sideband_msg_hdr initial_hdr; 161 190 }; 162 191 163 192 /** ··· 261 232 int last_seqno; 262 233 bool link_address_sent; 263 234 235 + /** 236 + * @down_rep_recv: Message receiver state for down replies. 237 + */ 238 + struct drm_dp_sideband_msg_rx down_rep_recv[2]; 239 + 264 240 /* global unique identifier to identify branch devices */ 265 241 u8 guid[16]; 266 242 }; 267 243 268 - 269 - /* sideband msg header - not bit struct */ 270 - struct drm_dp_sideband_msg_hdr { 271 - u8 lct; 272 - u8 lcr; 273 - u8 rad[8]; 274 - bool broadcast; 275 - bool path_msg; 276 - u8 msg_len; 277 - bool somt; 278 - bool eomt; 279 - bool seqno; 280 - }; 281 244 282 245 struct drm_dp_nak_reply { 283 246 u8 guid[16]; ··· 326 305 u8 port_number; 327 306 }; 328 307 329 - 330 - struct drm_dp_sideband_msg_rx { 331 - u8 chunk[48]; 332 - u8 msg[256]; 333 - u8 curchunk_len; 334 - u8 curchunk_idx; /* chunk we are parsing now */ 335 - u8 curchunk_hdrlen; 336 - u8 curlen; /* total length of the msg */ 337 - bool have_somt; 338 - bool have_eomt; 339 - struct drm_dp_sideband_msg_hdr initial_hdr; 340 - }; 341 308 342 309 #define DRM_DP_MAX_SDP_STREAMS 16 343 310 struct drm_dp_allocate_payload { ··· 488 479 struct drm_dp_mst_topology_cbs { 489 480 /* 
create a connector for a port */ 490 481 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); 491 - void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, 492 - struct drm_connector *connector); 493 482 }; 494 483 495 484 #define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) ··· 563 556 int conn_base_id; 564 557 565 558 /** 566 - * @down_rep_recv: Message receiver state for down replies. 567 - */ 568 - struct drm_dp_sideband_msg_rx down_rep_recv; 569 - /** 570 559 * @up_req_recv: Message receiver state for up requests. 571 560 */ 572 561 struct drm_dp_sideband_msg_rx up_req_recv; ··· 591 588 * ID table for @mst_primary. Protected by @lock. 592 589 */ 593 590 bool payload_id_table_cleared : 1; 594 - 595 - /** 596 - * @is_waiting_for_dwn_reply: whether we're waiting for a down reply. 597 - */ 598 - bool is_waiting_for_dwn_reply : 1; 599 591 600 592 /** 601 593 * @mst_primary: Pointer to the primary/first branch device. ··· 732 734 struct drm_dp_mst_topology_mgr *mgr, 733 735 struct drm_dp_mst_port *port); 734 736 735 - bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, 736 - struct drm_dp_mst_port *port); 737 737 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); 738 738 739 739
+6 -5
include/drm/drm_drv.h
··· 262 262 * @release: 263 263 * 264 264 * Optional callback for destroying device data after the final 265 - * reference is released, i.e. the device is being destroyed. Drivers 266 - * using this callback are responsible for calling drm_dev_fini() 267 - * to finalize the device and then freeing the struct themselves. 265 + * reference is released, i.e. the device is being destroyed. 266 + * 267 + * This is deprecated, clean up all memory allocations associated with a 268 + * &drm_device using drmm_add_action(), drmm_kmalloc() and related 269 + * managed resources functions. 268 270 */ 269 271 void (*release) (struct drm_device *); 270 272 ··· 325 323 * 326 324 * Allows drivers to create driver-specific debugfs files. 327 325 */ 328 - int (*debugfs_init)(struct drm_minor *minor); 326 + void (*debugfs_init)(struct drm_minor *minor); 329 327 330 328 /** 331 329 * @gem_free_object: deconstructor for drm_gem_objects ··· 622 620 int devm_drm_dev_init(struct device *parent, 623 621 struct drm_device *dev, 624 622 struct drm_driver *driver); 625 - void drm_dev_fini(struct drm_device *dev); 626 623 627 624 struct drm_device *drm_dev_alloc(struct drm_driver *driver, 628 625 struct device *parent);
+6 -2
include/drm/drm_encoder.h
··· 142 142 * the bits for all &drm_crtc objects this encoder can be connected to 143 143 * before calling drm_dev_register(). 144 144 * 145 - * In reality almost every driver gets this wrong. 145 + * You will get a WARN if you get this wrong in the driver. 146 146 * 147 147 * Note that since CRTC objects can't be hotplugged the assigned indices 148 148 * are stable and hence known before registering all objects. ··· 159 159 * encoders can be used in a cloned configuration, they both should have 160 160 * each another bits set. 161 161 * 162 - * In reality almost every driver gets this wrong. 162 + * As an exception to the above rule if the driver doesn't implement 163 + * any cloning it can leave @possible_clones set to 0. The core will 164 + * automagically fix this up by setting the bit for the encoder itself. 165 + * 166 + * You will get a WARN if you get this wrong in the driver. 163 167 * 164 168 * Note that since encoder objects can't be hotplugged the assigned indices 165 169 * are stable and hence known before registering all objects.
+3 -3
include/drm/drm_fb_helper.h
··· 269 269 void drm_fb_helper_lastclose(struct drm_device *dev); 270 270 void drm_fb_helper_output_poll_changed(struct drm_device *dev); 271 271 272 - int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp); 272 + void drm_fbdev_generic_setup(struct drm_device *dev, 273 + unsigned int preferred_bpp); 273 274 #else 274 275 static inline void drm_fb_helper_prepare(struct drm_device *dev, 275 276 struct drm_fb_helper *helper, ··· 444 443 { 445 444 } 446 445 447 - static inline int 446 + static inline void 448 447 drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) 449 448 { 450 - return 0; 451 449 } 452 450 453 451 #endif
+11
include/drm/drm_file.h
··· 202 202 bool writeback_connectors; 203 203 204 204 /** 205 + * @was_master: 206 + * 207 + * This client has or had, master capability. Protected by struct 208 + * &drm_device.master_mutex. 209 + * 210 + * This is used to ensure that CAP_SYS_ADMIN is not enforced, if the 211 + * client is or was master in the past. 212 + */ 213 + bool was_master; 214 + 215 + /** 205 216 * @is_master: 206 217 * 207 218 * This client is the creator of @master. Protected by struct
+38
include/drm/drm_framebuffer.h
··· 297 297 int drm_framebuffer_plane_height(int height, 298 298 const struct drm_framebuffer *fb, int plane); 299 299 300 + /** 301 + * struct drm_afbc_framebuffer - a special afbc frame buffer object 302 + * 303 + * A derived class of struct drm_framebuffer, dedicated for afbc use cases. 304 + */ 305 + struct drm_afbc_framebuffer { 306 + /** 307 + * @base: base framebuffer structure. 308 + */ 309 + struct drm_framebuffer base; 310 + /** 311 + * @block_width: width of a single afbc block 312 + */ 313 + u32 block_width; 314 + /** 315 + * @block_height: height of a single afbc block 316 + */ 317 + u32 block_height; 318 + /** 319 + * @aligned_width: aligned frame buffer width 320 + */ 321 + u32 aligned_width; 322 + /** 323 + * @aligned_height: aligned frame buffer height 324 + */ 325 + u32 aligned_height; 326 + /** 327 + * @offset: offset of the first afbc header 328 + */ 329 + u32 offset; 330 + /** 331 + * @afbc_size: minimum size of afbc buffer 332 + */ 333 + u32 afbc_size; 334 + }; 335 + 336 + #define fb_to_afbc_fb(x) container_of(x, struct drm_afbc_framebuffer, base) 337 + 300 338 #endif
+15
include/drm/drm_gem_framebuffer_helper.h
··· 1 1 #ifndef __DRM_GEM_FB_HELPER_H__ 2 2 #define __DRM_GEM_FB_HELPER_H__ 3 3 4 + struct drm_afbc_framebuffer; 4 5 struct drm_device; 5 6 struct drm_fb_helper_surface_size; 6 7 struct drm_file; ··· 13 12 struct drm_plane_state; 14 13 struct drm_simple_display_pipe; 15 14 15 + #define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52) 16 + 16 17 struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb, 17 18 unsigned int plane); 18 19 void drm_gem_fb_destroy(struct drm_framebuffer *fb); 19 20 int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file, 20 21 unsigned int *handle); 21 22 23 + int drm_gem_fb_init_with_funcs(struct drm_device *dev, 24 + struct drm_framebuffer *fb, 25 + struct drm_file *file, 26 + const struct drm_mode_fb_cmd2 *mode_cmd, 27 + const struct drm_framebuffer_funcs *funcs); 22 28 struct drm_framebuffer * 23 29 drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, 24 30 const struct drm_mode_fb_cmd2 *mode_cmd, ··· 36 28 struct drm_framebuffer * 37 29 drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, 38 30 const struct drm_mode_fb_cmd2 *mode_cmd); 31 + 32 + #define drm_is_afbc(modifier) \ 33 + (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0)) 34 + 35 + int drm_gem_fb_afbc_init(struct drm_device *dev, 36 + const struct drm_mode_fb_cmd2 *mode_cmd, 37 + struct drm_afbc_framebuffer *afbc_fb); 39 38 40 39 int drm_gem_fb_prepare_fb(struct drm_plane *plane, 41 40 struct drm_plane_state *state);
+1 -1
include/drm/drm_gem_vram_helper.h
··· 196 196 return container_of(bdev, struct drm_vram_mm, bdev); 197 197 } 198 198 199 - int drm_vram_mm_debugfs_init(struct drm_minor *minor); 199 + void drm_vram_mm_debugfs_init(struct drm_minor *minor); 200 200 201 201 /* 202 202 * Helpers for integration with struct drm_device
+15
include/drm/drm_legacy.h
··· 194 194 195 195 #ifdef CONFIG_PCI 196 196 197 + struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, 198 + size_t align); 199 + void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah); 200 + 197 201 int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); 198 202 void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); 199 203 200 204 #else 205 + 206 + static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, 207 + size_t size, size_t align) 208 + { 209 + return NULL; 210 + } 211 + 212 + static inline void drm_pci_free(struct drm_device *dev, 213 + struct drm_dma_handle *dmah) 214 + { 215 + } 201 216 202 217 static inline int drm_legacy_pci_init(struct drm_driver *driver, 203 218 struct pci_driver *pdriver)
+109
include/drm/drm_managed.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #ifndef _DRM_MANAGED_H_ 4 + #define _DRM_MANAGED_H_ 5 + 6 + #include <linux/gfp.h> 7 + #include <linux/overflow.h> 8 + #include <linux/types.h> 9 + 10 + struct drm_device; 11 + 12 + typedef void (*drmres_release_t)(struct drm_device *dev, void *res); 13 + 14 + /** 15 + * drmm_add_action - add a managed release action to a &drm_device 16 + * @dev: DRM device 17 + * @action: function which should be called when @dev is released 18 + * @data: opaque pointer, passed to @action 19 + * 20 + * This function adds the @release action with optional parameter @data to the 21 + * list of cleanup actions for @dev. The cleanup actions will be run in reverse 22 + * order in the final drm_dev_put() call for @dev. 23 + */ 24 + #define drmm_add_action(dev, action, data) \ 25 + __drmm_add_action(dev, action, data, #action) 26 + 27 + int __must_check __drmm_add_action(struct drm_device *dev, 28 + drmres_release_t action, 29 + void *data, const char *name); 30 + 31 + /** 32 + * drmm_add_action_or_reset - add a managed release action to a &drm_device 33 + * @dev: DRM device 34 + * @action: function which should be called when @dev is released 35 + * @data: opaque pointer, passed to @action 36 + * 37 + * Similar to drmm_add_action(), with the only difference that upon failure 38 + * @action is directly called for any cleanup work necessary on failures. 
39 + */ 40 + #define drmm_add_action_or_reset(dev, action, data) \ 41 + __drmm_add_action_or_reset(dev, action, data, #action) 42 + 43 + int __must_check __drmm_add_action_or_reset(struct drm_device *dev, 44 + drmres_release_t action, 45 + void *data, const char *name); 46 + 47 + void drmm_add_final_kfree(struct drm_device *dev, void *container); 48 + 49 + void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc; 50 + 51 + /** 52 + * drmm_kzalloc - &drm_device managed kzalloc() 53 + * @dev: DRM device 54 + * @size: size of the memory allocation 55 + * @gfp: GFP allocation flags 56 + * 57 + * This is a &drm_device managed version of kzalloc(). The allocated memory is 58 + * automatically freed on the final drm_dev_put(). Memory can also be freed 59 + * before the final drm_dev_put() by calling drmm_kfree(). 60 + */ 61 + static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp) 62 + { 63 + return drmm_kmalloc(dev, size, gfp | __GFP_ZERO); 64 + } 65 + 66 + /** 67 + * drmm_kmalloc_array - &drm_device managed kmalloc_array() 68 + * @dev: DRM device 69 + * @n: number of array elements to allocate 70 + * @size: size of array member 71 + * @flags: GFP allocation flags 72 + * 73 + * This is a &drm_device managed version of kmalloc_array(). The allocated 74 + * memory is automatically freed on the final drm_dev_put() and works exactly 75 + * like a memory allocation obtained by drmm_kmalloc(). 
76 + */ 77 + static inline void *drmm_kmalloc_array(struct drm_device *dev, 78 + size_t n, size_t size, gfp_t flags) 79 + { 80 + size_t bytes; 81 + 82 + if (unlikely(check_mul_overflow(n, size, &bytes))) 83 + return NULL; 84 + 85 + return drmm_kmalloc(dev, bytes, flags); 86 + } 87 + 88 + /** 89 + * drmm_kcalloc - &drm_device managed kcalloc() 90 + * @dev: DRM device 91 + * @n: number of array elements to allocate 92 + * @size: size of array member 93 + * @flags: GFP allocation flags 94 + * 95 + * This is a &drm_device managed version of kcalloc(). The allocated memory is 96 + * automatically freed on the final drm_dev_put() and works exactly like a 97 + * memory allocation obtained by drmm_kmalloc(). 98 + */ 99 + static inline void *drmm_kcalloc(struct drm_device *dev, 100 + size_t n, size_t size, gfp_t flags) 101 + { 102 + return drmm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); 103 + } 104 + 105 + char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp); 106 + 107 + void drmm_kfree(struct drm_device *dev, void *data); 108 + 109 + #endif
+4 -4
include/drm/drm_mipi_dbi.h
··· 152 152 int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev, 153 153 const struct drm_simple_display_pipe_funcs *funcs, 154 154 const struct drm_display_mode *mode, unsigned int rotation); 155 - void mipi_dbi_release(struct drm_device *drm); 156 155 void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe, 157 156 struct drm_plane_state *old_state); 158 157 void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev, ··· 169 170 170 171 int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val); 171 172 int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); 172 - int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); 173 + int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, 174 + size_t len); 173 175 int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, 174 176 struct drm_rect *clip, bool swap); 175 177 /** ··· 187 187 */ 188 188 #define mipi_dbi_command(dbi, cmd, seq...) \ 189 189 ({ \ 190 - u8 d[] = { seq }; \ 190 + const u8 d[] = { seq }; \ 191 191 mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \ 192 192 }) 193 193 194 194 #ifdef CONFIG_DEBUG_FS 195 - int mipi_dbi_debugfs_init(struct drm_minor *minor); 195 + void mipi_dbi_debugfs_init(struct drm_minor *minor); 196 196 #else 197 197 #define mipi_dbi_debugfs_init NULL 198 198 #endif
+17 -1
include/drm/drm_mode_config.h
··· 929 929 const struct drm_mode_config_helper_funcs *helper_private; 930 930 }; 931 931 932 - void drm_mode_config_init(struct drm_device *dev); 932 + int __must_check drmm_mode_config_init(struct drm_device *dev); 933 + 934 + /** 935 + * drm_mode_config_init - DRM mode_configuration structure initialization 936 + * @dev: DRM device 937 + * 938 + * This is the unmanaged version of drmm_mode_config_init() for drivers which 939 + * still explicitly call drm_mode_config_cleanup(). 940 + * 941 + * FIXME: This function is deprecated and drivers should be converted over to 942 + * drmm_mode_config_init(). 943 + */ 944 + static inline int drm_mode_config_init(struct drm_device *dev) 945 + { 946 + return drmm_mode_config_init(dev); 947 + } 948 + 933 949 void drm_mode_config_reset(struct drm_device *dev); 934 950 void drm_mode_config_cleanup(struct drm_device *dev); 935 951
+27
include/drm/drm_modeset_helper_vtables.h
··· 1075 1075 void (*atomic_commit)(struct drm_connector *connector, 1076 1076 struct drm_connector_state *state); 1077 1077 1078 + /** 1079 + * @prepare_writeback_job: 1080 + * 1081 + * As writeback jobs contain a framebuffer, drivers may need to 1082 + * prepare and clean them up the same way they can prepare and 1083 + * clean up framebuffers for planes. This optional connector operation 1084 + * is used to support the preparation of writeback jobs. The job 1085 + * prepare operation is called from drm_atomic_helper_prepare_planes() 1086 + * for struct &drm_writeback_connector connectors only. 1087 + * 1088 + * This operation is optional. 1089 + * 1090 + * This callback is used by the atomic modeset helpers. 1091 + */ 1078 1092 int (*prepare_writeback_job)(struct drm_writeback_connector *connector, 1079 1093 struct drm_writeback_job *job); 1094 + /** 1095 + * @cleanup_writeback_job: 1096 + * 1097 + * This optional connector operation is used to support the 1098 + * cleanup of writeback jobs. The job cleanup operation is called 1099 + * from the existing drm_writeback_cleanup_job() function, invoked 1100 + * both when destroying the job as part of an aborted commit, or when 1101 + * the job completes. 1102 + * 1103 + * This operation is optional. 1104 + * 1105 + * This callback is used by the atomic modeset helpers. 1106 + */ 1080 1107 void (*cleanup_writeback_job)(struct drm_writeback_connector *connector, 1081 1108 struct drm_writeback_job *job); 1082 1109 };
-63
include/drm/drm_pci.h
··· 1 - /* 2 - * Internal Header for the Direct Rendering Manager 3 - * 4 - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 5 - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 - * Copyright (c) 2009-2010, Code Aurora Forum. 7 - * All rights reserved. 8 - * 9 - * Author: Rickard E. (Rik) Faith <faith@valinux.com> 10 - * Author: Gareth Hughes <gareth@valinux.com> 11 - * 12 - * Permission is hereby granted, free of charge, to any person obtaining a 13 - * copy of this software and associated documentation files (the "Software"), 14 - * to deal in the Software without restriction, including without limitation 15 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 16 - * and/or sell copies of the Software, and to permit persons to whom the 17 - * Software is furnished to do so, subject to the following conditions: 18 - * 19 - * The above copyright notice and this permission notice (including the next 20 - * paragraph) shall be included in all copies or substantial portions of the 21 - * Software. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 27 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 28 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 29 - * OTHER DEALINGS IN THE SOFTWARE. 
30 - */ 31 - 32 - #ifndef _DRM_PCI_H_ 33 - #define _DRM_PCI_H_ 34 - 35 - #include <linux/pci.h> 36 - 37 - struct drm_dma_handle; 38 - struct drm_device; 39 - struct drm_driver; 40 - struct drm_master; 41 - 42 - #ifdef CONFIG_PCI 43 - 44 - struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, 45 - size_t align); 46 - void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); 47 - 48 - #else 49 - 50 - static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, 51 - size_t size, size_t align) 52 - { 53 - return NULL; 54 - } 55 - 56 - static inline void drm_pci_free(struct drm_device *dev, 57 - struct drm_dma_handle *dmah) 58 - { 59 - } 60 - 61 - #endif 62 - 63 - #endif /* _DRM_PCI_H_ */
+6
include/drm/drm_print.h
··· 313 313 * @DRM_UT_DP: Used in the DP code. 314 314 */ 315 315 DRM_UT_DP = 0x100, 316 + /** 317 + * @DRM_UT_DRMRES: Used in the drm managed resources code. 318 + */ 319 + DRM_UT_DRMRES = 0x200, 316 320 }; 317 321 318 322 static inline bool drm_debug_enabled(enum drm_debug_category category) ··· 446 442 drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__) 447 443 #define drm_dbg_dp(drm, fmt, ...) \ 448 444 drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__) 445 + #define drm_dbg_drmres(drm, fmt, ...) \ 446 + drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) 449 447 450 448 451 449 /*
+9
include/drm/drm_writeback.h
··· 15 15 #include <drm/drm_encoder.h> 16 16 #include <linux/workqueue.h> 17 17 18 + /** 19 + * struct drm_writeback_connector - DRM writeback connector 20 + */ 18 21 struct drm_writeback_connector { 22 + /** 23 + * @base: base drm_connector object 24 + */ 19 25 struct drm_connector base; 20 26 21 27 /** ··· 84 78 char timeline_name[32]; 85 79 }; 86 80 81 + /** 82 + * struct drm_writeback_job - DRM writeback job 83 + */ 87 84 struct drm_writeback_job { 88 85 /** 89 86 * @connector:
+1
include/drm/gpu_scheduler.h
··· 56 56 * Jobs from this entity can be scheduled on any scheduler 57 57 * on this list. 58 58 * @num_sched_list: number of drm_gpu_schedulers in the sched_list. 59 + * @priority: priority of the entity 59 60 * @rq_lock: lock to modify the runqueue to which this entity belongs. 60 61 * @job_queue: the list of jobs of this entity. 61 62 * @fence_seq: a linearly increasing seqno incremented with each
-31
include/drm/ttm/ttm_debug.h
··· 1 - /************************************************************************** 2 - * 3 - * Copyright (c) 2017 Advanced Micro Devices, Inc. 4 - * All Rights Reserved. 5 - * 6 - * Permission is hereby granted, free of charge, to any person obtaining a 7 - * copy of this software and associated documentation files (the 8 - * "Software"), to deal in the Software without restriction, including 9 - * without limitation the rights to use, copy, modify, merge, publish, 10 - * distribute, sub license, and/or sell copies of the Software, and to 11 - * permit persons to whom the Software is furnished to do so, subject to 12 - * the following conditions: 13 - * 14 - * The above copyright notice and this permission notice (including the 15 - * next paragraph) shall be included in all copies or substantial portions 16 - * of the Software. 17 - * 18 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 - * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 - * 26 - **************************************************************************/ 27 - /* 28 - * Authors: Tom St Denis <tom.stdenis@amd.com> 29 - */ 30 - extern void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt); 31 - extern void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt);
+10
include/linux/dma-buf.h
··· 335 335 */ 336 336 struct dma_buf_attach_ops { 337 337 /** 338 + * @allow_peer2peer: 339 + * 340 + * If this is set to true the importer must be able to handle peer 341 + * resources without struct pages. 342 + */ 343 + bool allow_peer2peer; 344 + 345 + /** 338 346 * @move_notify 339 347 * 340 348 * If this callback is provided the framework can avoid pinning the ··· 370 362 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf. 371 363 * @sgt: cached mapping. 372 364 * @dir: direction of cached mapping. 365 + * @peer2peer: true if the importer can handle peer resources without pages. 373 366 * @priv: exporter specific attachment data. 374 367 * @importer_ops: importer operations for this attachment, if provided 375 368 * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held. ··· 391 382 struct list_head node; 392 383 struct sg_table *sgt; 393 384 enum dma_data_direction dir; 385 + bool peer2peer; 394 386 const struct dma_buf_attach_ops *importer_ops; 395 387 void *importer_priv; 396 388 void *priv;
+2
mm/slob.c
··· 524 524 { 525 525 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); 526 526 } 527 + EXPORT_SYMBOL(__kmalloc_track_caller); 527 528 528 529 #ifdef CONFIG_NUMA 529 530 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, ··· 532 531 { 533 532 return __do_kmalloc_node(size, gfp, node, caller); 534 533 } 534 + EXPORT_SYMBOL(__kmalloc_node_track_caller); 535 535 #endif 536 536 537 537 void kfree(const void *block)
+2
mm/slub.c
··· 4385 4385 4386 4386 return ret; 4387 4387 } 4388 + EXPORT_SYMBOL(__kmalloc_track_caller); 4388 4389 4389 4390 #ifdef CONFIG_NUMA 4390 4391 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, ··· 4416 4415 4417 4416 return ret; 4418 4417 } 4418 + EXPORT_SYMBOL(__kmalloc_node_track_caller); 4419 4419 #endif 4420 4420 4421 4421 #ifdef CONFIG_SYSFS