
Merge tag 'drm-msm-next-2023-12-15' of https://gitlab.freedesktop.org/drm/msm into drm-next

Updates for v6.8:

Core:
- Add support for SDM670, SM8650
- Handle the CFG interconnect to fix obscure hangs / timeouts on register writes
- Kconfig fix for QMP dependency
- DT schema fixes

DPU:
- Add support for SDM670, SM8650
- Enable SmartDMA on SM8350 and SM8450
- Correct UBWC settings for SC8280XP
- Fix catalog settings for SC8180X
- Actually make use of the DPU version to switch between QSEED3/3LITE/4
  scalers
- Use devres-managed and drm-managed allocations where appropriate
- misc other fixes
- Enable YUV writeback on SC7280, SM8250
- Enable writeback on SM8350, SM8450
- CRC fix when encoder is selected as the input source
- other misc fixes

MDP4:
- Use devres-managed and drm-managed allocations where appropriate
- flush vblank event on CRTC disable

MDP5:
- Use devres-managed and drm-managed allocations where appropriate

DP:
- Add support for SM8650
- Enable PM runtime support
- Merge msm-specific debugfs dir with the generic one
- Describe DisplayPort on SM8150 in DeviceTree bindings
- Move dp_display_get_next_bridge() to probe()

DSI:
- Add support for SM8650
- Enable PM runtime support

GPU/GEM:
- demote userspace-triggerable warnings to debug
- add GEM object metadata UAPI
- move GPU devcoredumps to GPU device
- fix hangcheck to skip retired submits
- expose UBWC config to userspace
- fix a680 chip-id
- drm_exec conversion
- drm/ci: remove rebase-merge directory (to unblock CI)

[airlied: fix drm_exec/amd interaction]
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGs9auYqmo-7NSd9FsbNBCDf7aBevd=4xkcF3A5G_OGvMQ@mail.gmail.com

+3623 -2015
+2
Documentation/devicetree/bindings/display/msm/dp-controller.yaml
··· 26 26 - qcom,sc8280xp-edp 27 27 - qcom,sdm845-dp 28 28 - qcom,sm8350-dp 29 + - qcom,sm8650-dp 29 30 - items: 30 31 - enum: 32 + - qcom,sm8150-dp 31 33 - qcom,sm8250-dp 32 34 - qcom,sm8450-dp 33 35 - qcom,sm8550-dp
+3
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
··· 25 25 - qcom,sc7180-dsi-ctrl 26 26 - qcom,sc7280-dsi-ctrl 27 27 - qcom,sdm660-dsi-ctrl 28 + - qcom,sdm670-dsi-ctrl 28 29 - qcom,sdm845-dsi-ctrl 29 30 - qcom,sm6115-dsi-ctrl 30 31 - qcom,sm6125-dsi-ctrl ··· 36 35 - qcom,sm8350-dsi-ctrl 37 36 - qcom,sm8450-dsi-ctrl 38 37 - qcom,sm8550-dsi-ctrl 38 + - qcom,sm8650-dsi-ctrl 39 39 - const: qcom,mdss-dsi-ctrl 40 40 - enum: 41 41 - qcom,dsi-ctrl-6g-qcm2290 ··· 335 333 - qcom,sm8350-dsi-ctrl 336 334 - qcom,sm8450-dsi-ctrl 337 335 - qcom,sm8550-dsi-ctrl 336 + - qcom,sm8650-dsi-ctrl 338 337 then: 339 338 properties: 340 339 clocks:
+1
Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
··· 22 22 - qcom,sm8350-dsi-phy-5nm 23 23 - qcom,sm8450-dsi-phy-5nm 24 24 - qcom,sm8550-dsi-phy-4nm 25 + - qcom,sm8650-dsi-phy-4nm 25 26 26 27 reg: 27 28 items:
+14 -4
Documentation/devicetree/bindings/display/msm/mdss-common.yaml
··· 61 61 62 62 ranges: true 63 63 64 + # This is not a perfect description, but it's impossible to discern and match 65 + # the entries like we do with interconnect-names 64 66 interconnects: 65 67 minItems: 1 66 68 items: 67 69 - description: Interconnect path from mdp0 (or a single mdp) port to the data bus 68 70 - description: Interconnect path from mdp1 port to the data bus 71 + - description: Interconnect path from CPU to the reg bus 69 72 70 73 interconnect-names: 71 - minItems: 1 72 - items: 73 - - const: mdp0-mem 74 - - const: mdp1-mem 74 + oneOf: 75 + - minItems: 1 76 + items: 77 + - const: mdp0-mem 78 + - const: cpu-cfg 79 + 80 + - minItems: 2 81 + items: 82 + - const: mdp0-mem 83 + - const: mdp1-mem 84 + - const: cpu-cfg 75 85 76 86 resets: 77 87 items:
+15 -6
Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml
··· 36 36 maxItems: 2 37 37 38 38 interconnects: 39 - maxItems: 1 39 + items: 40 + - description: Interconnect path from mdp0 port to the data bus 41 + - description: Interconnect path from CPU to the reg bus 40 42 41 43 interconnect-names: 42 - maxItems: 1 44 + items: 45 + - const: mdp0-mem 46 + - const: cpu-cfg 43 47 44 48 patternProperties: 45 49 "^display-controller@[0-9a-f]+$": ··· 60 56 61 57 properties: 62 58 compatible: 63 - const: qcom,dsi-ctrl-6g-qcm2290 59 + items: 60 + - const: qcom,qcm2290-dsi-ctrl 61 + - const: qcom,mdss-dsi-ctrl 64 62 65 63 "^phy@[0-9a-f]+$": 66 64 type: object ··· 102 96 interrupt-controller; 103 97 #interrupt-cells = <1>; 104 98 105 - interconnects = <&mmrt_virt MASTER_MDP0 &bimc SLAVE_EBI1>; 106 - interconnect-names = "mdp0-mem"; 99 + interconnects = <&mmrt_virt MASTER_MDP0 &bimc SLAVE_EBI1>, 100 + <&bimc MASTER_APPSS_PROC &config_noc SLAVE_DISPLAY_CFG>; 101 + interconnect-names = "mdp0-mem", 102 + "cpu-cfg"; 107 103 108 104 iommus = <&apps_smmu 0x420 0x2>, 109 105 <&apps_smmu 0x421 0x0>; ··· 144 136 }; 145 137 146 138 dsi@5e94000 { 147 - compatible = "qcom,dsi-ctrl-6g-qcm2290"; 139 + compatible = "qcom,qcm2290-dsi-ctrl", 140 + "qcom,mdss-dsi-ctrl"; 148 141 reg = <0x05e94000 0x400>; 149 142 reg-names = "dsi_ctrl"; 150 143
+10 -4
Documentation/devicetree/bindings/display/msm/qcom,sc7180-mdss.yaml
··· 36 36 maxItems: 1 37 37 38 38 interconnects: 39 - maxItems: 1 39 + items: 40 + - description: Interconnect path from mdp0 port to the data bus 41 + - description: Interconnect path from CPU to the reg bus 40 42 41 43 interconnect-names: 42 - maxItems: 1 44 + items: 45 + - const: mdp0-mem 46 + - const: cpu-cfg 43 47 44 48 patternProperties: 45 49 "^display-controller@[0-9a-f]+$": ··· 110 106 interrupt-controller; 111 107 #interrupt-cells = <1>; 112 108 113 - interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>; 114 - interconnect-names = "mdp0-mem"; 109 + interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>, 110 + <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_DISPLAY_CFG>; 111 + interconnect-names = "mdp0-mem", 112 + "cpu-cfg"; 115 113 116 114 iommus = <&apps_smmu 0x800 0x2>; 117 115 ranges;
+10 -4
Documentation/devicetree/bindings/display/msm/qcom,sc7280-mdss.yaml
··· 36 36 maxItems: 1 37 37 38 38 interconnects: 39 - maxItems: 1 39 + items: 40 + - description: Interconnect path from mdp0 port to the data bus 41 + - description: Interconnect path from CPU to the reg bus 40 42 41 43 interconnect-names: 42 - maxItems: 1 44 + items: 45 + - const: mdp0-mem 46 + - const: cpu-cfg 43 47 44 48 patternProperties: 45 49 "^display-controller@[0-9a-f]+$": ··· 122 118 interrupt-controller; 123 119 #interrupt-cells = <1>; 124 120 125 - interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>; 126 - interconnect-names = "mdp0-mem"; 121 + interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>, 122 + <&gem_noc MASTER_APPSS_PROC &cnoc2 SLAVE_DISPLAY_CFG>; 123 + interconnect-names = "mdp0-mem", 124 + "cpu-cfg"; 127 125 128 126 iommus = <&apps_smmu 0x900 0x402>; 129 127 ranges;
+292
Documentation/devicetree/bindings/display/msm/qcom,sdm670-mdss.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/msm/qcom,sdm670-mdss.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Qualcomm SDM670 Display MDSS 8 + 9 + maintainers: 10 + - Richard Acayan <mailingradian@gmail.com> 11 + 12 + description: 13 + SDM670 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks 14 + like DPU display controller, DSI and DP interfaces etc. 15 + 16 + $ref: /schemas/display/msm/mdss-common.yaml# 17 + 18 + properties: 19 + compatible: 20 + const: qcom,sdm670-mdss 21 + 22 + clocks: 23 + items: 24 + - description: Display AHB clock from gcc 25 + - description: Display core clock 26 + 27 + clock-names: 28 + items: 29 + - const: iface 30 + - const: core 31 + 32 + iommus: 33 + maxItems: 2 34 + 35 + interconnects: 36 + maxItems: 2 37 + 38 + interconnect-names: 39 + maxItems: 2 40 + 41 + patternProperties: 42 + "^display-controller@[0-9a-f]+$": 43 + type: object 44 + additionalProperties: true 45 + 46 + properties: 47 + compatible: 48 + const: qcom,sdm670-dpu 49 + 50 + "^displayport-controller@[0-9a-f]+$": 51 + type: object 52 + additionalProperties: true 53 + 54 + properties: 55 + compatible: 56 + const: qcom,sdm670-dp 57 + 58 + "^dsi@[0-9a-f]+$": 59 + type: object 60 + additionalProperties: true 61 + 62 + properties: 63 + compatible: 64 + contains: 65 + const: qcom,sdm670-dsi-ctrl 66 + 67 + "^phy@[0-9a-f]+$": 68 + type: object 69 + additionalProperties: true 70 + 71 + properties: 72 + compatible: 73 + const: qcom,dsi-phy-10nm 74 + 75 + required: 76 + - compatible 77 + 78 + unevaluatedProperties: false 79 + 80 + examples: 81 + - | 82 + #include <dt-bindings/clock/qcom,dispcc-sdm845.h> 83 + #include <dt-bindings/clock/qcom,gcc-sdm845.h> 84 + #include <dt-bindings/clock/qcom,rpmh.h> 85 + #include <dt-bindings/interconnect/qcom,sdm670-rpmh.h> 86 + #include <dt-bindings/interrupt-controller/arm-gic.h> 87 + #include <dt-bindings/power/qcom-rpmpd.h> 88 + 89 + display-subsystem@ae00000 { 90 + compatible = "qcom,sdm670-mdss"; 91 + reg = <0x0ae00000 0x1000>; 92 + reg-names = "mdss"; 93 + power-domains = <&dispcc MDSS_GDSC>; 94 + 95 + clocks = <&gcc GCC_DISP_AHB_CLK>, 96 + <&dispcc DISP_CC_MDSS_MDP_CLK>; 97 + clock-names = "iface", "core"; 98 + 99 + interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; 100 + interrupt-controller; 101 + #interrupt-cells = <1>; 102 + 103 + interconnects = <&mmss_noc MASTER_MDP_PORT0 0 &mem_noc SLAVE_EBI_CH0 0>, 104 + <&mmss_noc MASTER_MDP_PORT1 0 &mem_noc SLAVE_EBI_CH0 0>; 105 + interconnect-names = "mdp0-mem", "mdp1-mem"; 106 + 107 + iommus = <&apps_smmu 0x880 0x8>, 108 + <&apps_smmu 0xc80 0x8>; 109 + 110 + #address-cells = <1>; 111 + #size-cells = <1>; 112 + ranges; 113 + 114 + display-controller@ae01000 { 115 + compatible = "qcom,sdm670-dpu"; 116 + reg = <0x0ae01000 0x8f000>, 117 + <0x0aeb0000 0x2008>; 118 + reg-names = "mdp", "vbif"; 119 + 120 + clocks = <&gcc GCC_DISP_AXI_CLK>, 121 + <&dispcc DISP_CC_MDSS_AHB_CLK>, 122 + <&dispcc DISP_CC_MDSS_AXI_CLK>, 123 + <&dispcc DISP_CC_MDSS_MDP_CLK>, 124 + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; 125 + clock-names = "gcc-bus", "iface", "bus", "core", "vsync"; 126 + 127 + interrupt-parent = <&mdss>; 128 + interrupts = <0>; 129 + power-domains = <&rpmhpd SDM670_CX>; 130 + operating-points-v2 = <&mdp_opp_table>; 131 + 132 + ports { 133 + #address-cells = <1>; 134 + #size-cells = <0>; 135 + 136 + port@0 { 137 + reg = <0>; 138 + dpu_intf1_out: endpoint { 139 + remote-endpoint = <&mdss_dsi0_in>; 140 + 
}; 141 + }; 142 + 143 + port@1 { 144 + reg = <1>; 145 + dpu_intf2_out: endpoint { 146 + remote-endpoint = <&mdss_dsi1_in>; 147 + }; 148 + }; 149 + }; 150 + }; 151 + 152 + dsi@ae94000 { 153 + compatible = "qcom,sdm670-dsi-ctrl", "qcom,mdss-dsi-ctrl"; 154 + reg = <0x0ae94000 0x400>; 155 + reg-names = "dsi_ctrl"; 156 + 157 + interrupt-parent = <&mdss>; 158 + interrupts = <4>; 159 + 160 + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>, 161 + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, 162 + <&dispcc DISP_CC_MDSS_PCLK0_CLK>, 163 + <&dispcc DISP_CC_MDSS_ESC0_CLK>, 164 + <&dispcc DISP_CC_MDSS_AHB_CLK>, 165 + <&dispcc DISP_CC_MDSS_AXI_CLK>; 166 + clock-names = "byte", 167 + "byte_intf", 168 + "pixel", 169 + "core", 170 + "iface", 171 + "bus"; 172 + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, 173 + <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>; 174 + assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>; 175 + 176 + operating-points-v2 = <&dsi_opp_table>; 177 + power-domains = <&rpmhpd SDM670_CX>; 178 + 179 + phys = <&mdss_dsi0_phy>; 180 + phy-names = "dsi"; 181 + 182 + #address-cells = <1>; 183 + #size-cells = <0>; 184 + 185 + ports { 186 + #address-cells = <1>; 187 + #size-cells = <0>; 188 + 189 + port@0 { 190 + reg = <0>; 191 + mdss_dsi0_in: endpoint { 192 + remote-endpoint = <&dpu_intf1_out>; 193 + }; 194 + }; 195 + 196 + port@1 { 197 + reg = <1>; 198 + mdss_dsi0_out: endpoint { 199 + }; 200 + }; 201 + }; 202 + }; 203 + 204 + mdss_dsi0_phy: phy@ae94400 { 205 + compatible = "qcom,dsi-phy-10nm"; 206 + reg = <0x0ae94400 0x200>, 207 + <0x0ae94600 0x280>, 208 + <0x0ae94a00 0x1e0>; 209 + reg-names = "dsi_phy", 210 + "dsi_phy_lane", 211 + "dsi_pll"; 212 + 213 + #clock-cells = <1>; 214 + #phy-cells = <0>; 215 + 216 + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, 217 + <&rpmhcc RPMH_CXO_CLK>; 218 + clock-names = "iface", "ref"; 219 + vdds-supply = <&vreg_dsi_phy>; 220 + }; 221 + 222 + dsi@ae96000 { 223 + compatible = "qcom,sdm670-dsi-ctrl", "qcom,mdss-dsi-ctrl"; 224 + reg = <0x0ae96000 0x400>; 225 + reg-names = "dsi_ctrl"; 226 + 227 + interrupt-parent = <&mdss>; 228 + interrupts = <5>; 229 + 230 + clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>, 231 + <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>, 232 + <&dispcc DISP_CC_MDSS_PCLK1_CLK>, 233 + <&dispcc DISP_CC_MDSS_ESC1_CLK>, 234 + <&dispcc DISP_CC_MDSS_AHB_CLK>, 235 + <&dispcc DISP_CC_MDSS_AXI_CLK>; 236 + clock-names = "byte", 237 + "byte_intf", 238 + "pixel", 239 + "core", 240 + "iface", 241 + "bus"; 242 + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>, 243 + <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>; 244 + assigned-clock-parents = <&dsi1_phy 0>, <&dsi1_phy 1>; 245 + 246 + operating-points-v2 = <&dsi_opp_table>; 247 + power-domains = <&rpmhpd SDM670_CX>; 248 + 249 + phys = <&dsi1_phy>; 250 + phy-names = "dsi"; 251 + 252 + #address-cells = <1>; 253 + #size-cells = <0>; 254 + 255 + ports { 256 + #address-cells = <1>; 257 + #size-cells = <0>; 258 + 259 + port@0 { 260 + reg = <0>; 261 + mdss_dsi1_in: endpoint { 262 + remote-endpoint = <&dpu_intf2_out>; 263 + }; 264 + }; 265 + 266 + port@1 { 267 + reg = <1>; 268 + mdss_dsi1_out: endpoint { 269 + }; 270 + }; 271 + }; 272 + }; 273 + 274 + mdss_dsi1_phy: phy@ae96400 { 275 + compatible = "qcom,dsi-phy-10nm"; 276 + reg = <0x0ae96400 0x200>, 277 + <0x0ae96600 0x280>, 278 + <0x0ae96a00 0x10e>; 279 + reg-names = "dsi_phy", 280 + "dsi_phy_lane", 281 + "dsi_pll"; 282 + 283 + #clock-cells = <1>; 284 + #phy-cells = <0>; 285 + 286 + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, 287 + <&rpmhcc RPMH_CXO_CLK>; 288 + clock-names = "iface", 
"ref"; 289 + vdds-supply = <&vreg_dsi_phy>; 290 + }; 291 + }; 292 + ...
+3 -1
Documentation/devicetree/bindings/display/msm/qcom,sdm845-dpu.yaml
··· 13 13 14 14 properties: 15 15 compatible: 16 - const: qcom,sdm845-dpu 16 + enum: 17 + - qcom,sdm670-dpu 18 + - qcom,sdm845-dpu 17 19 18 20 reg: 19 21 items:
+10
Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml
··· 29 29 iommus: 30 30 maxItems: 2 31 31 32 + interconnects: 33 + items: 34 + - description: Interconnect path from mdp0 port to the data bus 35 + - description: Interconnect path from CPU to the reg bus 36 + 37 + interconnect-names: 38 + items: 39 + - const: mdp0-mem 40 + - const: cpu-cfg 41 + 32 42 patternProperties: 33 43 "^display-controller@[0-9a-f]+$": 34 44 type: object
+6 -2
Documentation/devicetree/bindings/display/msm/qcom,sm6125-mdss.yaml
··· 35 35 maxItems: 1 36 36 37 37 interconnects: 38 - maxItems: 2 38 + items: 39 + - description: Interconnect path from mdp0 port to the data bus 40 + - description: Interconnect path from CPU to the reg bus 39 41 40 42 interconnect-names: 41 - maxItems: 2 43 + items: 44 + - const: mdp0-mem 45 + - const: cpu-cfg 42 46 43 47 patternProperties: 44 48 "^display-controller@[0-9a-f]+$":
+6 -2
Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml
··· 35 35 maxItems: 1 36 36 37 37 interconnects: 38 - maxItems: 2 38 + items: 39 + - description: Interconnect path from mdp0 port to the data bus 40 + - description: Interconnect path from CPU to the reg bus 39 41 40 42 interconnect-names: 41 - maxItems: 2 43 + items: 44 + - const: mdp0-mem 45 + - const: cpu-cfg 42 46 43 47 patternProperties: 44 48 "^display-controller@[0-9a-f]+$":
+6 -2
Documentation/devicetree/bindings/display/msm/qcom,sm6375-mdss.yaml
··· 35 35 maxItems: 1 36 36 37 37 interconnects: 38 - maxItems: 2 38 + items: 39 + - description: Interconnect path from mdp0 port to the data bus 40 + - description: Interconnect path from CPU to the reg bus 39 41 40 42 interconnect-names: 41 - maxItems: 2 43 + items: 44 + - const: mdp0-mem 45 + - const: cpu-cfg 42 46 43 47 patternProperties: 44 48 "^display-controller@[0-9a-f]+$":
+3 -3
Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml
··· 69 69 70 70 properties: 71 71 compatible: 72 - const: qcom,dsi-phy-7nm 72 + const: qcom,dsi-phy-7nm-8150 73 73 74 74 unevaluatedProperties: false 75 75 ··· 247 247 }; 248 248 249 249 dsi0_phy: phy@ae94400 { 250 - compatible = "qcom,dsi-phy-7nm"; 250 + compatible = "qcom,dsi-phy-7nm-8150"; 251 251 reg = <0x0ae94400 0x200>, 252 252 <0x0ae94600 0x280>, 253 253 <0x0ae94900 0x260>; ··· 318 318 }; 319 319 320 320 dsi1_phy: phy@ae96400 { 321 - compatible = "qcom,dsi-phy-7nm"; 321 + compatible = "qcom,dsi-phy-7nm-8150"; 322 322 reg = <0x0ae96400 0x200>, 323 323 <0x0ae96600 0x280>, 324 324 <0x0ae96900 0x260>;
+10
Documentation/devicetree/bindings/display/msm/qcom,sm8250-mdss.yaml
··· 52 52 compatible: 53 53 const: qcom,sm8250-dpu 54 54 55 + "^displayport-controller@[0-9a-f]+$": 56 + type: object 57 + additionalProperties: true 58 + 59 + properties: 60 + compatible: 61 + items: 62 + - const: qcom,sm8250-dp 63 + - const: qcom,sm8350-dp 64 + 55 65 "^dsi@[0-9a-f]+$": 56 66 type: object 57 67 additionalProperties: true
+8 -5
Documentation/devicetree/bindings/display/msm/qcom,sm8450-mdss.yaml
··· 30 30 maxItems: 1 31 31 32 32 interconnects: 33 - maxItems: 2 33 + maxItems: 3 34 34 35 35 interconnect-names: 36 - maxItems: 2 36 + maxItems: 3 37 37 38 38 patternProperties: 39 39 "^display-controller@[0-9a-f]+$": ··· 91 91 reg = <0x0ae00000 0x1000>; 92 92 reg-names = "mdss"; 93 93 94 - interconnects = <&mmss_noc MASTER_MDP_DISP 0 &mc_virt SLAVE_EBI1_DISP 0>, 95 - <&mmss_noc MASTER_MDP_DISP 0 &mc_virt SLAVE_EBI1_DISP 0>; 96 - interconnect-names = "mdp0-mem", "mdp1-mem"; 94 + interconnects = <&mmss_noc MASTER_MDP_DISP &mc_virt SLAVE_EBI1_DISP>, 95 + <&mmss_noc MASTER_MDP_DISP &mc_virt SLAVE_EBI1_DISP>, 96 + <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_DISPLAY_CFG>; 97 + interconnect-names = "mdp0-mem", 98 + "mdp1-mem", 99 + "cpu-cfg"; 97 100 98 101 resets = <&dispcc DISP_CC_MDSS_CORE_BCR>; 99 102
+127
Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/msm/qcom,sm8650-dpu.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Qualcomm SM8650 Display DPU 8 + 9 + maintainers: 10 + - Neil Armstrong <neil.armstrong@linaro.org> 11 + 12 + $ref: /schemas/display/msm/dpu-common.yaml# 13 + 14 + properties: 15 + compatible: 16 + const: qcom,sm8650-dpu 17 + 18 + reg: 19 + items: 20 + - description: Address offset and size for mdp register set 21 + - description: Address offset and size for vbif register set 22 + 23 + reg-names: 24 + items: 25 + - const: mdp 26 + - const: vbif 27 + 28 + clocks: 29 + items: 30 + - description: Display hf axi 31 + - description: Display MDSS ahb 32 + - description: Display lut 33 + - description: Display core 34 + - description: Display vsync 35 + 36 + clock-names: 37 + items: 38 + - const: nrt_bus 39 + - const: iface 40 + - const: lut 41 + - const: core 42 + - const: vsync 43 + 44 + required: 45 + - compatible 46 + - reg 47 + - reg-names 48 + - clocks 49 + - clock-names 50 + 51 + unevaluatedProperties: false 52 + 53 + examples: 54 + - | 55 + #include <dt-bindings/interrupt-controller/arm-gic.h> 56 + #include <dt-bindings/power/qcom,rpmhpd.h> 57 + 58 + display-controller@ae01000 { 59 + compatible = "qcom,sm8650-dpu"; 60 + reg = <0x0ae01000 0x8f000>, 61 + <0x0aeb0000 0x2008>; 62 + reg-names = "mdp", "vbif"; 63 + 64 + clocks = <&gcc_axi_clk>, 65 + <&dispcc_ahb_clk>, 66 + <&dispcc_mdp_lut_clk>, 67 + <&dispcc_mdp_clk>, 68 + <&dispcc_vsync_clk>; 69 + clock-names = "nrt_bus", 70 + "iface", 71 + "lut", 72 + "core", 73 + "vsync"; 74 + 75 + assigned-clocks = <&dispcc_vsync_clk>; 76 + assigned-clock-rates = <19200000>; 77 + 78 + operating-points-v2 = <&mdp_opp_table>; 79 + power-domains = <&rpmhpd RPMHPD_MMCX>; 80 + 81 + interrupt-parent = <&mdss>; 82 + interrupts = <0>; 83 + 84 + ports { 85 + #address-cells = <1>; 86 + #size-cells = <0>; 87 + 88 + port@0 { 89 + reg = <0>; 90 + dpu_intf1_out: endpoint { 91 + remote-endpoint = <&dsi0_in>; 92 + }; 93 + }; 94 + 95 + port@1 { 96 + reg = <1>; 97 + dpu_intf2_out: endpoint { 98 + remote-endpoint = <&dsi1_in>; 99 + }; 100 + }; 101 + }; 102 + 103 + mdp_opp_table: opp-table { 104 + compatible = "operating-points-v2"; 105 + 106 + opp-200000000 { 107 + opp-hz = /bits/ 64 <200000000>; 108 + required-opps = <&rpmhpd_opp_low_svs>; 109 + }; 110 + 111 + opp-325000000 { 112 + opp-hz = /bits/ 64 <325000000>; 113 + required-opps = <&rpmhpd_opp_svs>; 114 + }; 115 + 116 + opp-375000000 { 117 + opp-hz = /bits/ 64 <375000000>; 118 + required-opps = <&rpmhpd_opp_svs_l1>; 119 + }; 120 + 121 + opp-514000000 { 122 + opp-hz = /bits/ 64 <514000000>; 123 + required-opps = <&rpmhpd_opp_nom>; 124 + }; 125 + }; 126 + }; 127 + ...
+328
Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/msm/qcom,sm8650-mdss.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Qualcomm SM8650 Display MDSS 8 + 9 + maintainers: 10 + - Neil Armstrong <neil.armstrong@linaro.org> 11 + 12 + description: 13 + SM8650 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like 14 + DPU display controller, DSI and DP interfaces etc. 15 + 16 + $ref: /schemas/display/msm/mdss-common.yaml# 17 + 18 + properties: 19 + compatible: 20 + const: qcom,sm8650-mdss 21 + 22 + clocks: 23 + items: 24 + - description: Display AHB 25 + - description: Display hf AXI 26 + - description: Display core 27 + 28 + iommus: 29 + maxItems: 1 30 + 31 + interconnects: 32 + maxItems: 2 33 + 34 + interconnect-names: 35 + maxItems: 2 36 + 37 + patternProperties: 38 + "^display-controller@[0-9a-f]+$": 39 + type: object 40 + properties: 41 + compatible: 42 + const: qcom,sm8650-dpu 43 + 44 + "^displayport-controller@[0-9a-f]+$": 45 + type: object 46 + properties: 47 + compatible: 48 + const: qcom,sm8650-dp 49 + 50 + "^dsi@[0-9a-f]+$": 51 + type: object 52 + properties: 53 + compatible: 54 + items: 55 + - const: qcom,sm8650-dsi-ctrl 56 + - const: qcom,mdss-dsi-ctrl 57 + 58 + "^phy@[0-9a-f]+$": 59 + type: object 60 + properties: 61 + compatible: 62 + const: qcom,sm8650-dsi-phy-4nm 63 + 64 + required: 65 + - compatible 66 + 67 + unevaluatedProperties: false 68 + 69 + examples: 70 + - | 71 + #include <dt-bindings/clock/qcom,rpmh.h> 72 + #include <dt-bindings/interrupt-controller/arm-gic.h> 73 + #include <dt-bindings/power/qcom,rpmhpd.h> 74 + 75 + display-subsystem@ae00000 { 76 + compatible = "qcom,sm8650-mdss"; 77 + reg = <0x0ae00000 0x1000>; 78 + reg-names = "mdss"; 79 + 80 + resets = <&dispcc_core_bcr>; 81 + 82 + power-domains = <&dispcc_gdsc>; 83 + 84 + clocks = <&gcc_ahb_clk>, 85 + <&gcc_axi_clk>, 86 + <&dispcc_mdp_clk>; 87 + clock-names = "bus", "nrt_bus", "core"; 88 + 89 + interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; 90 + interrupt-controller; 91 + #interrupt-cells = <1>; 92 + 93 + iommus = <&apps_smmu 0x1c00 0x2>; 94 + 95 + #address-cells = <1>; 96 + #size-cells = <1>; 97 + ranges; 98 + 99 + display-controller@ae01000 { 100 + compatible = "qcom,sm8650-dpu"; 101 + reg = <0x0ae01000 0x8f000>, 102 + <0x0aeb0000 0x2008>; 103 + reg-names = "mdp", "vbif"; 104 + 105 + clocks = <&gcc_axi_clk>, 106 + <&dispcc_ahb_clk>, 107 + <&dispcc_mdp_lut_clk>, 108 + <&dispcc_mdp_clk>, 109 + <&dispcc_mdp_vsync_clk>; 110 + clock-names = "nrt_bus", 111 + "iface", 112 + "lut", 113 + "core", 114 + "vsync"; 115 + 116 + assigned-clocks = <&dispcc_mdp_vsync_clk>; 117 + assigned-clock-rates = <19200000>; 118 + 119 + operating-points-v2 = <&mdp_opp_table>; 120 + power-domains = <&rpmhpd RPMHPD_MMCX>; 121 + 122 + interrupt-parent = <&mdss>; 123 + interrupts = <0>; 124 + 125 + ports { 126 + #address-cells = <1>; 127 + #size-cells = <0>; 128 + 129 + port@0 { 130 + reg = <0>; 131 + dpu_intf1_out: endpoint { 132 + remote-endpoint = <&dsi0_in>; 133 + }; 134 + }; 135 + 136 + port@1 { 137 + reg = <1>; 138 + dpu_intf2_out: endpoint { 139 + remote-endpoint = <&dsi1_in>; 140 + }; 141 + }; 142 + }; 143 + 144 + mdp_opp_table: opp-table { 145 + compatible = "operating-points-v2"; 146 + 147 + opp-200000000 { 148 + opp-hz = /bits/ 64 <200000000>; 149 + required-opps = <&rpmhpd_opp_low_svs>; 150 + }; 151 + 152 + opp-325000000 { 153 + opp-hz = /bits/ 64 <325000000>; 154 + required-opps = <&rpmhpd_opp_svs>; 155 
+ }; 156 + 157 + opp-375000000 { 158 + opp-hz = /bits/ 64 <375000000>; 159 + required-opps = <&rpmhpd_opp_svs_l1>; 160 + }; 161 + 162 + opp-514000000 { 163 + opp-hz = /bits/ 64 <514000000>; 164 + required-opps = <&rpmhpd_opp_nom>; 165 + }; 166 + }; 167 + }; 168 + 169 + dsi@ae94000 { 170 + compatible = "qcom,sm8650-dsi-ctrl", "qcom,mdss-dsi-ctrl"; 171 + reg = <0x0ae94000 0x400>; 172 + reg-names = "dsi_ctrl"; 173 + 174 + interrupt-parent = <&mdss>; 175 + interrupts = <4>; 176 + 177 + clocks = <&dispc_byte_clk>, 178 + <&dispcc_intf_clk>, 179 + <&dispcc_pclk>, 180 + <&dispcc_esc_clk>, 181 + <&dispcc_ahb_clk>, 182 + <&gcc_bus_clk>; 183 + clock-names = "byte", 184 + "byte_intf", 185 + "pixel", 186 + "core", 187 + "iface", 188 + "bus"; 189 + 190 + assigned-clocks = <&dispcc_byte_clk>, 191 + <&dispcc_pclk>; 192 + assigned-clock-parents = <&dsi0_phy 0>, <&dsi0_phy 1>; 193 + 194 + operating-points-v2 = <&dsi_opp_table>; 195 + power-domains = <&rpmhpd RPMHPD_MMCX>; 196 + 197 + phys = <&dsi0_phy>; 198 + phy-names = "dsi"; 199 + 200 + #address-cells = <1>; 201 + #size-cells = <0>; 202 + 203 + ports { 204 + #address-cells = <1>; 205 + #size-cells = <0>; 206 + 207 + port@0 { 208 + reg = <0>; 209 + dsi0_in: endpoint { 210 + remote-endpoint = <&dpu_intf1_out>; 211 + }; 212 + }; 213 + 214 + port@1 { 215 + reg = <1>; 216 + dsi0_out: endpoint { 217 + }; 218 + }; 219 + }; 220 + 221 + dsi_opp_table: opp-table { 222 + compatible = "operating-points-v2"; 223 + 224 + opp-187500000 { 225 + opp-hz = /bits/ 64 <187500000>; 226 + required-opps = <&rpmhpd_opp_low_svs>; 227 + }; 228 + 229 + opp-300000000 { 230 + opp-hz = /bits/ 64 <300000000>; 231 + required-opps = <&rpmhpd_opp_svs>; 232 + }; 233 + 234 + opp-358000000 { 235 + opp-hz = /bits/ 64 <358000000>; 236 + required-opps = <&rpmhpd_opp_svs_l1>; 237 + }; 238 + }; 239 + }; 240 + 241 + dsi0_phy: phy@ae94400 { 242 + compatible = "qcom,sm8650-dsi-phy-4nm"; 243 + reg = <0x0ae95000 0x200>, 244 + <0x0ae95200 0x280>, 245 + <0x0ae95500 0x400>; 246 + reg-names = "dsi_phy", 247 + "dsi_phy_lane", 248 + "dsi_pll"; 249 + 250 + #clock-cells = <1>; 251 + #phy-cells = <0>; 252 + 253 + clocks = <&dispcc_iface_clk>, 254 + <&rpmhcc_ref_clk>; 255 + clock-names = "iface", "ref"; 256 + }; 257 + 258 + dsi@ae96000 { 259 + compatible = "qcom,sm8650-dsi-ctrl", "qcom,mdss-dsi-ctrl"; 260 + reg = <0x0ae96000 0x400>; 261 + reg-names = "dsi_ctrl"; 262 + 263 + interrupt-parent = <&mdss>; 264 + interrupts = <5>; 265 + 266 + clocks = <&dispc_byte_clk>, 267 + <&dispcc_intf_clk>, 268 + <&dispcc_pclk>, 269 + <&dispcc_esc_clk>, 270 + <&dispcc_ahb_clk>, 271 + <&gcc_bus_clk>; 272 + clock-names = "byte", 273 + "byte_intf", 274 + "pixel", 275 + "core", 276 + "iface", 277 + "bus"; 278 + 279 + assigned-clocks = <&dispcc_byte_clk>, 280 + <&dispcc_pclk>; 281 + assigned-clock-parents = <&dsi1_phy 0>, <&dsi1_phy 1>; 282 + 283 + operating-points-v2 = <&dsi_opp_table>; 284 + power-domains = <&rpmhpd RPMHPD_MMCX>; 285 + 286 + phys = <&dsi1_phy>; 287 + phy-names = "dsi"; 288 + 289 + #address-cells = <1>; 290 + #size-cells = <0>; 291 + 292 + ports { 293 + #address-cells = <1>; 294 + #size-cells = <0>; 295 + 296 + port@0 { 297 + reg = <0>; 298 + dsi1_in: endpoint { 299 + remote-endpoint = <&dpu_intf2_out>; 300 + }; 301 + }; 302 + 303 + port@1 { 304 + reg = <1>; 305 + dsi1_out: endpoint { 306 + }; 307 + }; 308 + }; 309 + }; 310 + 311 + dsi1_phy: phy@ae96400 { 312 + compatible = "qcom,sm8650-dsi-phy-4nm"; 313 + reg = <0x0ae97000 0x200>, 314 + <0x0ae97200 0x280>, 315 + <0x0ae97500 0x400>; 316 + reg-names = "dsi_phy", 317 
+ "dsi_phy_lane", 318 + "dsi_pll"; 319 + 320 + #clock-cells = <1>; 321 + #phy-cells = <0>; 322 + 323 + clocks = <&dispcc_iface_clk>, 324 + <&rpmhcc_ref_clk>; 325 + clock-names = "iface", "ref"; 326 + }; 327 + }; 328 + ...
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 1147 1147 1148 1148 ctx->n_vms = 1; 1149 1149 ctx->sync = &mem->sync; 1150 - drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 1150 + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 1151 1151 drm_exec_until_all_locked(&ctx->exec) { 1152 1152 ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); 1153 1153 drm_exec_retry_on_contention(&ctx->exec); ··· 1186 1186 int ret; 1187 1187 1188 1188 ctx->sync = &mem->sync; 1189 - drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 1189 + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 1190 1190 drm_exec_until_all_locked(&ctx->exec) { 1191 1191 ctx->n_vms = 0; 1192 1192 list_for_each_entry(entry, &mem->attachments, list) { ··· 2595 2595 2596 2596 amdgpu_sync_create(&sync); 2597 2597 2598 - drm_exec_init(&exec, 0); 2598 + drm_exec_init(&exec, 0, 0); 2599 2599 /* Reserve all BOs and page tables for validation */ 2600 2600 drm_exec_until_all_locked(&exec) { 2601 2601 /* Reserve all the page directories */ ··· 2853 2853 2854 2854 mutex_lock(&process_info->lock); 2855 2855 2856 - drm_exec_init(&exec, 0); 2856 + drm_exec_init(&exec, 0, 0); 2857 2857 drm_exec_until_all_locked(&exec) { 2858 2858 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2859 2859 vm_list_node) {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 66 66 67 67 amdgpu_sync_create(&p->sync); 68 68 drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT | 69 - DRM_EXEC_IGNORE_DUPLICATES); 69 + DRM_EXEC_IGNORE_DUPLICATES, 0); 70 70 return 0; 71 71 } 72 72
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
··· 70 70 struct drm_exec exec; 71 71 int r; 72 72 73 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 73 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 74 74 drm_exec_until_all_locked(&exec) { 75 75 r = amdgpu_vm_lock_pd(vm, &exec, 0); 76 76 if (likely(!r)) ··· 110 110 struct drm_exec exec; 111 111 int r; 112 112 113 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 113 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 114 114 drm_exec_until_all_locked(&exec) { 115 115 r = amdgpu_vm_lock_pd(vm, &exec, 0); 116 116 if (likely(!r))
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 203 203 struct drm_exec exec; 204 204 long r; 205 205 206 - drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES); 206 + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); 207 207 drm_exec_until_all_locked(&exec) { 208 208 r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1); 209 209 drm_exec_retry_on_contention(&exec); ··· 739 739 } 740 740 741 741 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | 742 - DRM_EXEC_IGNORE_DUPLICATES); 742 + DRM_EXEC_IGNORE_DUPLICATES, 0); 743 743 drm_exec_until_all_locked(&exec) { 744 744 if (gobj) { 745 745 r = drm_exec_lock_obj(&exec, gobj);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
··· 1183 1183 1184 1184 amdgpu_sync_create(&sync); 1185 1185 1186 - drm_exec_init(&exec, 0); 1186 + drm_exec_init(&exec, 0, 0); 1187 1187 drm_exec_until_all_locked(&exec) { 1188 1188 r = drm_exec_lock_obj(&exec, 1189 1189 &ctx_data->meta_data_obj->tbo.base); ··· 1254 1254 struct drm_exec exec; 1255 1255 long r; 1256 1256 1257 - drm_exec_init(&exec, 0); 1257 + drm_exec_init(&exec, 0, 0); 1258 1258 drm_exec_until_all_locked(&exec) { 1259 1259 r = drm_exec_lock_obj(&exec, 1260 1260 &ctx_data->meta_data_obj->tbo.base);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
··· 61 61 if (!bo) 62 62 return -EINVAL; 63 63 64 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 64 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 65 65 drm_exec_until_all_locked(&exec) { 66 66 r = amdgpu_vm_lock_pd(vm, &exec, 0); 67 67 if (likely(!r)) ··· 122 122 123 123 vm = &fpriv->vm; 124 124 125 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 125 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 126 126 drm_exec_until_all_locked(&exec) { 127 127 r = amdgpu_vm_lock_pd(vm, &exec, 0); 128 128 if (likely(!r))
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
··· 86 86 87 87 amdgpu_sync_create(&sync); 88 88 89 - drm_exec_init(&exec, 0); 89 + drm_exec_init(&exec, 0, 0); 90 90 drm_exec_until_all_locked(&exec) { 91 91 r = drm_exec_lock_obj(&exec, &bo->tbo.base); 92 92 drm_exec_retry_on_contention(&exec); ··· 149 149 struct drm_exec exec; 150 150 long r; 151 151 152 - drm_exec_init(&exec, 0); 152 + drm_exec_init(&exec, 0, 0); 153 153 drm_exec_until_all_locked(&exec) { 154 154 r = drm_exec_lock_obj(&exec, &bo->tbo.base); 155 155 drm_exec_retry_on_contention(&exec);
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 1481 1481 uint32_t gpuidx; 1482 1482 int r; 1483 1483 1484 - drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0); 1484 + drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0); 1485 1485 drm_exec_until_all_locked(&ctx->exec) { 1486 1486 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { 1487 1487 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
+3
drivers/gpu/drm/ci/build.sh
··· 58 58 git config --global user.name "freedesktop.org CI" 59 59 git config --global pull.rebase true 60 60 61 + # cleanup git state on the worker 62 + rm -rf .git/rebase-merge 63 + 61 64 # Try to merge fixes from target repo 62 65 if [ "$(git ls-remote --exit-code --heads ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes)" ]; then 63 66 git pull ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes
+10 -3
drivers/gpu/drm/drm_exec.c
··· 69 69 * drm_exec_init - initialize a drm_exec object 70 70 * @exec: the drm_exec object to initialize 71 71 * @flags: controls locking behavior, see DRM_EXEC_* defines 72 + * @nr: the initial # of objects 72 73 * 73 74 * Initialize the object and make sure that we can track locked objects. 75 + * 76 + * If nr is non-zero then it is used as the initial objects table size. 77 + * In either case, the table will grow (be re-allocated) on demand. 74 78 */ 75 - void drm_exec_init(struct drm_exec *exec, uint32_t flags) 79 + void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr) 76 80 { 81 + if (!nr) 82 + nr = PAGE_SIZE / sizeof(void *); 83 + 77 84 exec->flags = flags; 78 - exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL); 85 + exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL); 79 86 80 87 /* If allocation here fails, just delay that till the first use */ 81 - exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0; 88 + exec->max_objects = exec->objects ? nr : 0; 82 89 exec->num_objects = 0; 83 90 exec->contended = DRM_EXEC_DUMMY; 84 91 exec->prelocked = NULL;
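
The drm_exec_init() change above grows the function's signature with an initial object-count hint; every caller converted in this series passes 0 to keep the previous PAGE_SIZE-sized table. Below is a minimal sketch of the updated calling convention, built only from helpers visible in this diff (drm_exec_init, drm_exec_until_all_locked, drm_exec_retry_on_contention, drm_exec_prepare_obj) plus drm_exec_fini() for teardown; the wrapper function and its argument are hypothetical.

/* Sketch: lock a single GEM object with the three-argument drm_exec_init().
 * nr = 0 keeps the old behaviour (PAGE_SIZE / sizeof(void *) slots); a
 * non-zero nr pre-sizes the objects table, which still grows on demand.
 */
#include <drm/drm_exec.h>

static int lock_single_bo(struct drm_gem_object *obj)
{
    struct drm_exec exec;
    int ret;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 1);
    drm_exec_until_all_locked(&exec) {
        ret = drm_exec_prepare_obj(&exec, obj, 1); /* obj + one fence slot */
        drm_exec_retry_on_contention(&exec);
        if (ret)
            break;
    }
    drm_exec_fini(&exec);
    return ret;
}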
+2 -2
drivers/gpu/drm/drm_gpuvm.c
··· 1250 1250 unsigned int num_fences = vm_exec->num_fences; 1251 1251 int ret; 1252 1252 1253 - drm_exec_init(exec, vm_exec->flags); 1253 + drm_exec_init(exec, vm_exec->flags, 0); 1254 1254 1255 1255 drm_exec_until_all_locked(exec) { 1256 1256 ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences); ··· 1341 1341 struct drm_exec *exec = &vm_exec->exec; 1342 1342 int ret; 1343 1343 1344 - drm_exec_init(exec, vm_exec->flags); 1344 + drm_exec_init(exec, vm_exec->flags, 0); 1345 1345 1346 1346 drm_exec_until_all_locked(exec) { 1347 1347 ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
+1 -1
drivers/gpu/drm/imagination/pvr_job.c
··· 746 746 if (err) 747 747 goto out_job_data_cleanup; 748 748 749 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES); 749 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0); 750 750 751 751 xa_init_flags(&signal_array, XA_FLAGS_ALLOC); 752 752
+2
drivers/gpu/drm/msm/Kconfig
··· 6 6 depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST 7 7 depends on COMMON_CLK 8 8 depends on IOMMU_SUPPORT 9 + depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n 9 10 depends on QCOM_OCMEM || QCOM_OCMEM=n 10 11 depends on QCOM_LLCC || QCOM_LLCC=n 11 12 depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n ··· 17 16 select DRM_DP_AUX_BUS 18 17 select DRM_DISPLAY_DP_HELPER 19 18 select DRM_DISPLAY_HELPER 19 + select DRM_EXEC 20 20 select DRM_KMS_HELPER 21 21 select DRM_PANEL 22 22 select DRM_BRIDGE
+1
drivers/gpu/drm/msm/Makefile
··· 63 63 disp/dpu1/dpu_encoder_phys_wb.o \ 64 64 disp/dpu1/dpu_formats.o \ 65 65 disp/dpu1/dpu_hw_catalog.o \ 66 + disp/dpu1/dpu_hw_cdm.o \ 66 67 disp/dpu1/dpu_hw_ctl.o \ 67 68 disp/dpu1/dpu_hw_dsc.o \ 68 69 disp/dpu1/dpu_hw_dsc_1_2.o \
+12 -9
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 684 684 { 685 685 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 686 686 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); 687 - u32 regbit; 687 + u32 hbb; 688 688 int ret; 689 689 690 690 gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); ··· 820 820 821 821 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); 822 822 823 - /* Set the highest bank bit */ 824 - if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu)) 825 - regbit = 2; 826 - else 827 - regbit = 1; 823 + BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13); 824 + hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; 828 825 829 - gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7); 830 - gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1); 826 + gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7); 827 + gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1); 831 828 832 829 if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) || 833 830 adreno_is_a540(adreno_gpu)) 834 - gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit); 831 + gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, hbb); 835 832 836 833 /* Disable All flat shading optimization (ALLFLATOPTDIS) */ 837 834 gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10)); ··· 1781 1784 1782 1785 /* Set up the preemption specific bits and pieces for each ringbuffer */ 1783 1786 a5xx_preempt_init(gpu); 1787 + 1788 + /* Set the highest bank bit */ 1789 + if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu)) 1790 + adreno_gpu->ubwc_config.highest_bank_bit = 15; 1791 + else 1792 + adreno_gpu->ubwc_config.highest_bank_bit = 14; 1784 1793 1785 1794 return gpu; 1786 1795 }
+69 -53
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 1270 1270 gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]); 1271 1271 } 1272 1272 1273 - static void a6xx_set_ubwc_config(struct msm_gpu *gpu) 1273 + static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) 1274 1274 { 1275 - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 1276 1275 /* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */ 1277 - u32 rgb565_predicator = 0; 1276 + gpu->ubwc_config.rgb565_predicator = 0; 1278 1277 /* Unknown, introduced with A650 family */ 1279 - u32 uavflagprd_inv = 0; 1278 + gpu->ubwc_config.uavflagprd_inv = 0; 1280 1279 /* Whether the minimum access length is 64 bits */ 1281 - u32 min_acc_len = 0; 1280 + gpu->ubwc_config.min_acc_len = 0; 1282 1281 /* Entirely magic, per-GPU-gen value */ 1283 - u32 ubwc_mode = 0; 1282 + gpu->ubwc_config.ubwc_mode = 0; 1284 1283 /* 1285 1284 * The Highest Bank Bit value represents the bit of the highest DDR bank. 1286 - * We then subtract 13 from it (13 is the minimum value allowed by hw) and 1287 - * write the lowest two bits of the remaining value as hbb_lo and the 1288 - * one above it as hbb_hi to the hardware. This should ideally use DRAM 1289 - * type detection. 1285 + * This should ideally use DRAM type detection. 1290 1286 */ 1291 - u32 hbb_hi = 0; 1292 - u32 hbb_lo = 2; 1293 - /* Unknown, introduced with A640/680 */ 1294 - u32 amsbc = 0; 1287 + gpu->ubwc_config.highest_bank_bit = 15; 1295 1288 1296 - if (adreno_is_a610(adreno_gpu)) { 1297 - /* HBB = 14 */ 1298 - hbb_lo = 1; 1299 - min_acc_len = 1; 1300 - ubwc_mode = 1; 1289 + if (adreno_is_a610(gpu)) { 1290 + gpu->ubwc_config.highest_bank_bit = 14; 1291 + gpu->ubwc_config.min_acc_len = 1; 1292 + gpu->ubwc_config.ubwc_mode = 1; 1301 1293 } 1302 1294 1303 1295 /* a618 is using the hw default values */ 1304 - if (adreno_is_a618(adreno_gpu)) 1296 + if (adreno_is_a618(gpu)) 1305 1297 return; 1306 1298 1307 - if (adreno_is_a619_holi(adreno_gpu)) 1308 - hbb_lo = 0; 1299 + if (adreno_is_a619_holi(gpu)) 1300 + gpu->ubwc_config.highest_bank_bit = 13; 1309 1301 1310 - if (adreno_is_a640_family(adreno_gpu)) 1311 - amsbc = 1; 1302 + if (adreno_is_a640_family(gpu)) 1303 + gpu->ubwc_config.amsbc = 1; 1312 1304 1313 - if (adreno_is_a650(adreno_gpu) || 1314 - adreno_is_a660(adreno_gpu) || 1315 - adreno_is_a730(adreno_gpu) || 1316 - adreno_is_a740_family(adreno_gpu)) { 1305 + if (adreno_is_a650(gpu) || 1306 + adreno_is_a660(gpu) || 1307 + adreno_is_a690(gpu) || 1308 + adreno_is_a730(gpu) || 1309 + adreno_is_a740_family(gpu)) { 1317 1310 /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */ 1318 - hbb_lo = 3; 1319 - amsbc = 1; 1320 - rgb565_predicator = 1; 1321 - uavflagprd_inv = 2; 1311 + gpu->ubwc_config.highest_bank_bit = 16; 1312 + gpu->ubwc_config.amsbc = 1; 1313 + gpu->ubwc_config.rgb565_predicator = 1; 1314 + gpu->ubwc_config.uavflagprd_inv = 2; 1322 1315 } 1323 1316 1324 - if (adreno_is_a690(adreno_gpu)) { 1325 - hbb_lo = 2; 1326 - amsbc = 1; 1327 - rgb565_predicator = 1; 1328 - uavflagprd_inv = 2; 1317 + if (adreno_is_7c3(gpu)) { 1318 + gpu->ubwc_config.highest_bank_bit = 14; 1319 + gpu->ubwc_config.amsbc = 1; 1320 + gpu->ubwc_config.rgb565_predicator = 1; 1321 + gpu->ubwc_config.uavflagprd_inv = 2; 1329 1322 } 1323 + } 1330 1324 1331 - if (adreno_is_7c3(adreno_gpu)) { 1332 - hbb_lo = 1; 1333 - amsbc = 1; 1334 - rgb565_predicator = 1; 1335 - uavflagprd_inv = 2; 1336 - } 1325 + static void a6xx_set_ubwc_config(struct msm_gpu *gpu) 1326 + { 1327 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 1328 + /* 1329 + * We subtract 13 from 
the highest bank bit (13 is the minimum value 1330 + * allowed by hw) and write the lowest two bits of the remaining value 1331 + * as hbb_lo and the one above it as hbb_hi to the hardware. 1332 + */ 1333 + BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13); 1334 + u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; 1335 + u32 hbb_hi = hbb >> 2; 1336 + u32 hbb_lo = hbb & 3; 1337 1337 1338 1338 gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 1339 - rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 | 1340 - min_acc_len << 3 | hbb_lo << 1 | ubwc_mode); 1339 + adreno_gpu->ubwc_config.rgb565_predicator << 11 | 1340 + hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 | 1341 + adreno_gpu->ubwc_config.min_acc_len << 3 | 1342 + hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); 1341 1343 1342 1344 gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 | 1343 - min_acc_len << 3 | hbb_lo << 1 | ubwc_mode); 1345 + adreno_gpu->ubwc_config.min_acc_len << 3 | 1346 + hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); 1344 1347 1345 1348 gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 | 1346 - uavflagprd_inv << 4 | min_acc_len << 3 | 1347 - hbb_lo << 1 | ubwc_mode); 1349 + adreno_gpu->ubwc_config.uavflagprd_inv << 4 | 1350 + adreno_gpu->ubwc_config.min_acc_len << 3 | 1351 + hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); 1348 1352 1349 1353 if (adreno_is_a7xx(adreno_gpu)) 1350 1354 gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL, 1351 1355 FIELD_PREP(GENMASK(8, 5), hbb_lo)); 1352 1356 1353 - gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21); 1357 + gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 1358 + adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21); 1354 1359 } 1355 1360 1356 1361 static int a6xx_cp_init(struct msm_gpu *gpu) ··· 1746 1741 /* Setting the primFifo thresholds default values, 1747 1742 * and vccCacheSkipDis=1 bit (0x200) for A640 and newer 1748 1743 */ 1749 - if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu)) 1744 + if (adreno_is_a690(adreno_gpu)) 1745 + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200); 1746 + else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) 1750 1747 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); 1751 1748 else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu)) 1752 1749 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200); ··· 1782 1775 if (adreno_is_a730(adreno_gpu) || 1783 1776 adreno_is_a740_family(adreno_gpu)) 1784 1777 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff); 1778 + else if (adreno_is_a690(adreno_gpu)) 1779 + gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff); 1785 1780 else if (adreno_is_a619(adreno_gpu)) 1786 1781 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff); 1787 1782 else if (adreno_is_a610(adreno_gpu)) ··· 1791 1782 else 1792 1783 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff); 1793 1784 1794 - gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1); 1785 + gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1); 1795 1786 1796 1787 /* Set weights for bicubic filtering */ 1797 1788 if (adreno_is_a650_family(adreno_gpu)) { ··· 1817 1808 a6xx_set_cp_protect(gpu); 1818 1809 1819 1810 if (adreno_is_a660_family(adreno_gpu)) { 1820 - gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1); 1811 + if (adreno_is_a690(adreno_gpu)) 1812 + gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x00028801); 1813 + else 1814 + gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1); 1821 1815 
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0); 1822 1816 } 1823 1817 1818 + if (adreno_is_a690(adreno_gpu)) 1819 + gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90); 1824 1820 /* Set dualQ + disable afull for A660 GPU */ 1825 - if (adreno_is_a660(adreno_gpu)) 1821 + else if (adreno_is_a660(adreno_gpu)) 1826 1822 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); 1827 1823 else if (adreno_is_a7xx(adreno_gpu)) 1828 1824 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, ··· 2921 2907 if (gpu->aspace) 2922 2908 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, 2923 2909 a6xx_fault_handler); 2910 + 2911 + a6xx_calc_ubwc_config(adreno_gpu); 2924 2912 2925 2913 return gpu; 2926 2914 }
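
The a6xx rework above splits UBWC handling in two: a6xx_calc_ubwc_config() fills adreno_gpu->ubwc_config (storing highest_bank_bit as an absolute bit number), and a6xx_set_ubwc_config() converts it to register fields at hw_init time. A small standalone sketch of that conversion with worked values; the helper name is made up, the arithmetic mirrors a6xx_set_ubwc_config():

/* HBB is programmed as (highest_bank_bit - 13), split into a 2-bit low
 * field and a high field, as in a6xx_set_ubwc_config() above.
 *   highest_bank_bit = 16 -> hbb = 3, hbb_hi = 0, hbb_lo = 3
 *   highest_bank_bit = 13 -> hbb = 0, hbb_hi = 0, hbb_lo = 0
 */
static void split_hbb(u32 highest_bank_bit, u32 *hbb_hi, u32 *hbb_lo)
{
    u32 hbb = highest_bank_bit - 13;    /* 13 is the hw minimum */

    *hbb_hi = hbb >> 2;
    *hbb_lo = hbb & 3;
}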
+1 -1
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 464 464 { 190, 1 }, 465 465 ), 466 466 }, { 467 - .chip_ids = ADRENO_CHIP_IDS(0x06080000), 467 + .chip_ids = ADRENO_CHIP_IDS(0x06080001), 468 468 .family = ADRENO_6XX_GEN2, 469 469 .revn = 680, 470 470 .fw = {
+3
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 373 373 return -EINVAL; 374 374 *value = ctx->aspace->va_size; 375 375 return 0; 376 + case MSM_PARAM_HIGHEST_BANK_BIT: 377 + *value = adreno_gpu->ubwc_config.highest_bank_bit; 378 + return 0; 376 379 default: 377 380 DBG("%s: invalid param: %u", gpu->name, param); 378 381 return -EINVAL;
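
With MSM_PARAM_HIGHEST_BANK_BIT now answered by adreno_get_param(), userspace can query the UBWC highest bank bit instead of hard-coding it per GPU. A rough userspace sketch using the existing DRM_MSM_GET_PARAM ioctl follows; it assumes the new MSM_PARAM_HIGHEST_BANK_BIT value from this series is present in the installed uapi header, and the helper itself is hypothetical:

/* Hypothetical userspace helper: read the UBWC highest bank bit from the
 * kernel via DRM_MSM_GET_PARAM (libdrm's drmCommandWriteRead wrapper).
 */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

static int get_highest_bank_bit(int fd, uint64_t *hbb)
{
    struct drm_msm_param req = {
        .pipe  = MSM_PIPE_3D0,
        .param = MSM_PARAM_HIGHEST_BANK_BIT,
    };
    int ret;

    ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
    if (ret)
        return ret;

    *hbb = req.value;
    return 0;
}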
+9
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 165 165 /* firmware: */ 166 166 const struct firmware *fw[ADRENO_FW_MAX]; 167 167 168 + struct { 169 + u32 rgb565_predicator; 170 + u32 uavflagprd_inv; 171 + u32 min_acc_len; 172 + u32 ubwc_mode; 173 + u32 highest_bank_bit; 174 + u32 amsbc; 175 + } ubwc_config; 176 + 168 177 /* 169 178 * Register offsets are different between some GPUs. 170 179 * GPU specific offsets will be exported by GPU specific
+457
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. 5 + */ 6 + 7 + #ifndef _DPU_10_0_SM8650_H 8 + #define _DPU_10_0_SM8650_H 9 + 10 + static const struct dpu_caps sm8650_dpu_caps = { 11 + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 + .max_mixer_blendstages = 0xb, 13 + .has_src_split = true, 14 + .has_dim_layer = true, 15 + .has_idle_pc = true, 16 + .has_3d_merge = true, 17 + .max_linewidth = 8192, 18 + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, 19 + }; 20 + 21 + static const struct dpu_mdp_cfg sm8650_mdp = { 22 + .name = "top_0", 23 + .base = 0, .len = 0x494, 24 + .features = BIT(DPU_MDP_PERIPH_0_REMOVED), 25 + .clk_ctrls = { 26 + [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, 27 + }, 28 + }; 29 + 30 + /* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ 31 + static const struct dpu_ctl_cfg sm8650_ctl[] = { 32 + { 33 + .name = "ctl_0", .id = CTL_0, 34 + .base = 0x15000, .len = 0x1000, 35 + .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), 36 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), 37 + }, { 38 + .name = "ctl_1", .id = CTL_1, 39 + .base = 0x16000, .len = 0x1000, 40 + .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), 41 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), 42 + }, { 43 + .name = "ctl_2", .id = CTL_2, 44 + .base = 0x17000, .len = 0x1000, 45 + .features = CTL_SM8550_MASK, 46 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), 47 + }, { 48 + .name = "ctl_3", .id = CTL_3, 49 + .base = 0x18000, .len = 0x1000, 50 + .features = CTL_SM8550_MASK, 51 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), 52 + }, { 53 + .name = "ctl_4", .id = CTL_4, 54 + .base = 0x19000, .len = 0x1000, 55 + .features = CTL_SM8550_MASK, 56 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), 57 + }, { 58 + .name = "ctl_5", .id = CTL_5, 59 + .base = 0x1a000, .len = 0x1000, 60 + .features = CTL_SM8550_MASK, 61 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23), 62 + }, 63 + }; 64 + 65 + static const struct dpu_sspp_cfg sm8650_sspp[] = { 66 + { 67 + .name = "sspp_0", .id = SSPP_VIG0, 68 + .base = 0x4000, .len = 0x344, 69 + .features = VIG_SDM845_MASK_SDMA, 70 + .sblk = &dpu_vig_sblk_qseed3_3_3, 71 + .xin_id = 0, 72 + .type = SSPP_TYPE_VIG, 73 + }, { 74 + .name = "sspp_1", .id = SSPP_VIG1, 75 + .base = 0x6000, .len = 0x344, 76 + .features = VIG_SDM845_MASK_SDMA, 77 + .sblk = &dpu_vig_sblk_qseed3_3_3, 78 + .xin_id = 4, 79 + .type = SSPP_TYPE_VIG, 80 + }, { 81 + .name = "sspp_2", .id = SSPP_VIG2, 82 + .base = 0x8000, .len = 0x344, 83 + .features = VIG_SDM845_MASK_SDMA, 84 + .sblk = &dpu_vig_sblk_qseed3_3_3, 85 + .xin_id = 8, 86 + .type = SSPP_TYPE_VIG, 87 + }, { 88 + .name = "sspp_3", .id = SSPP_VIG3, 89 + .base = 0xa000, .len = 0x344, 90 + .features = VIG_SDM845_MASK_SDMA, 91 + .sblk = &dpu_vig_sblk_qseed3_3_3, 92 + .xin_id = 12, 93 + .type = SSPP_TYPE_VIG, 94 + }, { 95 + .name = "sspp_8", .id = SSPP_DMA0, 96 + .base = 0x24000, .len = 0x344, 97 + .features = DMA_SDM845_MASK_SDMA, 98 + .sblk = &dpu_dma_sblk, 99 + .xin_id = 1, 100 + .type = SSPP_TYPE_DMA, 101 + }, { 102 + .name = "sspp_9", .id = SSPP_DMA1, 103 + .base = 0x26000, .len = 0x344, 104 + .features = DMA_SDM845_MASK_SDMA, 105 + .sblk = &dpu_dma_sblk, 106 + .xin_id = 5, 107 + .type = SSPP_TYPE_DMA, 108 + }, { 109 + .name = "sspp_10", .id = SSPP_DMA2, 110 + .base = 0x28000, .len = 0x344, 111 + .features 
= DMA_SDM845_MASK_SDMA, 112 + .sblk = &dpu_dma_sblk, 113 + .xin_id = 9, 114 + .type = SSPP_TYPE_DMA, 115 + }, { 116 + .name = "sspp_11", .id = SSPP_DMA3, 117 + .base = 0x2a000, .len = 0x344, 118 + .features = DMA_SDM845_MASK_SDMA, 119 + .sblk = &dpu_dma_sblk, 120 + .xin_id = 13, 121 + .type = SSPP_TYPE_DMA, 122 + }, { 123 + .name = "sspp_12", .id = SSPP_DMA4, 124 + .base = 0x2c000, .len = 0x344, 125 + .features = DMA_CURSOR_SDM845_MASK_SDMA, 126 + .sblk = &dpu_dma_sblk, 127 + .xin_id = 14, 128 + .type = SSPP_TYPE_DMA, 129 + }, { 130 + .name = "sspp_13", .id = SSPP_DMA5, 131 + .base = 0x2e000, .len = 0x344, 132 + .features = DMA_CURSOR_SDM845_MASK_SDMA, 133 + .sblk = &dpu_dma_sblk, 134 + .xin_id = 15, 135 + .type = SSPP_TYPE_DMA, 136 + }, 137 + }; 138 + 139 + static const struct dpu_lm_cfg sm8650_lm[] = { 140 + { 141 + .name = "lm_0", .id = LM_0, 142 + .base = 0x44000, .len = 0x400, 143 + .features = MIXER_SDM845_MASK, 144 + .sblk = &sdm845_lm_sblk, 145 + .lm_pair = LM_1, 146 + .pingpong = PINGPONG_0, 147 + .dspp = DSPP_0, 148 + }, { 149 + .name = "lm_1", .id = LM_1, 150 + .base = 0x45000, .len = 0x400, 151 + .features = MIXER_SDM845_MASK, 152 + .sblk = &sdm845_lm_sblk, 153 + .lm_pair = LM_0, 154 + .pingpong = PINGPONG_1, 155 + .dspp = DSPP_1, 156 + }, { 157 + .name = "lm_2", .id = LM_2, 158 + .base = 0x46000, .len = 0x400, 159 + .features = MIXER_SDM845_MASK, 160 + .sblk = &sdm845_lm_sblk, 161 + .lm_pair = LM_3, 162 + .pingpong = PINGPONG_2, 163 + }, { 164 + .name = "lm_3", .id = LM_3, 165 + .base = 0x47000, .len = 0x400, 166 + .features = MIXER_SDM845_MASK, 167 + .sblk = &sdm845_lm_sblk, 168 + .lm_pair = LM_2, 169 + .pingpong = PINGPONG_3, 170 + }, { 171 + .name = "lm_4", .id = LM_4, 172 + .base = 0x48000, .len = 0x400, 173 + .features = MIXER_SDM845_MASK, 174 + .sblk = &sdm845_lm_sblk, 175 + .lm_pair = LM_5, 176 + .pingpong = PINGPONG_4, 177 + }, { 178 + .name = "lm_5", .id = LM_5, 179 + .base = 0x49000, .len = 0x400, 180 + .features = MIXER_SDM845_MASK, 181 + .sblk = &sdm845_lm_sblk, 182 + .lm_pair = LM_4, 183 + .pingpong = PINGPONG_5, 184 + }, 185 + }; 186 + 187 + static const struct dpu_dspp_cfg sm8650_dspp[] = { 188 + { 189 + .name = "dspp_0", .id = DSPP_0, 190 + .base = 0x54000, .len = 0x1800, 191 + .features = DSPP_SC7180_MASK, 192 + .sblk = &sdm845_dspp_sblk, 193 + }, { 194 + .name = "dspp_1", .id = DSPP_1, 195 + .base = 0x56000, .len = 0x1800, 196 + .features = DSPP_SC7180_MASK, 197 + .sblk = &sdm845_dspp_sblk, 198 + }, { 199 + .name = "dspp_2", .id = DSPP_2, 200 + .base = 0x58000, .len = 0x1800, 201 + .features = DSPP_SC7180_MASK, 202 + .sblk = &sdm845_dspp_sblk, 203 + }, { 204 + .name = "dspp_3", .id = DSPP_3, 205 + .base = 0x5a000, .len = 0x1800, 206 + .features = DSPP_SC7180_MASK, 207 + .sblk = &sdm845_dspp_sblk, 208 + }, 209 + }; 210 + 211 + static const struct dpu_pingpong_cfg sm8650_pp[] = { 212 + { 213 + .name = "pingpong_0", .id = PINGPONG_0, 214 + .base = 0x69000, .len = 0, 215 + .features = BIT(DPU_PINGPONG_DITHER), 216 + .sblk = &sc7280_pp_sblk, 217 + .merge_3d = MERGE_3D_0, 218 + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), 219 + }, { 220 + .name = "pingpong_1", .id = PINGPONG_1, 221 + .base = 0x6a000, .len = 0, 222 + .features = BIT(DPU_PINGPONG_DITHER), 223 + .sblk = &sc7280_pp_sblk, 224 + .merge_3d = MERGE_3D_0, 225 + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), 226 + }, { 227 + .name = "pingpong_2", .id = PINGPONG_2, 228 + .base = 0x6b000, .len = 0, 229 + .features = BIT(DPU_PINGPONG_DITHER), 230 + .sblk = &sc7280_pp_sblk, 231 + .merge_3d = MERGE_3D_1, 
232 + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), 233 + }, { 234 + .name = "pingpong_3", .id = PINGPONG_3, 235 + .base = 0x6c000, .len = 0, 236 + .features = BIT(DPU_PINGPONG_DITHER), 237 + .sblk = &sc7280_pp_sblk, 238 + .merge_3d = MERGE_3D_1, 239 + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), 240 + }, { 241 + .name = "pingpong_4", .id = PINGPONG_4, 242 + .base = 0x6d000, .len = 0, 243 + .features = BIT(DPU_PINGPONG_DITHER), 244 + .sblk = &sc7280_pp_sblk, 245 + .merge_3d = MERGE_3D_2, 246 + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), 247 + }, { 248 + .name = "pingpong_5", .id = PINGPONG_5, 249 + .base = 0x6e000, .len = 0, 250 + .features = BIT(DPU_PINGPONG_DITHER), 251 + .sblk = &sc7280_pp_sblk, 252 + .merge_3d = MERGE_3D_2, 253 + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), 254 + }, { 255 + .name = "pingpong_6", .id = PINGPONG_6, 256 + .base = 0x66000, .len = 0, 257 + .features = BIT(DPU_PINGPONG_DITHER), 258 + .sblk = &sc7280_pp_sblk, 259 + .merge_3d = MERGE_3D_3, 260 + }, { 261 + .name = "pingpong_7", .id = PINGPONG_7, 262 + .base = 0x66400, .len = 0, 263 + .features = BIT(DPU_PINGPONG_DITHER), 264 + .sblk = &sc7280_pp_sblk, 265 + .merge_3d = MERGE_3D_3, 266 + }, { 267 + .name = "pingpong_8", .id = PINGPONG_8, 268 + .base = 0x7e000, .len = 0, 269 + .features = BIT(DPU_PINGPONG_DITHER), 270 + .sblk = &sc7280_pp_sblk, 271 + .merge_3d = MERGE_3D_4, 272 + }, { 273 + .name = "pingpong_9", .id = PINGPONG_9, 274 + .base = 0x7e400, .len = 0, 275 + .features = BIT(DPU_PINGPONG_DITHER), 276 + .sblk = &sc7280_pp_sblk, 277 + .merge_3d = MERGE_3D_4, 278 + }, 279 + }; 280 + 281 + static const struct dpu_merge_3d_cfg sm8650_merge_3d[] = { 282 + { 283 + .name = "merge_3d_0", .id = MERGE_3D_0, 284 + .base = 0x4e000, .len = 0x8, 285 + }, { 286 + .name = "merge_3d_1", .id = MERGE_3D_1, 287 + .base = 0x4f000, .len = 0x8, 288 + }, { 289 + .name = "merge_3d_2", .id = MERGE_3D_2, 290 + .base = 0x50000, .len = 0x8, 291 + }, { 292 + .name = "merge_3d_3", .id = MERGE_3D_3, 293 + .base = 0x66700, .len = 0x8, 294 + }, { 295 + .name = "merge_3d_4", .id = MERGE_3D_4, 296 + .base = 0x7e700, .len = 0x8, 297 + }, 298 + }; 299 + 300 + /* 301 + * NOTE: Each display compression engine (DCE) contains dual hard 302 + * slice DSC encoders so both share same base address but with 303 + * its own different sub block address. 
304 + */ 305 + static const struct dpu_dsc_cfg sm8650_dsc[] = { 306 + { 307 + .name = "dce_0_0", .id = DSC_0, 308 + .base = 0x80000, .len = 0x6, 309 + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), 310 + .sblk = &dsc_sblk_0, 311 + }, { 312 + .name = "dce_0_1", .id = DSC_1, 313 + .base = 0x80000, .len = 0x6, 314 + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), 315 + .sblk = &dsc_sblk_1, 316 + }, { 317 + .name = "dce_1_0", .id = DSC_2, 318 + .base = 0x81000, .len = 0x6, 319 + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), 320 + .sblk = &dsc_sblk_0, 321 + }, { 322 + .name = "dce_1_1", .id = DSC_3, 323 + .base = 0x81000, .len = 0x6, 324 + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), 325 + .sblk = &dsc_sblk_1, 326 + }, { 327 + .name = "dce_2_0", .id = DSC_4, 328 + .base = 0x82000, .len = 0x6, 329 + .features = BIT(DPU_DSC_HW_REV_1_2), 330 + .sblk = &dsc_sblk_0, 331 + }, { 332 + .name = "dce_2_1", .id = DSC_5, 333 + .base = 0x82000, .len = 0x6, 334 + .features = BIT(DPU_DSC_HW_REV_1_2), 335 + .sblk = &dsc_sblk_1, 336 + }, 337 + }; 338 + 339 + static const struct dpu_wb_cfg sm8650_wb[] = { 340 + { 341 + .name = "wb_2", .id = WB_2, 342 + .base = 0x65000, .len = 0x2c8, 343 + .features = WB_SM8250_MASK, 344 + .format_list = wb2_formats_rgb, 345 + .num_formats = ARRAY_SIZE(wb2_formats_rgb), 346 + .xin_id = 6, 347 + .vbif_idx = VBIF_RT, 348 + .maxlinewidth = 4096, 349 + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), 350 + }, 351 + }; 352 + 353 + static const struct dpu_intf_cfg sm8650_intf[] = { 354 + { 355 + .name = "intf_0", .id = INTF_0, 356 + .base = 0x34000, .len = 0x280, 357 + .features = INTF_SC7280_MASK, 358 + .type = INTF_DP, 359 + .controller_id = MSM_DP_CONTROLLER_0, 360 + .prog_fetch_lines_worst_case = 24, 361 + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), 362 + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), 363 + }, { 364 + .name = "intf_1", .id = INTF_1, 365 + .base = 0x35000, .len = 0x300, 366 + .features = INTF_SC7280_MASK, 367 + .type = INTF_DSI, 368 + .controller_id = MSM_DSI_CONTROLLER_0, 369 + .prog_fetch_lines_worst_case = 24, 370 + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), 371 + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), 372 + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2), 373 + }, { 374 + .name = "intf_2", .id = INTF_2, 375 + .base = 0x36000, .len = 0x300, 376 + .features = INTF_SC7280_MASK, 377 + .type = INTF_DSI, 378 + .controller_id = MSM_DSI_CONTROLLER_1, 379 + .prog_fetch_lines_worst_case = 24, 380 + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), 381 + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), 382 + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2), 383 + }, { 384 + .name = "intf_3", .id = INTF_3, 385 + .base = 0x37000, .len = 0x280, 386 + .features = INTF_SC7280_MASK, 387 + .type = INTF_DP, 388 + .controller_id = MSM_DP_CONTROLLER_1, 389 + .prog_fetch_lines_worst_case = 24, 390 + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), 391 + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), 392 + }, 393 + }; 394 + 395 + static const struct dpu_perf_cfg sm8650_perf_data = { 396 + .max_bw_low = 17000000, 397 + .max_bw_high = 27000000, 398 + .min_core_ib = 2500000, 399 + .min_llcc_ib = 0, 400 + .min_dram_ib = 800000, 401 + .min_prefill_lines = 35, 402 + /* FIXME: lut tables */ 403 + .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0}, 404 + .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff}, 405 + .qos_lut_tbl = { 406 + {.nentry = 
ARRAY_SIZE(sc7180_qos_linear), 407 + .entries = sc7180_qos_linear 408 + }, 409 + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), 410 + .entries = sc7180_qos_macrotile 411 + }, 412 + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), 413 + .entries = sc7180_qos_nrt 414 + }, 415 + /* TODO: macrotile-qseed is different from macrotile */ 416 + }, 417 + .cdp_cfg = { 418 + {.rd_enable = 1, .wr_enable = 1}, 419 + {.rd_enable = 1, .wr_enable = 0} 420 + }, 421 + .clk_inefficiency_factor = 105, 422 + .bw_inefficiency_factor = 120, 423 + }; 424 + 425 + static const struct dpu_mdss_version sm8650_mdss_ver = { 426 + .core_major_ver = 10, 427 + .core_minor_ver = 0, 428 + }; 429 + 430 + const struct dpu_mdss_cfg dpu_sm8650_cfg = { 431 + .mdss_ver = &sm8650_mdss_ver, 432 + .caps = &sm8650_dpu_caps, 433 + .mdp = &sm8650_mdp, 434 + .ctl_count = ARRAY_SIZE(sm8650_ctl), 435 + .ctl = sm8650_ctl, 436 + .sspp_count = ARRAY_SIZE(sm8650_sspp), 437 + .sspp = sm8650_sspp, 438 + .mixer_count = ARRAY_SIZE(sm8650_lm), 439 + .mixer = sm8650_lm, 440 + .dspp_count = ARRAY_SIZE(sm8650_dspp), 441 + .dspp = sm8650_dspp, 442 + .pingpong_count = ARRAY_SIZE(sm8650_pp), 443 + .pingpong = sm8650_pp, 444 + .dsc_count = ARRAY_SIZE(sm8650_dsc), 445 + .dsc = sm8650_dsc, 446 + .merge_3d_count = ARRAY_SIZE(sm8650_merge_3d), 447 + .merge_3d = sm8650_merge_3d, 448 + .wb_count = ARRAY_SIZE(sm8650_wb), 449 + .wb = sm8650_wb, 450 + .intf_count = ARRAY_SIZE(sm8650_intf), 451 + .intf = sm8650_intf, 452 + .vbif_count = ARRAY_SIZE(sm8650_vbif), 453 + .vbif = sm8650_vbif, 454 + .perf = &sm8650_perf_data, 455 + }; 456 + 457 + #endif
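
The SM8650 catalog above follows the same pattern as the other per-SoC headers: each class of hardware block (SSPP, LM, DSPP, pingpong, merge_3d, DSC, WB, INTF) is described in a const table, and dpu_sm8650_cfg ties every table to its length through ARRAY_SIZE() so the rest of the driver can iterate the blocks without hard-coded counts. Below is a minimal, self-contained userspace sketch of that table-plus-count idiom; the struct and field names in it are invented for illustration and are not part of the driver.

/* Sketch of the catalog pattern: const per-block tables plus a top-level
 * config that records each table together with its ARRAY_SIZE() count.
 * All names here are made up for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct block_cfg {
	const char *name;
	unsigned int base;
	unsigned int len;
};

struct soc_cfg {
	const struct block_cfg *pingpong;
	size_t pingpong_count;
};

static const struct block_cfg demo_pp[] = {
	{ .name = "pingpong_0", .base = 0x69000, .len = 0 },
	{ .name = "pingpong_1", .base = 0x6a000, .len = 0 },
};

static const struct soc_cfg demo_cfg = {
	.pingpong = demo_pp,
	.pingpong_count = ARRAY_SIZE(demo_pp),
};

int main(void)
{
	/* Consumers iterate using the recorded count, never a literal. */
	for (size_t i = 0; i < demo_cfg.pingpong_count; i++)
		printf("%s @ 0x%x\n", demo_cfg.pingpong[i].name,
		       demo_cfg.pingpong[i].base);
	return 0;
}
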
+8 -9
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
··· 10 10 static const struct dpu_caps msm8998_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0x7, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED3, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 69 70 .name = "sspp_0", .id = SSPP_VIG0, 70 71 .base = 0x4000, .len = 0x1ac, 71 72 .features = VIG_MSM8998_MASK, 72 - .sblk = &msm8998_vig_sblk_0, 73 + .sblk = &dpu_vig_sblk_qseed3_1_2, 73 74 .xin_id = 0, 74 75 .type = SSPP_TYPE_VIG, 75 76 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 77 78 .name = "sspp_1", .id = SSPP_VIG1, 78 79 .base = 0x6000, .len = 0x1ac, 79 80 .features = VIG_MSM8998_MASK, 80 - .sblk = &msm8998_vig_sblk_1, 81 + .sblk = &dpu_vig_sblk_qseed3_1_2, 81 82 .xin_id = 4, 82 83 .type = SSPP_TYPE_VIG, 83 84 .clk_ctrl = DPU_CLK_CTRL_VIG1, ··· 85 86 .name = "sspp_2", .id = SSPP_VIG2, 86 87 .base = 0x8000, .len = 0x1ac, 87 88 .features = VIG_MSM8998_MASK, 88 - .sblk = &msm8998_vig_sblk_2, 89 + .sblk = &dpu_vig_sblk_qseed3_1_2, 89 90 .xin_id = 8, 90 91 .type = SSPP_TYPE_VIG, 91 92 .clk_ctrl = DPU_CLK_CTRL_VIG2, ··· 93 94 .name = "sspp_3", .id = SSPP_VIG3, 94 95 .base = 0xa000, .len = 0x1ac, 95 96 .features = VIG_MSM8998_MASK, 96 - .sblk = &msm8998_vig_sblk_3, 97 + .sblk = &dpu_vig_sblk_qseed3_1_2, 97 98 .xin_id = 12, 98 99 .type = SSPP_TYPE_VIG, 99 100 .clk_ctrl = DPU_CLK_CTRL_VIG3, ··· 101 102 .name = "sspp_8", .id = SSPP_DMA0, 102 103 .base = 0x24000, .len = 0x1ac, 103 104 .features = DMA_MSM8998_MASK, 104 - .sblk = &sdm845_dma_sblk_0, 105 + .sblk = &dpu_dma_sblk, 105 106 .xin_id = 1, 106 107 .type = SSPP_TYPE_DMA, 107 108 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 109 110 .name = "sspp_9", .id = SSPP_DMA1, 110 111 .base = 0x26000, .len = 0x1ac, 111 112 .features = DMA_MSM8998_MASK, 112 - .sblk = &sdm845_dma_sblk_1, 113 + .sblk = &dpu_dma_sblk, 113 114 .xin_id = 5, 114 115 .type = SSPP_TYPE_DMA, 115 116 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 117 118 .name = "sspp_10", .id = SSPP_DMA2, 118 119 .base = 0x28000, .len = 0x1ac, 119 120 .features = DMA_CURSOR_MSM8998_MASK, 120 - .sblk = &sdm845_dma_sblk_2, 121 + .sblk = &dpu_dma_sblk, 121 122 .xin_id = 9, 122 123 .type = SSPP_TYPE_DMA, 123 124 .clk_ctrl = DPU_CLK_CTRL_DMA2, ··· 125 126 .name = "sspp_11", .id = SSPP_DMA3, 126 127 .base = 0x2a000, .len = 0x1ac, 127 128 .features = DMA_CURSOR_MSM8998_MASK, 128 - .sblk = &sdm845_dma_sblk_3, 129 + .sblk = &dpu_dma_sblk, 129 130 .xin_id = 13, 130 131 .type = SSPP_TYPE_DMA, 131 132 .clk_ctrl = DPU_CLK_CTRL_DMA3,
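
The msm8998 hunk above is representative of the conversion applied across the catalogs in this series: the .qseed_type capability flag is dropped, and the per-SoC sub-block definitions (msm8998_vig_sblk_*, sdm845_dma_sblk_*) are replaced by shared ones whose names carry the scaler version (dpu_vig_sblk_qseed3_1_2, dpu_dma_sblk), so the scaler generation can be derived from the versioned sub-block instead of a separate cap. A rough sketch of dispatching on such a version follows; the SCALER_VER macro and the setup helpers are invented for this example, and the v3.0 cut-off is only an assumption made here for illustration.

/* Hypothetical sketch: pick a scaler implementation from the sub-block's
 * version rather than a per-SoC qseed_type flag.  Names and the version
 * threshold are illustrative only.
 */
#include <stdio.h>

#define SCALER_VER(maj, min)	(((maj) << 16) | (min))

struct scaler_blk {
	const char *name;
	unsigned int version;	/* e.g. SCALER_VER(1, 2) */
};

static void setup_older_scaler(const struct scaler_blk *blk)
{
	printf("%s: programming older-generation coefficients\n", blk->name);
}

static void setup_newer_scaler(const struct scaler_blk *blk)
{
	printf("%s: programming newer-generation coefficients\n", blk->name);
}

static void setup_scaler(const struct scaler_blk *blk)
{
	/* Assumed split point for the sketch: v3.0+ is the newer generation. */
	if (blk->version >= SCALER_VER(3, 0))
		setup_newer_scaler(blk);
	else
		setup_older_scaler(blk);
}

int main(void)
{
	struct scaler_blk vig12 = { "vig_qseed3_1_2", SCALER_VER(1, 2) };
	struct scaler_blk vig30 = { "vig_qseed3_3_0", SCALER_VER(3, 0) };

	setup_scaler(&vig12);
	setup_scaler(&vig30);
	return 0;
}
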
+8 -9
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
··· 10 10 static const struct dpu_caps sdm845_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0xb, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED3, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 67 68 .name = "sspp_0", .id = SSPP_VIG0, 68 69 .base = 0x4000, .len = 0x1c8, 69 70 .features = VIG_SDM845_MASK_SDMA, 70 - .sblk = &sdm845_vig_sblk_0, 71 + .sblk = &dpu_vig_sblk_qseed3_1_3, 71 72 .xin_id = 0, 72 73 .type = SSPP_TYPE_VIG, 73 74 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 75 76 .name = "sspp_1", .id = SSPP_VIG1, 76 77 .base = 0x6000, .len = 0x1c8, 77 78 .features = VIG_SDM845_MASK_SDMA, 78 - .sblk = &sdm845_vig_sblk_1, 79 + .sblk = &dpu_vig_sblk_qseed3_1_3, 79 80 .xin_id = 4, 80 81 .type = SSPP_TYPE_VIG, 81 82 .clk_ctrl = DPU_CLK_CTRL_VIG1, ··· 83 84 .name = "sspp_2", .id = SSPP_VIG2, 84 85 .base = 0x8000, .len = 0x1c8, 85 86 .features = VIG_SDM845_MASK_SDMA, 86 - .sblk = &sdm845_vig_sblk_2, 87 + .sblk = &dpu_vig_sblk_qseed3_1_3, 87 88 .xin_id = 8, 88 89 .type = SSPP_TYPE_VIG, 89 90 .clk_ctrl = DPU_CLK_CTRL_VIG2, ··· 91 92 .name = "sspp_3", .id = SSPP_VIG3, 92 93 .base = 0xa000, .len = 0x1c8, 93 94 .features = VIG_SDM845_MASK_SDMA, 94 - .sblk = &sdm845_vig_sblk_3, 95 + .sblk = &dpu_vig_sblk_qseed3_1_3, 95 96 .xin_id = 12, 96 97 .type = SSPP_TYPE_VIG, 97 98 .clk_ctrl = DPU_CLK_CTRL_VIG3, ··· 99 100 .name = "sspp_8", .id = SSPP_DMA0, 100 101 .base = 0x24000, .len = 0x1c8, 101 102 .features = DMA_SDM845_MASK_SDMA, 102 - .sblk = &sdm845_dma_sblk_0, 103 + .sblk = &dpu_dma_sblk, 103 104 .xin_id = 1, 104 105 .type = SSPP_TYPE_DMA, 105 106 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 107 108 .name = "sspp_9", .id = SSPP_DMA1, 108 109 .base = 0x26000, .len = 0x1c8, 109 110 .features = DMA_SDM845_MASK_SDMA, 110 - .sblk = &sdm845_dma_sblk_1, 111 + .sblk = &dpu_dma_sblk, 111 112 .xin_id = 5, 112 113 .type = SSPP_TYPE_DMA, 113 114 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 115 116 .name = "sspp_10", .id = SSPP_DMA2, 116 117 .base = 0x28000, .len = 0x1c8, 117 118 .features = DMA_CURSOR_SDM845_MASK_SDMA, 118 - .sblk = &sdm845_dma_sblk_2, 119 + .sblk = &dpu_dma_sblk, 119 120 .xin_id = 9, 120 121 .type = SSPP_TYPE_DMA, 121 122 .clk_ctrl = DPU_CLK_CTRL_DMA2, ··· 123 124 .name = "sspp_11", .id = SSPP_DMA3, 124 125 .base = 0x2a000, .len = 0x1c8, 125 126 .features = DMA_CURSOR_SDM845_MASK_SDMA, 126 - .sblk = &sdm845_dma_sblk_3, 127 + .sblk = &dpu_dma_sblk, 127 128 .xin_id = 13, 128 129 .type = SSPP_TYPE_DMA, 129 130 .clk_ctrl = DPU_CLK_CTRL_DMA3,
+104
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 4 + * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. 5 + * Copyright (c) 2023, Richard Acayan. All rights reserved. 6 + */ 7 + 8 + #ifndef _DPU_4_1_SDM670_H 9 + #define _DPU_4_1_SDM670_H 10 + 11 + static const struct dpu_mdp_cfg sdm670_mdp = { 12 + .name = "top_0", 13 + .base = 0x0, .len = 0x45c, 14 + .features = BIT(DPU_MDP_AUDIO_SELECT), 15 + .clk_ctrls = { 16 + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, 17 + [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, 18 + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, 19 + [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, 20 + [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, 21 + }, 22 + }; 23 + 24 + static const struct dpu_sspp_cfg sdm670_sspp[] = { 25 + { 26 + .name = "sspp_0", .id = SSPP_VIG0, 27 + .base = 0x4000, .len = 0x1c8, 28 + .features = VIG_SDM845_MASK_SDMA, 29 + .sblk = &dpu_vig_sblk_qseed3_1_3, 30 + .xin_id = 0, 31 + .type = SSPP_TYPE_VIG, 32 + .clk_ctrl = DPU_CLK_CTRL_VIG0, 33 + }, { 34 + .name = "sspp_1", .id = SSPP_VIG1, 35 + .base = 0x6000, .len = 0x1c8, 36 + .features = VIG_SDM845_MASK_SDMA, 37 + .sblk = &dpu_vig_sblk_qseed3_1_3, 38 + .xin_id = 4, 39 + .type = SSPP_TYPE_VIG, 40 + .clk_ctrl = DPU_CLK_CTRL_VIG0, 41 + }, { 42 + .name = "sspp_8", .id = SSPP_DMA0, 43 + .base = 0x24000, .len = 0x1c8, 44 + .features = DMA_SDM845_MASK_SDMA, 45 + .sblk = &dpu_dma_sblk, 46 + .xin_id = 1, 47 + .type = SSPP_TYPE_DMA, 48 + .clk_ctrl = DPU_CLK_CTRL_DMA0, 49 + }, { 50 + .name = "sspp_9", .id = SSPP_DMA1, 51 + .base = 0x26000, .len = 0x1c8, 52 + .features = DMA_CURSOR_SDM845_MASK_SDMA, 53 + .sblk = &dpu_dma_sblk, 54 + .xin_id = 5, 55 + .type = SSPP_TYPE_DMA, 56 + .clk_ctrl = DPU_CLK_CTRL_DMA1, 57 + }, { 58 + .name = "sspp_10", .id = SSPP_DMA2, 59 + .base = 0x28000, .len = 0x1c8, 60 + .features = DMA_CURSOR_SDM845_MASK_SDMA, 61 + .sblk = &dpu_dma_sblk, 62 + .xin_id = 9, 63 + .type = SSPP_TYPE_DMA, 64 + .clk_ctrl = DPU_CLK_CTRL_DMA2, 65 + }, 66 + }; 67 + 68 + static const struct dpu_dsc_cfg sdm670_dsc[] = { 69 + { 70 + .name = "dsc_0", .id = DSC_0, 71 + .base = 0x80000, .len = 0x140, 72 + }, { 73 + .name = "dsc_1", .id = DSC_1, 74 + .base = 0x80400, .len = 0x140, 75 + }, 76 + }; 77 + 78 + static const struct dpu_mdss_version sdm670_mdss_ver = { 79 + .core_major_ver = 4, 80 + .core_minor_ver = 1, 81 + }; 82 + 83 + const struct dpu_mdss_cfg dpu_sdm670_cfg = { 84 + .mdss_ver = &sdm670_mdss_ver, 85 + .caps = &sdm845_dpu_caps, 86 + .mdp = &sdm670_mdp, 87 + .ctl_count = ARRAY_SIZE(sdm845_ctl), 88 + .ctl = sdm845_ctl, 89 + .sspp_count = ARRAY_SIZE(sdm670_sspp), 90 + .sspp = sdm670_sspp, 91 + .mixer_count = ARRAY_SIZE(sdm845_lm), 92 + .mixer = sdm845_lm, 93 + .pingpong_count = ARRAY_SIZE(sdm845_pp), 94 + .pingpong = sdm845_pp, 95 + .dsc_count = ARRAY_SIZE(sdm670_dsc), 96 + .dsc = sdm670_dsc, 97 + .intf_count = ARRAY_SIZE(sdm845_intf), 98 + .intf = sdm845_intf, 99 + .vbif_count = ARRAY_SIZE(sdm845_vbif), 100 + .vbif = sdm845_vbif, 101 + .perf = &sdm845_perf_data, 102 + }; 103 + 104 + #endif
+8 -9
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
··· 10 10 static const struct dpu_caps sm8150_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0xb, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED3, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 76 77 .name = "sspp_0", .id = SSPP_VIG0, 77 78 .base = 0x4000, .len = 0x1f0, 78 79 .features = VIG_SDM845_MASK, 79 - .sblk = &sdm845_vig_sblk_0, 80 + .sblk = &dpu_vig_sblk_qseed3_1_4, 80 81 .xin_id = 0, 81 82 .type = SSPP_TYPE_VIG, 82 83 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 84 85 .name = "sspp_1", .id = SSPP_VIG1, 85 86 .base = 0x6000, .len = 0x1f0, 86 87 .features = VIG_SDM845_MASK, 87 - .sblk = &sdm845_vig_sblk_1, 88 + .sblk = &dpu_vig_sblk_qseed3_1_4, 88 89 .xin_id = 4, 89 90 .type = SSPP_TYPE_VIG, 90 91 .clk_ctrl = DPU_CLK_CTRL_VIG1, ··· 92 93 .name = "sspp_2", .id = SSPP_VIG2, 93 94 .base = 0x8000, .len = 0x1f0, 94 95 .features = VIG_SDM845_MASK, 95 - .sblk = &sdm845_vig_sblk_2, 96 + .sblk = &dpu_vig_sblk_qseed3_1_4, 96 97 .xin_id = 8, 97 98 .type = SSPP_TYPE_VIG, 98 99 .clk_ctrl = DPU_CLK_CTRL_VIG2, ··· 100 101 .name = "sspp_3", .id = SSPP_VIG3, 101 102 .base = 0xa000, .len = 0x1f0, 102 103 .features = VIG_SDM845_MASK, 103 - .sblk = &sdm845_vig_sblk_3, 104 + .sblk = &dpu_vig_sblk_qseed3_1_4, 104 105 .xin_id = 12, 105 106 .type = SSPP_TYPE_VIG, 106 107 .clk_ctrl = DPU_CLK_CTRL_VIG3, ··· 108 109 .name = "sspp_8", .id = SSPP_DMA0, 109 110 .base = 0x24000, .len = 0x1f0, 110 111 .features = DMA_SDM845_MASK, 111 - .sblk = &sdm845_dma_sblk_0, 112 + .sblk = &dpu_dma_sblk, 112 113 .xin_id = 1, 113 114 .type = SSPP_TYPE_DMA, 114 115 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 116 117 .name = "sspp_9", .id = SSPP_DMA1, 117 118 .base = 0x26000, .len = 0x1f0, 118 119 .features = DMA_SDM845_MASK, 119 - .sblk = &sdm845_dma_sblk_1, 120 + .sblk = &dpu_dma_sblk, 120 121 .xin_id = 5, 121 122 .type = SSPP_TYPE_DMA, 122 123 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 124 125 .name = "sspp_10", .id = SSPP_DMA2, 125 126 .base = 0x28000, .len = 0x1f0, 126 127 .features = DMA_CURSOR_SDM845_MASK, 127 - .sblk = &sdm845_dma_sblk_2, 128 + .sblk = &dpu_dma_sblk, 128 129 .xin_id = 9, 129 130 .type = SSPP_TYPE_DMA, 130 131 .clk_ctrl = DPU_CLK_CTRL_DMA2, ··· 132 133 .name = "sspp_11", .id = SSPP_DMA3, 133 134 .base = 0x2a000, .len = 0x1f0, 134 135 .features = DMA_CURSOR_SDM845_MASK, 135 - .sblk = &sdm845_dma_sblk_3, 136 + .sblk = &dpu_dma_sblk, 136 137 .xin_id = 13, 137 138 .type = SSPP_TYPE_DMA, 138 139 .clk_ctrl = DPU_CLK_CTRL_DMA3,
+9 -9
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
··· 10 10 static const struct dpu_caps sc8180x_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0xb, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED3, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 75 76 .name = "sspp_0", .id = SSPP_VIG0, 76 77 .base = 0x4000, .len = 0x1f0, 77 78 .features = VIG_SDM845_MASK, 78 - .sblk = &sdm845_vig_sblk_0, 79 + .sblk = &dpu_vig_sblk_qseed3_1_4, 79 80 .xin_id = 0, 80 81 .type = SSPP_TYPE_VIG, 81 82 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 83 84 .name = "sspp_1", .id = SSPP_VIG1, 84 85 .base = 0x6000, .len = 0x1f0, 85 86 .features = VIG_SDM845_MASK, 86 - .sblk = &sdm845_vig_sblk_1, 87 + .sblk = &dpu_vig_sblk_qseed3_1_4, 87 88 .xin_id = 4, 88 89 .type = SSPP_TYPE_VIG, 89 90 .clk_ctrl = DPU_CLK_CTRL_VIG1, ··· 91 92 .name = "sspp_2", .id = SSPP_VIG2, 92 93 .base = 0x8000, .len = 0x1f0, 93 94 .features = VIG_SDM845_MASK, 94 - .sblk = &sdm845_vig_sblk_2, 95 + .sblk = &dpu_vig_sblk_qseed3_1_4, 95 96 .xin_id = 8, 96 97 .type = SSPP_TYPE_VIG, 97 98 .clk_ctrl = DPU_CLK_CTRL_VIG2, ··· 99 100 .name = "sspp_3", .id = SSPP_VIG3, 100 101 .base = 0xa000, .len = 0x1f0, 101 102 .features = VIG_SDM845_MASK, 102 - .sblk = &sdm845_vig_sblk_3, 103 + .sblk = &dpu_vig_sblk_qseed3_1_4, 103 104 .xin_id = 12, 104 105 .type = SSPP_TYPE_VIG, 105 106 .clk_ctrl = DPU_CLK_CTRL_VIG3, ··· 107 108 .name = "sspp_8", .id = SSPP_DMA0, 108 109 .base = 0x24000, .len = 0x1f0, 109 110 .features = DMA_SDM845_MASK, 110 - .sblk = &sdm845_dma_sblk_0, 111 + .sblk = &dpu_dma_sblk, 111 112 .xin_id = 1, 112 113 .type = SSPP_TYPE_DMA, 113 114 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 115 116 .name = "sspp_9", .id = SSPP_DMA1, 116 117 .base = 0x26000, .len = 0x1f0, 117 118 .features = DMA_SDM845_MASK, 118 - .sblk = &sdm845_dma_sblk_1, 119 + .sblk = &dpu_dma_sblk, 119 120 .xin_id = 5, 120 121 .type = SSPP_TYPE_DMA, 121 122 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 123 124 .name = "sspp_10", .id = SSPP_DMA2, 124 125 .base = 0x28000, .len = 0x1f0, 125 126 .features = DMA_CURSOR_SDM845_MASK, 126 - .sblk = &sdm845_dma_sblk_2, 127 + .sblk = &dpu_dma_sblk, 127 128 .xin_id = 9, 128 129 .type = SSPP_TYPE_DMA, 129 130 .clk_ctrl = DPU_CLK_CTRL_DMA2, ··· 131 132 .name = "sspp_11", .id = SSPP_DMA3, 132 133 .base = 0x2a000, .len = 0x1f0, 133 134 .features = DMA_CURSOR_SDM845_MASK, 134 - .sblk = &sdm845_dma_sblk_3, 135 + .sblk = &dpu_dma_sblk, 135 136 .xin_id = 13, 136 137 .type = SSPP_TYPE_DMA, 137 138 .clk_ctrl = DPU_CLK_CTRL_DMA3, ··· 366 367 .min_llcc_ib = 800000, 367 368 .min_dram_ib = 800000, 368 369 .danger_lut_tbl = {0xf, 0xffff, 0x0}, 370 + .safe_lut_tbl = {0xfff0, 0xf000, 0xffff}, 369 371 .qos_lut_tbl = { 370 372 {.nentry = ARRAY_SIZE(sc7180_qos_linear), 371 373 .entries = sc7180_qos_linear
+4 -4
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
··· 68 68 { 69 69 .name = "sspp_0", .id = SSPP_VIG0, 70 70 .base = 0x4000, .len = 0x1f0, 71 - .features = VIG_SM6125_MASK, 72 - .sblk = &sm6125_vig_sblk_0, 71 + .features = VIG_SDM845_MASK, 72 + .sblk = &dpu_vig_sblk_qseed3_2_4, 73 73 .xin_id = 0, 74 74 .type = SSPP_TYPE_VIG, 75 75 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 77 77 .name = "sspp_8", .id = SSPP_DMA0, 78 78 .base = 0x24000, .len = 0x1f0, 79 79 .features = DMA_SDM845_MASK, 80 - .sblk = &sdm845_dma_sblk_0, 80 + .sblk = &dpu_dma_sblk, 81 81 .xin_id = 1, 82 82 .type = SSPP_TYPE_DMA, 83 83 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 85 85 .name = "sspp_9", .id = SSPP_DMA1, 86 86 .base = 0x26000, .len = 0x1f0, 87 87 .features = DMA_SDM845_MASK, 88 - .sblk = &sdm845_dma_sblk_1, 88 + .sblk = &dpu_dma_sblk, 89 89 .xin_id = 5, 90 90 .type = SSPP_TYPE_DMA, 91 91 .clk_ctrl = DPU_CLK_CTRL_DMA1,
+16 -16
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
··· 10 10 static const struct dpu_caps sm8250_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0xb, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 31 32 [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, 32 33 [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, 33 34 [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, 34 - [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 }, 35 + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, 35 36 }, 36 37 }; 37 38 ··· 74 75 { 75 76 .name = "sspp_0", .id = SSPP_VIG0, 76 77 .base = 0x4000, .len = 0x1f8, 77 - .features = VIG_SC7180_MASK_SDMA, 78 - .sblk = &sm8250_vig_sblk_0, 78 + .features = VIG_SDM845_MASK_SDMA, 79 + .sblk = &dpu_vig_sblk_qseed3_3_0, 79 80 .xin_id = 0, 80 81 .type = SSPP_TYPE_VIG, 81 82 .clk_ctrl = DPU_CLK_CTRL_VIG0, 82 83 }, { 83 84 .name = "sspp_1", .id = SSPP_VIG1, 84 85 .base = 0x6000, .len = 0x1f8, 85 - .features = VIG_SC7180_MASK_SDMA, 86 - .sblk = &sm8250_vig_sblk_1, 86 + .features = VIG_SDM845_MASK_SDMA, 87 + .sblk = &dpu_vig_sblk_qseed3_3_0, 87 88 .xin_id = 4, 88 89 .type = SSPP_TYPE_VIG, 89 90 .clk_ctrl = DPU_CLK_CTRL_VIG1, 90 91 }, { 91 92 .name = "sspp_2", .id = SSPP_VIG2, 92 93 .base = 0x8000, .len = 0x1f8, 93 - .features = VIG_SC7180_MASK_SDMA, 94 - .sblk = &sm8250_vig_sblk_2, 94 + .features = VIG_SDM845_MASK_SDMA, 95 + .sblk = &dpu_vig_sblk_qseed3_3_0, 95 96 .xin_id = 8, 96 97 .type = SSPP_TYPE_VIG, 97 98 .clk_ctrl = DPU_CLK_CTRL_VIG2, 98 99 }, { 99 100 .name = "sspp_3", .id = SSPP_VIG3, 100 101 .base = 0xa000, .len = 0x1f8, 101 - .features = VIG_SC7180_MASK_SDMA, 102 - .sblk = &sm8250_vig_sblk_3, 102 + .features = VIG_SDM845_MASK_SDMA, 103 + .sblk = &dpu_vig_sblk_qseed3_3_0, 103 104 .xin_id = 12, 104 105 .type = SSPP_TYPE_VIG, 105 106 .clk_ctrl = DPU_CLK_CTRL_VIG3, ··· 107 108 .name = "sspp_8", .id = SSPP_DMA0, 108 109 .base = 0x24000, .len = 0x1f8, 109 110 .features = DMA_SDM845_MASK_SDMA, 110 - .sblk = &sdm845_dma_sblk_0, 111 + .sblk = &dpu_dma_sblk, 111 112 .xin_id = 1, 112 113 .type = SSPP_TYPE_DMA, 113 114 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 115 116 .name = "sspp_9", .id = SSPP_DMA1, 116 117 .base = 0x26000, .len = 0x1f8, 117 118 .features = DMA_SDM845_MASK_SDMA, 118 - .sblk = &sdm845_dma_sblk_1, 119 + .sblk = &dpu_dma_sblk, 119 120 .xin_id = 5, 120 121 .type = SSPP_TYPE_DMA, 121 122 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 123 124 .name = "sspp_10", .id = SSPP_DMA2, 124 125 .base = 0x28000, .len = 0x1f8, 125 126 .features = DMA_CURSOR_SDM845_MASK_SDMA, 126 - .sblk = &sdm845_dma_sblk_2, 127 + .sblk = &dpu_dma_sblk, 127 128 .xin_id = 9, 128 129 .type = SSPP_TYPE_DMA, 129 130 .clk_ctrl = DPU_CLK_CTRL_DMA2, ··· 131 132 .name = "sspp_11", .id = SSPP_DMA3, 132 133 .base = 0x2a000, .len = 0x1f8, 133 134 .features = DMA_CURSOR_SDM845_MASK_SDMA, 134 - .sblk = &sdm845_dma_sblk_3, 135 + .sblk = &dpu_dma_sblk, 135 136 .xin_id = 13, 136 137 .type = SSPP_TYPE_DMA, 137 138 .clk_ctrl = DPU_CLK_CTRL_DMA3, ··· 336 337 .name = "wb_2", .id = WB_2, 337 338 .base = 0x65000, .len = 0x2c8, 338 339 .features = WB_SM8250_MASK, 339 - .format_list = wb2_formats, 340 - .num_formats = ARRAY_SIZE(wb2_formats), 340 + .format_list = wb2_formats_rgb_yuv, 341 + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), 341 342 .clk_ctrl = DPU_CLK_CTRL_WB2, 342 343 .xin_id = 6, 343 344 .vbif_idx = VBIF_RT, ··· 384 385 .mdss_ver = &sm8250_mdss_ver, 385 386 .caps = &sm8250_dpu_caps, 386 387 .mdp = 
&sm8250_mdp, 388 + .cdm = &sc7280_cdm, 387 389 .ctl_count = ARRAY_SIZE(sm8250_ctl), 388 390 .ctl = sm8250_ctl, 389 391 .sspp_count = ARRAY_SIZE(sm8250_sspp),
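
For SM8250 the writeback block now advertises the wb2_formats_rgb_yuv list and the catalog gains a .cdm entry, meaning YUV writeback output goes through a CDM block. The later dpu_encoder.c hunk in this diff makes the matching topology decision: a YUV writeback framebuffer requires a CDM, and a mismatch against the currently assigned CDM forces a modeset. The snippet below is a simplified stand-alone sketch of that decision only; the format enum and helper names are made up.

/* Simplified sketch of the "does this writeback job need a CDM" check.
 * FMT_NV12, fmt_is_yuv() and wb_check_cdm() are invented names.
 */
#include <stdbool.h>
#include <stdio.h>

enum fmt { FMT_RGB888, FMT_NV12 };

static bool fmt_is_yuv(enum fmt f)
{
	return f == FMT_NV12;
}

struct topology {
	bool needs_cdm;
};

static bool wb_check_cdm(enum fmt fb_fmt, bool have_cdm, struct topology *t)
{
	t->needs_cdm = fmt_is_yuv(fb_fmt);

	/* A change in the CDM requirement means resources must be re-reserved. */
	return t->needs_cdm != have_cdm;
}

int main(void)
{
	struct topology t;
	bool mode_changed = wb_check_cdm(FMT_NV12, false, &t);

	printf("needs_cdm=%d mode_changed=%d\n", t.needs_cdm, mode_changed);
	return 0;
}
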
+8 -9
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
··· 10 10 static const struct dpu_caps sc7180_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0x9, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 14 13 .has_dim_layer = true, 15 14 .has_idle_pc = true, 16 15 .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, ··· 24 25 [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, 25 26 [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, 26 27 [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 }, 27 - [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 }, 28 + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, 28 29 }, 29 30 }; 30 31 ··· 51 52 { 52 53 .name = "sspp_0", .id = SSPP_VIG0, 53 54 .base = 0x4000, .len = 0x1f8, 54 - .features = VIG_SC7180_MASK, 55 - .sblk = &sc7180_vig_sblk_0, 55 + .features = VIG_SDM845_MASK, 56 + .sblk = &dpu_vig_sblk_qseed3_3_0, 56 57 .xin_id = 0, 57 58 .type = SSPP_TYPE_VIG, 58 59 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 60 61 .name = "sspp_8", .id = SSPP_DMA0, 61 62 .base = 0x24000, .len = 0x1f8, 62 63 .features = DMA_SDM845_MASK, 63 - .sblk = &sdm845_dma_sblk_0, 64 + .sblk = &dpu_dma_sblk, 64 65 .xin_id = 1, 65 66 .type = SSPP_TYPE_DMA, 66 67 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 68 69 .name = "sspp_9", .id = SSPP_DMA1, 69 70 .base = 0x26000, .len = 0x1f8, 70 71 .features = DMA_CURSOR_SDM845_MASK, 71 - .sblk = &sdm845_dma_sblk_1, 72 + .sblk = &dpu_dma_sblk, 72 73 .xin_id = 5, 73 74 .type = SSPP_TYPE_DMA, 74 75 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 76 77 .name = "sspp_10", .id = SSPP_DMA2, 77 78 .base = 0x28000, .len = 0x1f8, 78 79 .features = DMA_CURSOR_SDM845_MASK, 79 - .sblk = &sdm845_dma_sblk_2, 80 + .sblk = &dpu_dma_sblk, 80 81 .xin_id = 9, 81 82 .type = SSPP_TYPE_DMA, 82 83 .clk_ctrl = DPU_CLK_CTRL_DMA2, ··· 157 158 .name = "wb_2", .id = WB_2, 158 159 .base = 0x65000, .len = 0x2c8, 159 160 .features = WB_SM8250_MASK, 160 - .format_list = wb2_formats, 161 - .num_formats = ARRAY_SIZE(wb2_formats), 161 + .format_list = wb2_formats_rgb, 162 + .num_formats = ARRAY_SIZE(wb2_formats_rgb), 162 163 .clk_ctrl = DPU_CLK_CTRL_WB2, 163 164 .xin_id = 6, 164 165 .vbif_idx = VBIF_RT,
+3 -4
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
··· 10 10 static const struct dpu_caps sm6115_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0x4, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 14 13 .has_dim_layer = true, 15 14 .has_idle_pc = true, 16 15 .max_linewidth = 2160, ··· 38 39 { 39 40 .name = "sspp_0", .id = SSPP_VIG0, 40 41 .base = 0x4000, .len = 0x1f8, 41 - .features = VIG_SC7180_MASK, 42 - .sblk = &sm6115_vig_sblk_0, 42 + .features = VIG_SDM845_MASK, 43 + .sblk = &dpu_vig_sblk_qseed3_3_0, 43 44 .xin_id = 0, 44 45 .type = SSPP_TYPE_VIG, 45 46 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 47 48 .name = "sspp_8", .id = SSPP_DMA0, 48 49 .base = 0x24000, .len = 0x1f8, 49 50 .features = DMA_SDM845_MASK, 50 - .sblk = &sdm845_dma_sblk_0, 51 + .sblk = &dpu_dma_sblk, 51 52 .xin_id = 1, 52 53 .type = SSPP_TYPE_DMA, 53 54 .clk_ctrl = DPU_CLK_CTRL_DMA0,
+5 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
··· 11 11 static const struct dpu_caps sm6350_dpu_caps = { 12 12 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 13 13 .max_mixer_blendstages = 0x7, 14 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 15 14 .has_src_split = true, 16 15 .has_dim_layer = true, 17 16 .has_idle_pc = true, ··· 58 59 { 59 60 .name = "sspp_0", .id = SSPP_VIG0, 60 61 .base = 0x4000, .len = 0x1f8, 61 - .features = VIG_SC7180_MASK, 62 - .sblk = &sc7180_vig_sblk_0, 62 + .features = VIG_SDM845_MASK, 63 + .sblk = &dpu_vig_sblk_qseed3_3_0, 63 64 .xin_id = 0, 64 65 .type = SSPP_TYPE_VIG, 65 66 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 67 68 .name = "sspp_8", .id = SSPP_DMA0, 68 69 .base = 0x24000, .len = 0x1f8, 69 70 .features = DMA_SDM845_MASK, 70 - .sblk = &sdm845_dma_sblk_0, 71 + .sblk = &dpu_dma_sblk, 71 72 .xin_id = 1, 72 73 .type = SSPP_TYPE_DMA, 73 74 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 75 76 .name = "sspp_9", .id = SSPP_DMA1, 76 77 .base = 0x26000, .len = 0x1f8, 77 78 .features = DMA_CURSOR_SDM845_MASK, 78 - .sblk = &sdm845_dma_sblk_1, 79 + .sblk = &dpu_dma_sblk, 79 80 .xin_id = 5, 80 81 .type = SSPP_TYPE_DMA, 81 82 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 83 84 .name = "sspp_10", .id = SSPP_DMA2, 84 85 .base = 0x28000, .len = 0x1f8, 85 86 .features = DMA_CURSOR_SDM845_MASK, 86 - .sblk = &sdm845_dma_sblk_2, 87 + .sblk = &dpu_dma_sblk, 87 88 .xin_id = 9, 88 89 .type = SSPP_TYPE_DMA, 89 90 .clk_ctrl = DPU_CLK_CTRL_DMA2,
+2 -2
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
··· 39 39 .name = "sspp_0", .id = SSPP_VIG0, 40 40 .base = 0x4000, .len = 0x1f8, 41 41 .features = VIG_QCM2290_MASK, 42 - .sblk = &qcm2290_vig_sblk_0, 42 + .sblk = &dpu_vig_sblk_noscale, 43 43 .xin_id = 0, 44 44 .type = SSPP_TYPE_VIG, 45 45 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 47 47 .name = "sspp_8", .id = SSPP_DMA0, 48 48 .base = 0x24000, .len = 0x1f8, 49 49 .features = DMA_SDM845_MASK, 50 - .sblk = &qcm2290_dma_sblk_0, 50 + .sblk = &dpu_dma_sblk, 51 51 .xin_id = 1, 52 52 .type = SSPP_TYPE_DMA, 53 53 .clk_ctrl = DPU_CLK_CTRL_DMA0,
+3 -4
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
··· 11 11 static const struct dpu_caps sm6375_dpu_caps = { 12 12 .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, 13 13 .max_mixer_blendstages = 0x4, 14 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, 17 16 .max_linewidth = 2160, ··· 39 40 { 40 41 .name = "sspp_0", .id = SSPP_VIG0, 41 42 .base = 0x4000, .len = 0x1f8, 42 - .features = VIG_SC7180_MASK, 43 - .sblk = &sm6115_vig_sblk_0, 43 + .features = VIG_SDM845_MASK, 44 + .sblk = &dpu_vig_sblk_qseed3_3_0, 44 45 .xin_id = 0, 45 46 .type = SSPP_TYPE_VIG, 46 47 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 48 49 .name = "sspp_8", .id = SSPP_DMA0, 49 50 .base = 0x24000, .len = 0x1f8, 50 51 .features = DMA_SDM845_MASK, 51 - .sblk = &sdm845_dma_sblk_0, 52 + .sblk = &dpu_dma_sblk, 52 53 .xin_id = 1, 53 54 .type = SSPP_TYPE_DMA, 54 55 .clk_ctrl = DPU_CLK_CTRL_DMA0,
+34 -17
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
··· 10 10 static const struct dpu_caps sm8350_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0xb, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 30 31 [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, 31 32 [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, 32 33 [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, 34 + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, 33 35 [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, 34 36 }, 35 37 }; ··· 74 74 { 75 75 .name = "sspp_0", .id = SSPP_VIG0, 76 76 .base = 0x4000, .len = 0x1f8, 77 - .features = VIG_SC7180_MASK, 78 - .sblk = &sm8250_vig_sblk_0, 77 + .features = VIG_SDM845_MASK_SDMA, 78 + .sblk = &dpu_vig_sblk_qseed3_3_0, 79 79 .xin_id = 0, 80 80 .type = SSPP_TYPE_VIG, 81 81 .clk_ctrl = DPU_CLK_CTRL_VIG0, 82 82 }, { 83 83 .name = "sspp_1", .id = SSPP_VIG1, 84 84 .base = 0x6000, .len = 0x1f8, 85 - .features = VIG_SC7180_MASK, 86 - .sblk = &sm8250_vig_sblk_1, 85 + .features = VIG_SDM845_MASK_SDMA, 86 + .sblk = &dpu_vig_sblk_qseed3_3_0, 87 87 .xin_id = 4, 88 88 .type = SSPP_TYPE_VIG, 89 89 .clk_ctrl = DPU_CLK_CTRL_VIG1, 90 90 }, { 91 91 .name = "sspp_2", .id = SSPP_VIG2, 92 92 .base = 0x8000, .len = 0x1f8, 93 - .features = VIG_SC7180_MASK, 94 - .sblk = &sm8250_vig_sblk_2, 93 + .features = VIG_SDM845_MASK_SDMA, 94 + .sblk = &dpu_vig_sblk_qseed3_3_0, 95 95 .xin_id = 8, 96 96 .type = SSPP_TYPE_VIG, 97 97 .clk_ctrl = DPU_CLK_CTRL_VIG2, 98 98 }, { 99 99 .name = "sspp_3", .id = SSPP_VIG3, 100 100 .base = 0xa000, .len = 0x1f8, 101 - .features = VIG_SC7180_MASK, 102 - .sblk = &sm8250_vig_sblk_3, 101 + .features = VIG_SDM845_MASK_SDMA, 102 + .sblk = &dpu_vig_sblk_qseed3_3_0, 103 103 .xin_id = 12, 104 104 .type = SSPP_TYPE_VIG, 105 105 .clk_ctrl = DPU_CLK_CTRL_VIG3, 106 106 }, { 107 107 .name = "sspp_8", .id = SSPP_DMA0, 108 108 .base = 0x24000, .len = 0x1f8, 109 - .features = DMA_SDM845_MASK, 110 - .sblk = &sdm845_dma_sblk_0, 109 + .features = DMA_SDM845_MASK_SDMA, 110 + .sblk = &dpu_dma_sblk, 111 111 .xin_id = 1, 112 112 .type = SSPP_TYPE_DMA, 113 113 .clk_ctrl = DPU_CLK_CTRL_DMA0, 114 114 }, { 115 115 .name = "sspp_9", .id = SSPP_DMA1, 116 116 .base = 0x26000, .len = 0x1f8, 117 - .features = DMA_SDM845_MASK, 118 - .sblk = &sdm845_dma_sblk_1, 117 + .features = DMA_SDM845_MASK_SDMA, 118 + .sblk = &dpu_dma_sblk, 119 119 .xin_id = 5, 120 120 .type = SSPP_TYPE_DMA, 121 121 .clk_ctrl = DPU_CLK_CTRL_DMA1, 122 122 }, { 123 123 .name = "sspp_10", .id = SSPP_DMA2, 124 124 .base = 0x28000, .len = 0x1f8, 125 - .features = DMA_CURSOR_SDM845_MASK, 126 - .sblk = &sdm845_dma_sblk_2, 125 + .features = DMA_CURSOR_SDM845_MASK_SDMA, 126 + .sblk = &dpu_dma_sblk, 127 127 .xin_id = 9, 128 128 .type = SSPP_TYPE_DMA, 129 129 .clk_ctrl = DPU_CLK_CTRL_DMA2, 130 130 }, { 131 131 .name = "sspp_11", .id = SSPP_DMA3, 132 132 .base = 0x2a000, .len = 0x1f8, 133 - .features = DMA_CURSOR_SDM845_MASK, 134 - .sblk = &sdm845_dma_sblk_3, 133 + .features = DMA_CURSOR_SDM845_MASK_SDMA, 134 + .sblk = &dpu_dma_sblk, 135 135 .xin_id = 13, 136 136 .type = SSPP_TYPE_DMA, 137 137 .clk_ctrl = DPU_CLK_CTRL_DMA3, ··· 298 298 }, 299 299 }; 300 300 301 + static const struct dpu_wb_cfg sm8350_wb[] = { 302 + { 303 + .name = "wb_2", .id = WB_2, 304 + .base = 0x65000, .len = 0x2c8, 305 + .features = WB_SM8250_MASK, 306 + .format_list = wb2_formats_rgb, 307 + .num_formats = ARRAY_SIZE(wb2_formats_rgb), 308 + .clk_ctrl = 
DPU_CLK_CTRL_WB2, 309 + .xin_id = 6, 310 + .vbif_idx = VBIF_RT, 311 + .maxlinewidth = 4096, 312 + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), 313 + }, 314 + }; 315 + 301 316 static const struct dpu_intf_cfg sm8350_intf[] = { 302 317 { 303 318 .name = "intf_0", .id = INTF_0, ··· 408 393 .dsc = sm8350_dsc, 409 394 .merge_3d_count = ARRAY_SIZE(sm8350_merge_3d), 410 395 .merge_3d = sm8350_merge_3d, 396 + .wb_count = ARRAY_SIZE(sm8350_wb), 397 + .wb = sm8350_wb, 411 398 .intf_count = ARRAY_SIZE(sm8350_intf), 412 399 .intf = sm8350_intf, 413 400 .vbif_count = ARRAY_SIZE(sdm845_vbif),
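
SM8350 (and SM8450 below) gain a wb_2 writeback block together with a DPU_CLK_CTRL_WB2 clock-control entry, wired into the top-level config via wb_count/wb. Its .intr_wb_done field uses the DPU_IRQ_IDX() encoding seen throughout these tables, which identifies an interrupt by its register set plus bit position packed into a single index. The example below illustrates that kind of packed index with guessed shift/width values; it is not the driver's actual layout.

/* Illustrative packed-IRQ-index encoding: register-set index and bit
 * position combined into one number that can be split apart later.
 * The 32-bit-per-register assumption is made up for this sketch.
 */
#include <stdio.h>
#include <assert.h>

#define IRQ_IDX(reg_set, bit)	(((reg_set) * 32) + (bit))
#define IRQ_REG(idx)		((idx) / 32)
#define IRQ_BIT(idx)		((idx) % 32)

int main(void)
{
	unsigned int wb_done = IRQ_IDX(0, 4);	/* e.g. first register set, bit 4 */

	assert(IRQ_REG(wb_done) == 0 && IRQ_BIT(wb_done) == 4);
	printf("wb_done irq: reg set %u, bit %u\n",
	       IRQ_REG(wb_done), IRQ_BIT(wb_done));
	return 0;
}
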
+8 -8
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
··· 10 10 static const struct dpu_caps sc7280_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0x7, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 14 13 .has_dim_layer = true, 15 14 .has_idle_pc = true, 16 15 .max_linewidth = 2400, ··· 24 25 [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, 25 26 [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, 26 27 [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 }, 27 - [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 }, 28 + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, 28 29 }, 29 30 }; 30 31 ··· 57 58 .name = "sspp_0", .id = SSPP_VIG0, 58 59 .base = 0x4000, .len = 0x1f8, 59 60 .features = VIG_SC7280_MASK_SDMA, 60 - .sblk = &sc7280_vig_sblk_0, 61 + .sblk = &dpu_vig_sblk_qseed3_3_0_rot_v2, 61 62 .xin_id = 0, 62 63 .type = SSPP_TYPE_VIG, 63 64 .clk_ctrl = DPU_CLK_CTRL_VIG0, ··· 65 66 .name = "sspp_8", .id = SSPP_DMA0, 66 67 .base = 0x24000, .len = 0x1f8, 67 68 .features = DMA_SDM845_MASK_SDMA, 68 - .sblk = &sdm845_dma_sblk_0, 69 + .sblk = &dpu_dma_sblk, 69 70 .xin_id = 1, 70 71 .type = SSPP_TYPE_DMA, 71 72 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 73 74 .name = "sspp_9", .id = SSPP_DMA1, 74 75 .base = 0x26000, .len = 0x1f8, 75 76 .features = DMA_CURSOR_SDM845_MASK_SDMA, 76 - .sblk = &sdm845_dma_sblk_1, 77 + .sblk = &dpu_dma_sblk, 77 78 .xin_id = 5, 78 79 .type = SSPP_TYPE_DMA, 79 80 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 81 82 .name = "sspp_10", .id = SSPP_DMA2, 82 83 .base = 0x28000, .len = 0x1f8, 83 84 .features = DMA_CURSOR_SDM845_MASK_SDMA, 84 - .sblk = &sdm845_dma_sblk_2, 85 + .sblk = &dpu_dma_sblk, 85 86 .xin_id = 9, 86 87 .type = SSPP_TYPE_DMA, 87 88 .clk_ctrl = DPU_CLK_CTRL_DMA2, ··· 169 170 .name = "wb_2", .id = WB_2, 170 171 .base = 0x65000, .len = 0x2c8, 171 172 .features = WB_SM8250_MASK, 172 - .format_list = wb2_formats, 173 - .num_formats = ARRAY_SIZE(wb2_formats), 173 + .format_list = wb2_formats_rgb_yuv, 174 + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), 174 175 .clk_ctrl = DPU_CLK_CTRL_WB2, 175 176 .xin_id = 6, 176 177 .vbif_idx = VBIF_RT, ··· 248 249 .mdss_ver = &sc7280_mdss_ver, 249 250 .caps = &sc7280_dpu_caps, 250 251 .mdp = &sc7280_mdp, 252 + .cdm = &sc7280_cdm, 251 253 .ctl_count = ARRAY_SIZE(sc7280_ctl), 252 254 .ctl = sc7280_ctl, 253 255 .sspp_count = ARRAY_SIZE(sc7280_sspp),
+12 -13
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
··· 10 10 static const struct dpu_caps sc8280xp_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 11, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 74 75 { 75 76 .name = "sspp_0", .id = SSPP_VIG0, 76 77 .base = 0x4000, .len = 0x2ac, 77 - .features = VIG_SC7180_MASK, 78 - .sblk = &sm8250_vig_sblk_0, 78 + .features = VIG_SDM845_MASK, 79 + .sblk = &dpu_vig_sblk_qseed3_3_0, 79 80 .xin_id = 0, 80 81 .type = SSPP_TYPE_VIG, 81 82 .clk_ctrl = DPU_CLK_CTRL_VIG0, 82 83 }, { 83 84 .name = "sspp_1", .id = SSPP_VIG1, 84 85 .base = 0x6000, .len = 0x2ac, 85 - .features = VIG_SC7180_MASK, 86 - .sblk = &sm8250_vig_sblk_1, 86 + .features = VIG_SDM845_MASK, 87 + .sblk = &dpu_vig_sblk_qseed3_3_0, 87 88 .xin_id = 4, 88 89 .type = SSPP_TYPE_VIG, 89 90 .clk_ctrl = DPU_CLK_CTRL_VIG1, 90 91 }, { 91 92 .name = "sspp_2", .id = SSPP_VIG2, 92 93 .base = 0x8000, .len = 0x2ac, 93 - .features = VIG_SC7180_MASK, 94 - .sblk = &sm8250_vig_sblk_2, 94 + .features = VIG_SDM845_MASK, 95 + .sblk = &dpu_vig_sblk_qseed3_3_0, 95 96 .xin_id = 8, 96 97 .type = SSPP_TYPE_VIG, 97 98 .clk_ctrl = DPU_CLK_CTRL_VIG2, 98 99 }, { 99 100 .name = "sspp_3", .id = SSPP_VIG3, 100 101 .base = 0xa000, .len = 0x2ac, 101 - .features = VIG_SC7180_MASK, 102 - .sblk = &sm8250_vig_sblk_3, 102 + .features = VIG_SDM845_MASK, 103 + .sblk = &dpu_vig_sblk_qseed3_3_0, 103 104 .xin_id = 12, 104 105 .type = SSPP_TYPE_VIG, 105 106 .clk_ctrl = DPU_CLK_CTRL_VIG3, ··· 107 108 .name = "sspp_8", .id = SSPP_DMA0, 108 109 .base = 0x24000, .len = 0x2ac, 109 110 .features = DMA_SDM845_MASK, 110 - .sblk = &sdm845_dma_sblk_0, 111 + .sblk = &dpu_dma_sblk, 111 112 .xin_id = 1, 112 113 .type = SSPP_TYPE_DMA, 113 114 .clk_ctrl = DPU_CLK_CTRL_DMA0, ··· 115 116 .name = "sspp_9", .id = SSPP_DMA1, 116 117 .base = 0x26000, .len = 0x2ac, 117 118 .features = DMA_SDM845_MASK, 118 - .sblk = &sdm845_dma_sblk_1, 119 + .sblk = &dpu_dma_sblk, 119 120 .xin_id = 5, 120 121 .type = SSPP_TYPE_DMA, 121 122 .clk_ctrl = DPU_CLK_CTRL_DMA1, ··· 123 124 .name = "sspp_10", .id = SSPP_DMA2, 124 125 .base = 0x28000, .len = 0x2ac, 125 126 .features = DMA_CURSOR_SDM845_MASK, 126 - .sblk = &sdm845_dma_sblk_2, 127 + .sblk = &dpu_dma_sblk, 127 128 .xin_id = 9, 128 129 .type = SSPP_TYPE_DMA, 129 130 .clk_ctrl = DPU_CLK_CTRL_DMA2, ··· 131 132 .name = "sspp_11", .id = SSPP_DMA3, 132 133 .base = 0x2a000, .len = 0x2ac, 133 134 .features = DMA_CURSOR_SDM845_MASK, 134 - .sblk = &sdm845_dma_sblk_3, 135 + .sblk = &dpu_dma_sblk, 135 136 .xin_id = 13, 136 137 .type = SSPP_TYPE_DMA, 137 138 .clk_ctrl = DPU_CLK_CTRL_DMA3,
+34 -17
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
··· 10 10 static const struct dpu_caps sm8450_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0xb, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 31 32 [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, 32 33 [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, 33 34 [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, 35 + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, 34 36 [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, 35 37 }, 36 38 }; ··· 75 75 { 76 76 .name = "sspp_0", .id = SSPP_VIG0, 77 77 .base = 0x4000, .len = 0x32c, 78 - .features = VIG_SC7180_MASK, 79 - .sblk = &sm8250_vig_sblk_0, 78 + .features = VIG_SDM845_MASK_SDMA, 79 + .sblk = &dpu_vig_sblk_qseed3_3_1, 80 80 .xin_id = 0, 81 81 .type = SSPP_TYPE_VIG, 82 82 .clk_ctrl = DPU_CLK_CTRL_VIG0, 83 83 }, { 84 84 .name = "sspp_1", .id = SSPP_VIG1, 85 85 .base = 0x6000, .len = 0x32c, 86 - .features = VIG_SC7180_MASK, 87 - .sblk = &sm8250_vig_sblk_1, 86 + .features = VIG_SDM845_MASK_SDMA, 87 + .sblk = &dpu_vig_sblk_qseed3_3_1, 88 88 .xin_id = 4, 89 89 .type = SSPP_TYPE_VIG, 90 90 .clk_ctrl = DPU_CLK_CTRL_VIG1, 91 91 }, { 92 92 .name = "sspp_2", .id = SSPP_VIG2, 93 93 .base = 0x8000, .len = 0x32c, 94 - .features = VIG_SC7180_MASK, 95 - .sblk = &sm8250_vig_sblk_2, 94 + .features = VIG_SDM845_MASK_SDMA, 95 + .sblk = &dpu_vig_sblk_qseed3_3_1, 96 96 .xin_id = 8, 97 97 .type = SSPP_TYPE_VIG, 98 98 .clk_ctrl = DPU_CLK_CTRL_VIG2, 99 99 }, { 100 100 .name = "sspp_3", .id = SSPP_VIG3, 101 101 .base = 0xa000, .len = 0x32c, 102 - .features = VIG_SC7180_MASK, 103 - .sblk = &sm8250_vig_sblk_3, 102 + .features = VIG_SDM845_MASK_SDMA, 103 + .sblk = &dpu_vig_sblk_qseed3_3_1, 104 104 .xin_id = 12, 105 105 .type = SSPP_TYPE_VIG, 106 106 .clk_ctrl = DPU_CLK_CTRL_VIG3, 107 107 }, { 108 108 .name = "sspp_8", .id = SSPP_DMA0, 109 109 .base = 0x24000, .len = 0x32c, 110 - .features = DMA_SDM845_MASK, 111 - .sblk = &sdm845_dma_sblk_0, 110 + .features = DMA_SDM845_MASK_SDMA, 111 + .sblk = &dpu_dma_sblk, 112 112 .xin_id = 1, 113 113 .type = SSPP_TYPE_DMA, 114 114 .clk_ctrl = DPU_CLK_CTRL_DMA0, 115 115 }, { 116 116 .name = "sspp_9", .id = SSPP_DMA1, 117 117 .base = 0x26000, .len = 0x32c, 118 - .features = DMA_SDM845_MASK, 119 - .sblk = &sdm845_dma_sblk_1, 118 + .features = DMA_SDM845_MASK_SDMA, 119 + .sblk = &dpu_dma_sblk, 120 120 .xin_id = 5, 121 121 .type = SSPP_TYPE_DMA, 122 122 .clk_ctrl = DPU_CLK_CTRL_DMA1, 123 123 }, { 124 124 .name = "sspp_10", .id = SSPP_DMA2, 125 125 .base = 0x28000, .len = 0x32c, 126 - .features = DMA_CURSOR_SDM845_MASK, 127 - .sblk = &sdm845_dma_sblk_2, 126 + .features = DMA_CURSOR_SDM845_MASK_SDMA, 127 + .sblk = &dpu_dma_sblk, 128 128 .xin_id = 9, 129 129 .type = SSPP_TYPE_DMA, 130 130 .clk_ctrl = DPU_CLK_CTRL_DMA2, 131 131 }, { 132 132 .name = "sspp_11", .id = SSPP_DMA3, 133 133 .base = 0x2a000, .len = 0x32c, 134 - .features = DMA_CURSOR_SDM845_MASK, 135 - .sblk = &sdm845_dma_sblk_3, 134 + .features = DMA_CURSOR_SDM845_MASK_SDMA, 135 + .sblk = &dpu_dma_sblk, 136 136 .xin_id = 13, 137 137 .type = SSPP_TYPE_DMA, 138 138 .clk_ctrl = DPU_CLK_CTRL_DMA3, ··· 316 316 }, 317 317 }; 318 318 319 + static const struct dpu_wb_cfg sm8450_wb[] = { 320 + { 321 + .name = "wb_2", .id = WB_2, 322 + .base = 0x65000, .len = 0x2c8, 323 + .features = WB_SM8250_MASK, 324 + .format_list = wb2_formats_rgb, 325 + .num_formats = ARRAY_SIZE(wb2_formats_rgb), 326 + .clk_ctrl = 
DPU_CLK_CTRL_WB2, 327 + .xin_id = 6, 328 + .vbif_idx = VBIF_RT, 329 + .maxlinewidth = 4096, 330 + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), 331 + }, 332 + }; 333 + 319 334 static const struct dpu_intf_cfg sm8450_intf[] = { 320 335 { 321 336 .name = "intf_0", .id = INTF_0, ··· 426 411 .dsc = sm8450_dsc, 427 412 .merge_3d_count = ARRAY_SIZE(sm8450_merge_3d), 428 413 .merge_3d = sm8450_merge_3d, 414 + .wb_count = ARRAY_SIZE(sm8450_wb), 415 + .wb = sm8450_wb, 429 416 .intf_count = ARRAY_SIZE(sm8450_intf), 430 417 .intf = sm8450_intf, 431 418 .vbif_count = ARRAY_SIZE(sdm845_vbif),
+16 -17
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
··· 10 10 static const struct dpu_caps sm8550_dpu_caps = { 11 11 .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 12 12 .max_mixer_blendstages = 0xb, 13 - .qseed_type = DPU_SSPP_SCALER_QSEED4, 14 13 .has_src_split = true, 15 14 .has_dim_layer = true, 16 15 .has_idle_pc = true, ··· 66 67 { 67 68 .name = "sspp_0", .id = SSPP_VIG0, 68 69 .base = 0x4000, .len = 0x344, 69 - .features = VIG_SC7180_MASK, 70 - .sblk = &sm8550_vig_sblk_0, 70 + .features = VIG_SDM845_MASK, 71 + .sblk = &dpu_vig_sblk_qseed3_3_2, 71 72 .xin_id = 0, 72 73 .type = SSPP_TYPE_VIG, 73 74 }, { 74 75 .name = "sspp_1", .id = SSPP_VIG1, 75 76 .base = 0x6000, .len = 0x344, 76 - .features = VIG_SC7180_MASK, 77 - .sblk = &sm8550_vig_sblk_1, 77 + .features = VIG_SDM845_MASK, 78 + .sblk = &dpu_vig_sblk_qseed3_3_2, 78 79 .xin_id = 4, 79 80 .type = SSPP_TYPE_VIG, 80 81 }, { 81 82 .name = "sspp_2", .id = SSPP_VIG2, 82 83 .base = 0x8000, .len = 0x344, 83 - .features = VIG_SC7180_MASK, 84 - .sblk = &sm8550_vig_sblk_2, 84 + .features = VIG_SDM845_MASK, 85 + .sblk = &dpu_vig_sblk_qseed3_3_2, 85 86 .xin_id = 8, 86 87 .type = SSPP_TYPE_VIG, 87 88 }, { 88 89 .name = "sspp_3", .id = SSPP_VIG3, 89 90 .base = 0xa000, .len = 0x344, 90 - .features = VIG_SC7180_MASK, 91 - .sblk = &sm8550_vig_sblk_3, 91 + .features = VIG_SDM845_MASK, 92 + .sblk = &dpu_vig_sblk_qseed3_3_2, 92 93 .xin_id = 12, 93 94 .type = SSPP_TYPE_VIG, 94 95 }, { 95 96 .name = "sspp_8", .id = SSPP_DMA0, 96 97 .base = 0x24000, .len = 0x344, 97 98 .features = DMA_SDM845_MASK, 98 - .sblk = &sdm845_dma_sblk_0, 99 + .sblk = &dpu_dma_sblk, 99 100 .xin_id = 1, 100 101 .type = SSPP_TYPE_DMA, 101 102 }, { 102 103 .name = "sspp_9", .id = SSPP_DMA1, 103 104 .base = 0x26000, .len = 0x344, 104 105 .features = DMA_SDM845_MASK, 105 - .sblk = &sdm845_dma_sblk_1, 106 + .sblk = &dpu_dma_sblk, 106 107 .xin_id = 5, 107 108 .type = SSPP_TYPE_DMA, 108 109 }, { 109 110 .name = "sspp_10", .id = SSPP_DMA2, 110 111 .base = 0x28000, .len = 0x344, 111 112 .features = DMA_SDM845_MASK, 112 - .sblk = &sdm845_dma_sblk_2, 113 + .sblk = &dpu_dma_sblk, 113 114 .xin_id = 9, 114 115 .type = SSPP_TYPE_DMA, 115 116 }, { 116 117 .name = "sspp_11", .id = SSPP_DMA3, 117 118 .base = 0x2a000, .len = 0x344, 118 119 .features = DMA_SDM845_MASK, 119 - .sblk = &sdm845_dma_sblk_3, 120 + .sblk = &dpu_dma_sblk, 120 121 .xin_id = 13, 121 122 .type = SSPP_TYPE_DMA, 122 123 }, { 123 124 .name = "sspp_12", .id = SSPP_DMA4, 124 125 .base = 0x2c000, .len = 0x344, 125 126 .features = DMA_CURSOR_SDM845_MASK, 126 - .sblk = &sm8550_dma_sblk_4, 127 + .sblk = &dpu_dma_sblk, 127 128 .xin_id = 14, 128 129 .type = SSPP_TYPE_DMA, 129 130 }, { 130 131 .name = "sspp_13", .id = SSPP_DMA5, 131 132 .base = 0x2e000, .len = 0x344, 132 133 .features = DMA_CURSOR_SDM845_MASK, 133 - .sblk = &sm8550_dma_sblk_5, 134 + .sblk = &dpu_dma_sblk, 134 135 .xin_id = 15, 135 136 .type = SSPP_TYPE_DMA, 136 137 }, ··· 315 316 .name = "wb_2", .id = WB_2, 316 317 .base = 0x65000, .len = 0x2c8, 317 318 .features = WB_SM8250_MASK, 318 - .format_list = wb2_formats, 319 - .num_formats = ARRAY_SIZE(wb2_formats), 319 + .format_list = wb2_formats_rgb, 320 + .num_formats = ARRAY_SIZE(wb2_formats_rgb), 320 321 .xin_id = 6, 321 322 .vbif_idx = VBIF_RT, 322 323 .maxlinewidth = 4096,
+9 -20
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 3 + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 4 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. 5 5 * Copyright (C) 2013 Red Hat 6 6 * Author: Rob Clark <robdclark@gmail.com> ··· 49 49 struct msm_drm_private *priv = crtc->dev->dev_private; 50 50 51 51 return to_dpu_kms(priv->kms); 52 - } 53 - 54 - static void dpu_crtc_destroy(struct drm_crtc *crtc) 55 - { 56 - struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 57 - 58 - if (!crtc) 59 - return; 60 - 61 - drm_crtc_cleanup(crtc); 62 - kfree(dpu_crtc); 63 52 } 64 53 65 54 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) ··· 114 125 continue; 115 126 116 127 /* Calculate MISR over 1 frame */ 117 - m->hw_lm->ops.setup_misr(m->hw_lm, true, 1); 128 + m->hw_lm->ops.setup_misr(m->hw_lm); 118 129 } 119 130 } 120 131 ··· 1424 1435 1425 1436 static const struct drm_crtc_funcs dpu_crtc_funcs = { 1426 1437 .set_config = drm_atomic_helper_set_config, 1427 - .destroy = dpu_crtc_destroy, 1428 1438 .page_flip = drm_atomic_helper_page_flip, 1429 1439 .reset = dpu_crtc_reset, 1430 1440 .atomic_duplicate_state = dpu_crtc_duplicate_state, ··· 1457 1469 struct dpu_crtc *dpu_crtc; 1458 1470 int i, ret; 1459 1471 1460 - dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); 1461 - if (!dpu_crtc) 1462 - return ERR_PTR(-ENOMEM); 1472 + dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base, 1473 + plane, cursor, 1474 + &dpu_crtc_funcs, 1475 + NULL); 1476 + 1477 + if (IS_ERR(dpu_crtc)) 1478 + return ERR_CAST(dpu_crtc); 1463 1479 1464 1480 crtc = &dpu_crtc->base; 1465 1481 crtc->dev = dev; ··· 1482 1490 kthread_init_work(&dpu_crtc->frame_events[i].work, 1483 1491 dpu_crtc_frame_event_work); 1484 1492 } 1485 - 1486 - drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs, 1487 - NULL); 1488 1493 1489 1494 drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); 1490 1495
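
The dpu_crtc.c hunk replaces the kzalloc()/drm_crtc_init_with_planes()/.destroy combination with drmm_crtc_alloc_with_planes(), so the CRTC's memory and cleanup are tied to the drm_device and the explicit dpu_crtc_destroy() callback disappears. The toy program below mirrors only the idea of owner-managed allocation; none of its names correspond to real DRM interfaces.

/* Toy analogue of drm-managed allocation: objects allocated against an
 * owner are released when the owner is torn down, so no per-object
 * destroy callback is needed.  Invented names, fixed-size list for brevity.
 */
#include <stdio.h>
#include <stdlib.h>

struct owner {
	void *managed[8];
	int count;
};

static void *owner_alloc(struct owner *o, size_t size)
{
	void *p = calloc(1, size);

	if (p && o->count < 8)
		o->managed[o->count++] = p;	/* freed with the owner */
	return p;
}

static void owner_teardown(struct owner *o)
{
	while (o->count)
		free(o->managed[--o->count]);
}

struct toy_crtc {
	int id;
};

int main(void)
{
	struct owner dev = { 0 };
	struct toy_crtc *crtc = owner_alloc(&dev, sizeof(*crtc));

	if (!crtc)
		return 1;
	crtc->id = 0;
	printf("crtc %d allocated, no explicit destroy needed\n", crtc->id);
	owner_teardown(&dev);	/* releases the crtc along with the owner */
	return 0;
}
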
+76 -110
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 2 2 /* 3 3 * Copyright (C) 2013 Red Hat 4 4 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved. 5 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 5 + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 6 6 * 7 7 * Author: Rob Clark <robdclark@gmail.com> 8 8 */ ··· 16 16 #include <drm/drm_crtc.h> 17 17 #include <drm/drm_file.h> 18 18 #include <drm/drm_probe_helper.h> 19 + #include <drm/drm_framebuffer.h> 19 20 20 21 #include "msm_drv.h" 21 22 #include "dpu_kms.h" ··· 27 26 #include "dpu_hw_dspp.h" 28 27 #include "dpu_hw_dsc.h" 29 28 #include "dpu_hw_merge3d.h" 29 + #include "dpu_hw_cdm.h" 30 30 #include "dpu_formats.h" 31 31 #include "dpu_encoder_phys.h" 32 32 #include "dpu_crtc.h" ··· 39 37 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) 40 38 41 39 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\ 40 + (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) 41 + 42 + #define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\ 42 43 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) 43 44 44 45 /* ··· 156 151 * @crtc_frame_event_cb: callback handler for frame event 157 152 * @crtc_frame_event_cb_data: callback handler private data 158 153 * @frame_done_timeout_ms: frame done timeout in ms 154 + * @frame_done_timeout_cnt: atomic counter tracking the number of frame 155 + * done timeouts 159 156 * @frame_done_timer: watchdog timer for frame done event 160 157 * @disp_info: local copy of msm_display_info struct 161 158 * @idle_pc_supported: indicate if idle power collaps is supported ··· 191 184 struct drm_crtc *crtc; 192 185 struct drm_connector *connector; 193 186 194 - struct dentry *debugfs_root; 195 187 struct mutex enc_lock; 196 188 DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL); 197 189 void (*crtc_frame_event_cb)(void *, u32 event); 198 190 void *crtc_frame_event_cb_data; 199 191 200 192 atomic_t frame_done_timeout_ms; 193 + atomic_t frame_done_timeout_cnt; 201 194 struct timer_list frame_done_timer; 202 195 203 196 struct msm_display_info disp_info; ··· 262 255 if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr) 263 256 continue; 264 257 265 - phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1); 258 + phys->hw_intf->ops.setup_misr(phys->hw_intf); 266 259 } 267 260 } 268 261 ··· 446 439 return linecount; 447 440 } 448 441 449 - static void dpu_encoder_destroy(struct drm_encoder *drm_enc) 450 - { 451 - struct dpu_encoder_virt *dpu_enc = NULL; 452 - int i = 0; 453 - 454 - if (!drm_enc) { 455 - DPU_ERROR("invalid encoder\n"); 456 - return; 457 - } 458 - 459 - dpu_enc = to_dpu_encoder_virt(drm_enc); 460 - DPU_DEBUG_ENC(dpu_enc, "\n"); 461 - 462 - mutex_lock(&dpu_enc->enc_lock); 463 - 464 - for (i = 0; i < dpu_enc->num_phys_encs; i++) { 465 - struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 466 - 467 - if (phys->ops.destroy) { 468 - phys->ops.destroy(phys); 469 - --dpu_enc->num_phys_encs; 470 - dpu_enc->phys_encs[i] = NULL; 471 - } 472 - } 473 - 474 - if (dpu_enc->num_phys_encs) 475 - DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n", 476 - dpu_enc->num_phys_encs); 477 - dpu_enc->num_phys_encs = 0; 478 - mutex_unlock(&dpu_enc->enc_lock); 479 - 480 - drm_encoder_cleanup(drm_enc); 481 - mutex_destroy(&dpu_enc->enc_lock); 482 - } 483 - 484 442 void dpu_encoder_helper_split_config( 485 443 struct dpu_encoder_phys *phys_enc, 486 444 enum dpu_intf interface) ··· 586 614 struct drm_display_mode *adj_mode; 587 615 struct msm_display_topology topology; 588 616 
struct dpu_global_state *global_state; 617 + struct drm_framebuffer *fb; 589 618 struct drm_dsc_config *dsc; 590 619 int i = 0; 591 620 int ret = 0; ··· 626 653 dsc = dpu_encoder_get_dsc_config(drm_enc); 627 654 628 655 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc); 656 + 657 + /* 658 + * Use CDM only for writeback at the moment as other interfaces cannot handle it. 659 + * if writeback itself cannot handle cdm for some reason it will fail in its atomic_check() 660 + * earlier. 661 + */ 662 + if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) { 663 + fb = conn_state->writeback_job->fb; 664 + 665 + if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb)))) 666 + topology.needs_cdm = true; 667 + if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm) 668 + crtc_state->mode_changed = true; 669 + else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm) 670 + crtc_state->mode_changed = true; 671 + } 629 672 630 673 /* 631 674 * Release and Allocate resources on every modeset ··· 1083 1094 1084 1095 dpu_enc->dsc_mask = dsc_mask; 1085 1096 1097 + if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) { 1098 + struct dpu_hw_blk *hw_cdm = NULL; 1099 + 1100 + dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1101 + drm_enc->base.id, DPU_HW_BLK_CDM, 1102 + &hw_cdm, 1); 1103 + dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL; 1104 + } 1105 + 1086 1106 cstate = to_dpu_crtc_state(crtc_state); 1087 1107 1088 1108 for (i = 0; i < num_lm; i++) { ··· 1201 1203 index = disp_info->h_tile_instance[0]; 1202 1204 1203 1205 dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc); 1206 + 1207 + atomic_set(&dpu_enc->frame_done_timeout_cnt, 0); 1204 1208 1205 1209 if (disp_info->intf_type == INTF_DP) 1206 1210 dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]); ··· 2080 2080 phys_enc->hw_pp->merge_3d->idx); 2081 2081 } 2082 2082 2083 + if (phys_enc->hw_cdm) { 2084 + if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp) 2085 + phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm, 2086 + PINGPONG_NONE); 2087 + if (phys_enc->hw_ctl->ops.update_pending_flush_cdm) 2088 + phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl, 2089 + phys_enc->hw_cdm->idx); 2090 + } 2091 + 2083 2092 if (dpu_enc->dsc) { 2084 2093 dpu_encoder_unprep_dsc(dpu_enc); 2085 2094 dpu_enc->dsc = NULL; ··· 2117 2108 #ifdef CONFIG_DEBUG_FS 2118 2109 static int _dpu_encoder_status_show(struct seq_file *s, void *data) 2119 2110 { 2120 - struct dpu_encoder_virt *dpu_enc = s->private; 2111 + struct drm_encoder *drm_enc = s->private; 2112 + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 2121 2113 int i; 2122 2114 2123 2115 mutex_lock(&dpu_enc->enc_lock); 2124 2116 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2125 2117 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2126 2118 2127 - seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ", 2119 + seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d", 2128 2120 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1, 2129 2121 phys->hw_wb ? 
phys->hw_wb->idx - WB_0 : -1, 2130 2122 atomic_read(&phys->vsync_cnt), 2131 - atomic_read(&phys->underrun_cnt)); 2123 + atomic_read(&phys->underrun_cnt), 2124 + atomic_read(&dpu_enc->frame_done_timeout_cnt)); 2132 2125 2133 2126 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode)); 2134 2127 } ··· 2141 2130 2142 2131 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); 2143 2132 2144 - static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 2133 + static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root) 2145 2134 { 2146 - struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 2147 - 2148 - char name[12]; 2149 - 2150 - if (!drm_enc->dev) { 2151 - DPU_ERROR("invalid encoder or kms\n"); 2152 - return -EINVAL; 2153 - } 2154 - 2155 - snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id); 2156 - 2157 - /* create overall sub-directory for the encoder */ 2158 - dpu_enc->debugfs_root = debugfs_create_dir(name, 2159 - drm_enc->dev->primary->debugfs_root); 2160 - 2161 2135 /* don't error check these */ 2162 2136 debugfs_create_file("status", 0600, 2163 - dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops); 2164 - 2165 - return 0; 2137 + root, drm_enc, &_dpu_encoder_status_fops); 2166 2138 } 2167 2139 #else 2168 - static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 2169 - { 2170 - return 0; 2171 - } 2140 + #define dpu_encoder_debugfs_init NULL 2172 2141 #endif 2173 2142 2174 - static int dpu_encoder_late_register(struct drm_encoder *encoder) 2175 - { 2176 - return _dpu_encoder_init_debugfs(encoder); 2177 - } 2178 - 2179 - static void dpu_encoder_early_unregister(struct drm_encoder *encoder) 2180 - { 2181 - struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2182 - 2183 - debugfs_remove_recursive(dpu_enc->debugfs_root); 2184 - } 2185 - 2186 2143 static int dpu_encoder_virt_add_phys_encs( 2144 + struct drm_device *dev, 2187 2145 struct msm_display_info *disp_info, 2188 2146 struct dpu_encoder_virt *dpu_enc, 2189 2147 struct dpu_enc_phys_init_params *params) ··· 2174 2194 2175 2195 2176 2196 if (disp_info->intf_type == INTF_WB) { 2177 - enc = dpu_encoder_phys_wb_init(params); 2197 + enc = dpu_encoder_phys_wb_init(dev, params); 2178 2198 2179 2199 if (IS_ERR(enc)) { 2180 2200 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", ··· 2185 2205 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2186 2206 ++dpu_enc->num_phys_encs; 2187 2207 } else if (disp_info->is_cmd_mode) { 2188 - enc = dpu_encoder_phys_cmd_init(params); 2208 + enc = dpu_encoder_phys_cmd_init(dev, params); 2189 2209 2190 2210 if (IS_ERR(enc)) { 2191 2211 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", ··· 2196 2216 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2197 2217 ++dpu_enc->num_phys_encs; 2198 2218 } else { 2199 - enc = dpu_encoder_phys_vid_init(params); 2219 + enc = dpu_encoder_phys_vid_init(dev, params); 2200 2220 2201 2221 if (IS_ERR(enc)) { 2202 2222 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", ··· 2285 2305 break; 2286 2306 } 2287 2307 2288 - ret = dpu_encoder_virt_add_phys_encs(disp_info, 2308 + ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info, 2289 2309 dpu_enc, &phys_params); 2290 2310 if (ret) { 2291 2311 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); ··· 2319 2339 return; 2320 2340 } 2321 2341 2322 - DPU_ERROR_ENC(dpu_enc, "frame done timeout\n"); 2342 + DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n"); 2343 + 2344 + if 
(atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1) 2345 + msm_disp_snapshot_state(drm_enc->dev); 2323 2346 2324 2347 event = DPU_ENCODER_FRAME_EVENT_ERROR; 2325 2348 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); ··· 2337 2354 }; 2338 2355 2339 2356 static const struct drm_encoder_funcs dpu_encoder_funcs = { 2340 - .destroy = dpu_encoder_destroy, 2341 - .late_register = dpu_encoder_late_register, 2342 - .early_unregister = dpu_encoder_early_unregister, 2357 + .debugfs_init = dpu_encoder_debugfs_init, 2343 2358 }; 2344 2359 2345 2360 struct drm_encoder *dpu_encoder_init(struct drm_device *dev, ··· 2346 2365 { 2347 2366 struct msm_drm_private *priv = dev->dev_private; 2348 2367 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 2349 - struct drm_encoder *drm_enc = NULL; 2350 - struct dpu_encoder_virt *dpu_enc = NULL; 2351 - int ret = 0; 2368 + struct dpu_encoder_virt *dpu_enc; 2369 + int ret; 2352 2370 2353 - dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); 2354 - if (!dpu_enc) 2355 - return ERR_PTR(-ENOMEM); 2356 - 2357 - ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, 2358 - drm_enc_mode, NULL); 2359 - if (ret) { 2360 - devm_kfree(dev->dev, dpu_enc); 2361 - return ERR_PTR(ret); 2362 - } 2371 + dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base, 2372 + &dpu_encoder_funcs, drm_enc_mode, NULL); 2373 + if (IS_ERR(dpu_enc)) 2374 + return ERR_CAST(dpu_enc); 2363 2375 2364 2376 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); 2365 2377 ··· 2362 2388 mutex_init(&dpu_enc->rc_lock); 2363 2389 2364 2390 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); 2365 - if (ret) 2366 - goto fail; 2391 + if (ret) { 2392 + DPU_ERROR("failed to setup encoder\n"); 2393 + return ERR_PTR(-ENOMEM); 2394 + } 2367 2395 2368 2396 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 2397 + atomic_set(&dpu_enc->frame_done_timeout_cnt, 0); 2369 2398 timer_setup(&dpu_enc->frame_done_timer, 2370 2399 dpu_encoder_frame_done_timeout, 0); 2371 2400 ··· 2381 2404 DPU_DEBUG_ENC(dpu_enc, "created\n"); 2382 2405 2383 2406 return &dpu_enc->base; 2384 - 2385 - fail: 2386 - DPU_ERROR("failed to create encoder\n"); 2387 - if (drm_enc) 2388 - dpu_encoder_destroy(drm_enc); 2389 - 2390 - return ERR_PTR(ret); 2391 2407 } 2392 2408 2393 2409 int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, ··· 2406 2436 break; 2407 2437 case MSM_ENC_TX_COMPLETE: 2408 2438 fn_wait = phys->ops.wait_for_tx_complete; 2409 - break; 2410 - case MSM_ENC_VBLANK: 2411 - fn_wait = phys->ops.wait_for_vblank; 2412 2439 break; 2413 2440 default: 2414 2441 DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", ··· 2464 2497 phys_enc->enc_spinlock = p->enc_spinlock; 2465 2498 phys_enc->enable_state = DPU_ENC_DISABLED; 2466 2499 2467 - atomic_set(&phys_enc->vblank_refcount, 0); 2468 2500 atomic_set(&phys_enc->pending_kickoff_cnt, 0); 2469 2501 atomic_set(&phys_enc->pending_ctlstart_cnt, 0); 2470 2502
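Note on the dpu_encoder.c hunks above: the custom destroy/late_register/early_unregister hooks go away because the virtual encoder is now allocated with drmm_encoder_alloc() and debugfs is wired through the generic .debugfs_init callback. A minimal sketch of that drm-managed pattern, with my_encoder and my_encoder_funcs as placeholder names rather than anything from this series:

  struct my_encoder {
          struct drm_encoder base;
  };

  static const struct drm_encoder_funcs my_encoder_funcs = {
          /* no .destroy: a drm-managed encoder is freed with the drm_device */
  };

  static struct drm_encoder *my_encoder_create(struct drm_device *dev)
  {
          struct my_encoder *enc;

          /* allocates the wrapper and calls drm_encoder_init() in one step;
           * cleanup happens automatically when the device is torn down */
          enc = drmm_encoder_alloc(dev, struct my_encoder, base,
                                   &my_encoder_funcs, DRM_MODE_ENCODER_NONE,
                                   NULL);
          if (IS_ERR(enc))
                  return ERR_CAST(enc);

          return &enc->base;
  }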
+14 -7
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
··· 14 14 #include "dpu_hw_intf.h" 15 15 #include "dpu_hw_wb.h" 16 16 #include "dpu_hw_pingpong.h" 17 + #include "dpu_hw_cdm.h" 17 18 #include "dpu_hw_ctl.h" 18 19 #include "dpu_hw_top.h" 20 + #include "dpu_hw_util.h" 19 21 #include "dpu_encoder.h" 20 22 #include "dpu_crtc.h" 21 23 ··· 74 72 * @enable: DRM Call. Enable a DRM mode. 75 73 * @disable: DRM Call. Disable mode. 76 74 * @atomic_check: DRM Call. Atomic check new DRM state. 77 - * @destroy: DRM Call. Destroy and release resources. 78 75 * @control_vblank_irq Register/Deregister for VBLANK IRQ 79 76 * @wait_for_commit_done: Wait for hardware to have flushed the 80 77 * current pending frames to hardware ··· 103 102 int (*atomic_check)(struct dpu_encoder_phys *encoder, 104 103 struct drm_crtc_state *crtc_state, 105 104 struct drm_connector_state *conn_state); 106 - void (*destroy)(struct dpu_encoder_phys *encoder); 107 105 int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable); 108 106 int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc); 109 107 int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc); 110 - int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc); 111 108 void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc); 112 109 void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc); 113 110 void (*trigger_start)(struct dpu_encoder_phys *phys_enc); ··· 152 153 * @hw_pp: Hardware interface to the ping pong registers 153 154 * @hw_intf: Hardware interface to the intf registers 154 155 * @hw_wb: Hardware interface to the wb registers 156 + * @hw_cdm: Hardware interface to the CDM registers 155 157 * @dpu_kms: Pointer to the dpu_kms top level 156 158 * @cached_mode: DRM mode cached at mode_set time, acted on in enable 159 + * @vblank_ctl_lock: Vblank ctl mutex lock to protect vblank_refcount 157 160 * @enabled: Whether the encoder has enabled and running a mode 158 161 * @split_role: Role to play in a split-panel configuration 159 162 * @intf_mode: Interface mode ··· 182 181 struct dpu_hw_pingpong *hw_pp; 183 182 struct dpu_hw_intf *hw_intf; 184 183 struct dpu_hw_wb *hw_wb; 184 + struct dpu_hw_cdm *hw_cdm; 185 185 struct dpu_kms *dpu_kms; 186 186 struct drm_display_mode cached_mode; 187 + struct mutex vblank_ctl_lock; 187 188 enum dpu_enc_split_role split_role; 188 189 enum dpu_intf_mode intf_mode; 189 190 spinlock_t *enc_spinlock; 190 191 enum dpu_enc_enable_state enable_state; 191 - atomic_t vblank_refcount; 192 + int vblank_refcount; 192 193 atomic_t vsync_cnt; 193 194 atomic_t underrun_cnt; 194 195 atomic_t pending_ctlstart_cnt; ··· 213 210 * @wbirq_refcount: Reference count of writeback interrupt 214 211 * @wb_done_timeout_cnt: number of wb done irq timeout errors 215 212 * @wb_cfg: writeback block config to store fb related details 213 + * @cdm_cfg: cdm block config needed to store writeback block's CDM configuration 216 214 * @wb_conn: backpointer to writeback connector 217 215 * @wb_job: backpointer to current writeback job 218 216 * @dest: dpu buffer layout for current writeback output buffer ··· 223 219 atomic_t wbirq_refcount; 224 220 int wb_done_timeout_cnt; 225 221 struct dpu_hw_wb_cfg wb_cfg; 222 + struct dpu_hw_cdm_cfg cdm_cfg; 226 223 struct drm_writeback_connector *wb_conn; 227 224 struct drm_writeback_job *wb_job; 228 225 struct dpu_hw_fmt_layout dest; ··· 286 281 * @p: Pointer to init params structure 287 282 * Return: Error code or newly allocated encoder 288 283 */ 289 - struct dpu_encoder_phys *dpu_encoder_phys_vid_init( 284 + struct dpu_encoder_phys 
*dpu_encoder_phys_vid_init(struct drm_device *dev, 290 285 struct dpu_enc_phys_init_params *p); 291 286 292 287 /** 293 288 * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder 289 + * @dev: Corresponding device for devres management 294 290 * @p: Pointer to init params structure 295 291 * Return: Error code or newly allocated encoder 296 292 */ 297 - struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( 293 + struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, 298 294 struct dpu_enc_phys_init_params *p); 299 295 300 296 /** 301 297 * dpu_encoder_phys_wb_init - initialize writeback encoder 298 + * @dev: Corresponding device for devres management 302 299 * @init: Pointer to init info structure with initialization params 303 300 */ 304 - struct dpu_encoder_phys *dpu_encoder_phys_wb_init( 301 + struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, 305 302 struct dpu_enc_phys_init_params *p); 306 303 307 304 /**
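The header changes above thread a struct drm_device pointer through all three physical-encoder constructors so their allocations can be device-managed, which is also why the .destroy op disappears from dpu_encoder_phys_ops. A hedged sketch of what such a constructor reduces to (my_phys_init is a placeholder name):

  struct dpu_encoder_phys *my_phys_init(struct drm_device *dev,
                                        struct dpu_enc_phys_init_params *p)
  {
          struct dpu_encoder_phys *phys_enc;

          /* drmm_kzalloc() ties the allocation to the drm_device lifetime,
           * so no kfree()-based destroy callback is needed any more */
          phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
          if (!phys_enc)
                  return ERR_PTR(-ENOMEM);

          dpu_encoder_phys_init(phys_enc, p);

          return phys_enc;
  }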
+26 -49
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
··· 13 13 #include "dpu_trace.h" 14 14 #include "disp/msm_disp_snapshot.h" 15 15 16 + #include <drm/drm_managed.h> 17 + 16 18 #define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \ 17 19 (e) && (e)->base.parent ? \ 18 20 (e)->base.parent->base.id : -1, \ ··· 246 244 return -EINVAL; 247 245 } 248 246 249 - refcount = atomic_read(&phys_enc->vblank_refcount); 247 + mutex_lock(&phys_enc->vblank_ctl_lock); 248 + refcount = phys_enc->vblank_refcount; 250 249 251 250 /* Slave encoders don't report vblank */ 252 251 if (!dpu_encoder_phys_cmd_is_master(phys_enc)) ··· 263 260 phys_enc->hw_pp->idx - PINGPONG_0, 264 261 enable ? "true" : "false", refcount); 265 262 266 - if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) 267 - ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, 268 - phys_enc->irq[INTR_IDX_RDPTR], 269 - dpu_encoder_phys_cmd_te_rd_ptr_irq, 270 - phys_enc); 271 - else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0) 272 - ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, 273 - phys_enc->irq[INTR_IDX_RDPTR]); 263 + if (enable) { 264 + if (phys_enc->vblank_refcount == 0) 265 + ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, 266 + phys_enc->irq[INTR_IDX_RDPTR], 267 + dpu_encoder_phys_cmd_te_rd_ptr_irq, 268 + phys_enc); 269 + if (!ret) 270 + phys_enc->vblank_refcount++; 271 + } else if (!enable) { 272 + if (phys_enc->vblank_refcount == 1) 273 + ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, 274 + phys_enc->irq[INTR_IDX_RDPTR]); 275 + if (!ret) 276 + phys_enc->vblank_refcount--; 277 + } 274 278 275 279 end: 280 + mutex_unlock(&phys_enc->vblank_ctl_lock); 276 281 if (ret) { 277 282 DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n", 278 283 DRMID(phys_enc->parent), ··· 296 285 { 297 286 trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent), 298 287 phys_enc->hw_pp->idx - PINGPONG_0, 299 - enable, atomic_read(&phys_enc->vblank_refcount)); 288 + enable, phys_enc->vblank_refcount); 300 289 301 290 if (enable) { 302 291 dpu_core_irq_register_callback(phys_enc->dpu_kms, ··· 569 558 phys_enc->enable_state = DPU_ENC_DISABLED; 570 559 } 571 560 572 - static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc) 573 - { 574 - struct dpu_encoder_phys_cmd *cmd_enc = 575 - to_dpu_encoder_phys_cmd(phys_enc); 576 - 577 - kfree(cmd_enc); 578 - } 579 - 580 561 static void dpu_encoder_phys_cmd_prepare_for_kickoff( 581 562 struct dpu_encoder_phys *phys_enc) 582 563 { ··· 684 681 return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc); 685 682 } 686 683 687 - static int dpu_encoder_phys_cmd_wait_for_vblank( 688 - struct dpu_encoder_phys *phys_enc) 689 - { 690 - int rc = 0; 691 - struct dpu_encoder_phys_cmd *cmd_enc; 692 - struct dpu_encoder_wait_info wait_info; 693 - 694 - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); 695 - 696 - /* only required for master controller */ 697 - if (!dpu_encoder_phys_cmd_is_master(phys_enc)) 698 - return rc; 699 - 700 - wait_info.wq = &cmd_enc->pending_vblank_wq; 701 - wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt; 702 - wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; 703 - 704 - atomic_inc(&cmd_enc->pending_vblank_cnt); 705 - 706 - rc = dpu_encoder_helper_wait_for_irq(phys_enc, 707 - phys_enc->irq[INTR_IDX_RDPTR], 708 - dpu_encoder_phys_cmd_te_rd_ptr_irq, 709 - &wait_info); 710 - 711 - return rc; 712 - } 713 - 714 684 static void dpu_encoder_phys_cmd_handle_post_kickoff( 715 685 struct dpu_encoder_phys *phys_enc) 716 686 { ··· 707 731 ops->atomic_mode_set = 
dpu_encoder_phys_cmd_atomic_mode_set; 708 732 ops->enable = dpu_encoder_phys_cmd_enable; 709 733 ops->disable = dpu_encoder_phys_cmd_disable; 710 - ops->destroy = dpu_encoder_phys_cmd_destroy; 711 734 ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq; 712 735 ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done; 713 736 ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff; 714 737 ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete; 715 - ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank; 716 738 ops->trigger_start = dpu_encoder_phys_cmd_trigger_start; 717 739 ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush; 718 740 ops->irq_control = dpu_encoder_phys_cmd_irq_control; ··· 720 746 ops->get_line_count = dpu_encoder_phys_cmd_get_line_count; 721 747 } 722 748 723 - struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( 749 + struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, 724 750 struct dpu_enc_phys_init_params *p) 725 751 { 726 752 struct dpu_encoder_phys *phys_enc = NULL; ··· 728 754 729 755 DPU_DEBUG("intf\n"); 730 756 731 - cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL); 757 + cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL); 732 758 if (!cmd_enc) { 733 759 DPU_ERROR("failed to allocate\n"); 734 760 return ERR_PTR(-ENOMEM); ··· 736 762 phys_enc = &cmd_enc->base; 737 763 738 764 dpu_encoder_phys_init(phys_enc, p); 765 + 766 + mutex_init(&phys_enc->vblank_ctl_lock); 767 + phys_enc->vblank_refcount = 0; 739 768 740 769 dpu_encoder_phys_cmd_init_ops(&phys_enc->ops); 741 770 phys_enc->intf_mode = INTF_MODE_CMD;
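In the command-mode path the vblank refcount stops being an atomic_t and becomes a plain integer serialized by vblank_ctl_lock, so the IRQ (un)registration and the counter update can no longer race, and the count only moves when the (un)registration succeeded. Condensed sketch of the pattern, where my_irq_register()/my_irq_unregister() stand in for dpu_core_irq_register_callback()/dpu_core_irq_unregister_callback():

  static int my_vblank_irq_ctrl(struct dpu_encoder_phys *phys, bool enable)
  {
          int ret = 0;

          mutex_lock(&phys->vblank_ctl_lock);
          if (enable) {
                  /* only the first user registers the RD_PTR irq callback */
                  if (phys->vblank_refcount == 0)
                          ret = my_irq_register(phys);
                  if (!ret)
                          phys->vblank_refcount++;
          } else {
                  /* only the last user unregisters it */
                  if (phys->vblank_refcount == 1)
                          ret = my_irq_unregister(phys);
                  if (!ret)
                          phys->vblank_refcount--;
          }
          mutex_unlock(&phys->vblank_ctl_lock);

          return ret;
  }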
+30 -25
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
··· 11 11 #include "dpu_trace.h" 12 12 #include "disp/msm_disp_snapshot.h" 13 13 14 + #include <drm/drm_managed.h> 15 + 14 16 #define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \ 15 17 (e) && (e)->parent ? \ 16 18 (e)->parent->base.id : -1, \ ··· 366 364 int ret = 0; 367 365 int refcount; 368 366 369 - refcount = atomic_read(&phys_enc->vblank_refcount); 367 + mutex_lock(&phys_enc->vblank_ctl_lock); 368 + refcount = phys_enc->vblank_refcount; 370 369 371 370 /* Slave encoders don't report vblank */ 372 371 if (!dpu_encoder_phys_vid_is_master(phys_enc)) ··· 380 377 } 381 378 382 379 DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable, 383 - atomic_read(&phys_enc->vblank_refcount)); 380 + refcount); 384 381 385 - if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) 386 - ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, 387 - phys_enc->irq[INTR_IDX_VSYNC], 388 - dpu_encoder_phys_vid_vblank_irq, 389 - phys_enc); 390 - else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0) 391 - ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, 392 - phys_enc->irq[INTR_IDX_VSYNC]); 382 + if (enable) { 383 + if (phys_enc->vblank_refcount == 0) 384 + ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, 385 + phys_enc->irq[INTR_IDX_VSYNC], 386 + dpu_encoder_phys_vid_vblank_irq, 387 + phys_enc); 388 + if (!ret) 389 + phys_enc->vblank_refcount++; 390 + } else if (!enable) { 391 + if (phys_enc->vblank_refcount == 1) 392 + ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, 393 + phys_enc->irq[INTR_IDX_VSYNC]); 394 + if (!ret) 395 + phys_enc->vblank_refcount--; 396 + } 393 397 394 398 end: 399 + mutex_unlock(&phys_enc->vblank_ctl_lock); 395 400 if (ret) { 396 401 DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n", 397 402 DRMID(phys_enc->parent), ··· 449 438 phys_enc->enable_state = DPU_ENC_ENABLING; 450 439 } 451 440 452 - static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc) 453 - { 454 - DPU_DEBUG_VIDENC(phys_enc, "\n"); 455 - kfree(phys_enc); 456 - } 457 - 458 - static int dpu_encoder_phys_vid_wait_for_vblank( 441 + static int dpu_encoder_phys_vid_wait_for_tx_complete( 459 442 struct dpu_encoder_phys *phys_enc) 460 443 { 461 444 struct dpu_encoder_wait_info wait_info; ··· 563 558 * scanout buffer) don't latch properly.. 
564 559 */ 565 560 if (dpu_encoder_phys_vid_is_master(phys_enc)) { 566 - ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc); 561 + ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc); 567 562 if (ret) { 568 563 atomic_set(&phys_enc->pending_kickoff_cnt, 0); 569 564 DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n", ··· 583 578 spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); 584 579 dpu_encoder_phys_inc_pending(phys_enc); 585 580 spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); 586 - ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc); 581 + ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc); 587 582 if (ret) { 588 583 atomic_set(&phys_enc->pending_kickoff_cnt, 0); 589 584 DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n", ··· 623 618 trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent), 624 619 phys_enc->hw_intf->idx - INTF_0, 625 620 enable, 626 - atomic_read(&phys_enc->vblank_refcount)); 621 + phys_enc->vblank_refcount); 627 622 628 623 if (enable) { 629 624 ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true); ··· 686 681 ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set; 687 682 ops->enable = dpu_encoder_phys_vid_enable; 688 683 ops->disable = dpu_encoder_phys_vid_disable; 689 - ops->destroy = dpu_encoder_phys_vid_destroy; 690 684 ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq; 691 685 ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done; 692 - ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank; 693 - ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank; 686 + ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete; 694 687 ops->irq_control = dpu_encoder_phys_vid_irq_control; 695 688 ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff; 696 689 ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff; ··· 697 694 ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count; 698 695 } 699 696 700 - struct dpu_encoder_phys *dpu_encoder_phys_vid_init( 697 + struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, 701 698 struct dpu_enc_phys_init_params *p) 702 699 { 703 700 struct dpu_encoder_phys *phys_enc = NULL; ··· 707 704 return ERR_PTR(-EINVAL); 708 705 } 709 706 710 - phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL); 707 + phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL); 711 708 if (!phys_enc) { 712 709 DPU_ERROR("failed to create encoder due to memory allocation error\n"); 713 710 return ERR_PTR(-ENOMEM); ··· 716 713 DPU_DEBUG_VIDENC(phys_enc, "\n"); 717 714 718 715 dpu_encoder_phys_init(phys_enc, p); 716 + mutex_init(&phys_enc->vblank_ctl_lock); 717 + phys_enc->vblank_refcount = 0; 719 718 720 719 dpu_encoder_phys_vid_init_ops(&phys_enc->ops); 721 720 phys_enc->intf_mode = INTF_MODE_VIDEO;
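With wait_for_vblank gone from the ops table, the video-mode encoder exposes its one-vblank wait only as wait_for_tx_complete, and callers reach it through the generic event helper. A hypothetical call site:

  static int my_wait_frame_transferred(struct drm_encoder *drm_enc)
  {
          /* MSM_ENC_VBLANK no longer exists; for video-mode encoders
           * TX_COMPLETE resolves to the same vblank wait */
          return dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
  }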
+109 -21
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
··· 8 8 #include <linux/debugfs.h> 9 9 10 10 #include <drm/drm_framebuffer.h> 11 + #include <drm/drm_managed.h> 11 12 12 13 #include "dpu_encoder_phys.h" 13 14 #include "dpu_formats.h" ··· 207 206 } 208 207 209 208 /** 210 - * dpu_encoder_phys_wb_setup_cdp - setup chroma down prefetch block 209 + * dpu_encoder_phys_wb_setup_ctl - setup wb pipeline for ctl path 211 210 * @phys_enc:Pointer to physical encoder 212 211 */ 213 - static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc) 212 + static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc) 214 213 { 215 214 struct dpu_hw_wb *hw_wb; 216 215 struct dpu_hw_ctl *ctl; 216 + struct dpu_hw_cdm *hw_cdm; 217 217 218 218 if (!phys_enc) { 219 219 DPU_ERROR("invalid encoder\n"); ··· 223 221 224 222 hw_wb = phys_enc->hw_wb; 225 223 ctl = phys_enc->hw_ctl; 224 + hw_cdm = phys_enc->hw_cdm; 226 225 227 226 if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && 228 227 (phys_enc->hw_ctl && ··· 239 236 240 237 if (mode_3d && hw_pp && hw_pp->merge_3d) 241 238 intf_cfg.merge_3d = hw_pp->merge_3d->idx; 239 + 240 + if (hw_cdm) 241 + intf_cfg.cdm = hw_cdm->idx; 242 242 243 243 if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode) 244 244 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, ··· 261 255 intf_cfg.mode_3d = 262 256 dpu_encoder_helper_get_3d_blend_mode(phys_enc); 263 257 phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); 258 + } 259 + } 260 + 261 + /** 262 + * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block 263 + * This API does not handle DPU_CHROMA_H1V2. 264 + * @phys_enc:Pointer to physical encoder 265 + */ 266 + static void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc) 267 + { 268 + struct dpu_hw_cdm *hw_cdm; 269 + struct dpu_hw_cdm_cfg *cdm_cfg; 270 + struct dpu_hw_pingpong *hw_pp; 271 + struct dpu_encoder_phys_wb *wb_enc; 272 + const struct msm_format *format; 273 + const struct dpu_format *dpu_fmt; 274 + struct drm_writeback_job *wb_job; 275 + int ret; 276 + 277 + if (!phys_enc) 278 + return; 279 + 280 + wb_enc = to_dpu_encoder_phys_wb(phys_enc); 281 + cdm_cfg = &wb_enc->cdm_cfg; 282 + hw_pp = phys_enc->hw_pp; 283 + hw_cdm = phys_enc->hw_cdm; 284 + wb_job = wb_enc->wb_job; 285 + 286 + format = msm_framebuffer_format(wb_enc->wb_job->fb); 287 + dpu_fmt = dpu_get_dpu_format_ext(format->pixel_format, wb_job->fb->modifier); 288 + 289 + if (!hw_cdm) 290 + return; 291 + 292 + if (!DPU_FORMAT_IS_YUV(dpu_fmt)) { 293 + DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent), 294 + dpu_fmt->base.pixel_format); 295 + if (hw_cdm->ops.bind_pingpong_blk) 296 + hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE); 297 + 298 + return; 299 + } 300 + 301 + memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg)); 302 + 303 + cdm_cfg->output_width = wb_job->fb->width; 304 + cdm_cfg->output_height = wb_job->fb->height; 305 + cdm_cfg->output_fmt = dpu_fmt; 306 + cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB; 307 + cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ? 
308 + CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; 309 + cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l; 310 + 311 + /* enable 10 bit logic */ 312 + switch (cdm_cfg->output_fmt->chroma_sample) { 313 + case DPU_CHROMA_RGB: 314 + cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; 315 + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 316 + break; 317 + case DPU_CHROMA_H2V1: 318 + cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; 319 + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 320 + break; 321 + case DPU_CHROMA_420: 322 + cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; 323 + cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; 324 + break; 325 + case DPU_CHROMA_H1V2: 326 + default: 327 + DPU_ERROR("[enc:%d] unsupported chroma sampling type\n", 328 + DRMID(phys_enc->parent)); 329 + cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; 330 + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 331 + break; 332 + } 333 + 334 + DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n", 335 + DRMID(phys_enc->parent), cdm_cfg->output_width, 336 + cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format, 337 + cdm_cfg->output_type, cdm_cfg->output_bit_depth, 338 + cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type); 339 + 340 + if (hw_cdm->ops.enable) { 341 + cdm_cfg->pp_id = hw_pp->idx; 342 + ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); 343 + if (ret < 0) { 344 + DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n", 345 + DRMID(phys_enc->parent), ret); 346 + return; 347 + } 264 348 } 265 349 } 266 350 ··· 403 307 return -EINVAL; 404 308 } 405 309 406 - return 0; 310 + return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state); 407 311 } 408 312 409 313 ··· 416 320 struct dpu_hw_wb *hw_wb; 417 321 struct dpu_hw_ctl *hw_ctl; 418 322 struct dpu_hw_pingpong *hw_pp; 323 + struct dpu_hw_cdm *hw_cdm; 419 324 u32 pending_flush = 0; 420 325 421 326 if (!phys_enc) ··· 425 328 hw_wb = phys_enc->hw_wb; 426 329 hw_pp = phys_enc->hw_pp; 427 330 hw_ctl = phys_enc->hw_ctl; 331 + hw_cdm = phys_enc->hw_cdm; 428 332 429 333 DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); 430 334 ··· 440 342 if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d) 441 343 hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl, 442 344 hw_pp->merge_3d->idx); 345 + 346 + if (hw_cdm && hw_ctl->ops.update_pending_flush_cdm) 347 + hw_ctl->ops.update_pending_flush_cdm(hw_ctl, hw_cdm->idx); 443 348 444 349 if (hw_ctl->ops.get_pending_flush) 445 350 pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl); ··· 475 374 476 375 dpu_encoder_phys_wb_setup_fb(phys_enc, fb); 477 376 478 - dpu_encoder_phys_wb_setup_cdp(phys_enc); 377 + dpu_encoder_helper_phys_setup_cdm(phys_enc); 479 378 379 + dpu_encoder_phys_wb_setup_ctl(phys_enc); 480 380 } 481 381 482 382 /** ··· 682 580 phys_enc->enable_state = DPU_ENC_DISABLED; 683 581 } 684 582 685 - /** 686 - * dpu_encoder_phys_wb_destroy - destroy writeback encoder 687 - * @phys_enc: Pointer to physical encoder 688 - */ 689 - static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc) 690 - { 691 - if (!phys_enc) 692 - return; 693 - 694 - DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0); 695 - 696 - kfree(phys_enc); 697 - } 698 - 699 583 static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc, 700 584 struct drm_writeback_job *job) 701 585 { ··· 777 689 ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set; 778 690 ops->enable = dpu_encoder_phys_wb_enable; 779 691 ops->disable = dpu_encoder_phys_wb_disable; 780 - ops->destroy = dpu_encoder_phys_wb_destroy; 781 692 ops->atomic_check = 
dpu_encoder_phys_wb_atomic_check; 782 693 ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done; 783 694 ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff; ··· 792 705 793 706 /** 794 707 * dpu_encoder_phys_wb_init - initialize writeback encoder 708 + * @dev: Corresponding device for devres management 795 709 * @p: Pointer to init info structure with initialization params 796 710 */ 797 - struct dpu_encoder_phys *dpu_encoder_phys_wb_init( 711 + struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, 798 712 struct dpu_enc_phys_init_params *p) 799 713 { 800 714 struct dpu_encoder_phys *phys_enc = NULL; ··· 808 720 return ERR_PTR(-EINVAL); 809 721 } 810 722 811 - wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL); 723 + wb_enc = drmm_kzalloc(dev, sizeof(*wb_enc), GFP_KERNEL); 812 724 if (!wb_enc) { 813 725 DPU_ERROR("failed to allocate wb phys_enc enc\n"); 814 726 return ERR_PTR(-ENOMEM);
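For reference, with an NV12 writeback job the setup path above ends up handing the CDM a configuration along these lines (1920x1080 is an arbitrary example size; dpu_fmt and hw_pp are whatever the job resolved to, following the chroma-sample switch and the DX bit-depth check shown above):

  struct dpu_hw_cdm_cfg cdm_cfg = {
          .output_width     = 1920,
          .output_height    = 1080,
          .output_fmt       = dpu_fmt,                /* NV12 dpu format */
          .output_type      = CDM_CDWN_OUTPUT_WB,
          .output_bit_depth = CDM_CDWN_OUTPUT_8BIT,   /* NV12 is not a DX/10-bit format */
          .csc_cfg          = &dpu_csc10_rgb2yuv_601l,
          .h_cdwn_type      = CDM_CDWN_COSITE,        /* 4:2:0: cosite horizontal downscale */
          .v_cdwn_type      = CDM_CDWN_OFFSITE,       /* 4:2:0: offsite vertical downscale */
          .pp_id            = hw_pp->idx,
  };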
+119 -88
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
··· 22 22 BIT(DPU_SSPP_CSC_10BIT)) 23 23 24 24 #define VIG_MSM8998_MASK \ 25 - (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3)) 25 + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) 26 26 27 27 #define VIG_SDM845_MASK \ 28 - (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3)) 28 + (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) 29 29 30 30 #define VIG_SDM845_MASK_SDMA \ 31 31 (VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) 32 - 33 - #define VIG_SC7180_MASK \ 34 - (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4)) 35 - 36 - #define VIG_SM6125_MASK \ 37 - (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE)) 38 - 39 - #define VIG_SC7180_MASK_SDMA \ 40 - (VIG_SC7180_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) 41 32 42 33 #define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL)) 43 34 ··· 38 47 BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT)) 39 48 40 49 #define VIG_SC7280_MASK \ 41 - (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION)) 50 + (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION)) 42 51 43 52 #define VIG_SC7280_MASK_SDMA \ 44 53 (VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) ··· 202 211 /* TODO add formats after validation */ 203 212 }; 204 213 205 - static const uint32_t wb2_formats[] = { 214 + static const u32 wb2_formats_rgb[] = { 206 215 DRM_FORMAT_RGB565, 207 216 DRM_FORMAT_BGR565, 208 217 DRM_FORMAT_RGB888, ··· 236 245 DRM_FORMAT_XBGR4444, 237 246 }; 238 247 248 + static const u32 wb2_formats_rgb_yuv[] = { 249 + DRM_FORMAT_RGB565, 250 + DRM_FORMAT_BGR565, 251 + DRM_FORMAT_RGB888, 252 + DRM_FORMAT_ARGB8888, 253 + DRM_FORMAT_RGBA8888, 254 + DRM_FORMAT_ABGR8888, 255 + DRM_FORMAT_XRGB8888, 256 + DRM_FORMAT_RGBX8888, 257 + DRM_FORMAT_XBGR8888, 258 + DRM_FORMAT_ARGB1555, 259 + DRM_FORMAT_RGBA5551, 260 + DRM_FORMAT_XRGB1555, 261 + DRM_FORMAT_RGBX5551, 262 + DRM_FORMAT_ARGB4444, 263 + DRM_FORMAT_RGBA4444, 264 + DRM_FORMAT_RGBX4444, 265 + DRM_FORMAT_XRGB4444, 266 + DRM_FORMAT_BGR565, 267 + DRM_FORMAT_BGR888, 268 + DRM_FORMAT_ABGR8888, 269 + DRM_FORMAT_BGRA8888, 270 + DRM_FORMAT_BGRX8888, 271 + DRM_FORMAT_XBGR8888, 272 + DRM_FORMAT_ABGR1555, 273 + DRM_FORMAT_BGRA5551, 274 + DRM_FORMAT_XBGR1555, 275 + DRM_FORMAT_BGRX5551, 276 + DRM_FORMAT_ABGR4444, 277 + DRM_FORMAT_BGRA4444, 278 + DRM_FORMAT_BGRX4444, 279 + DRM_FORMAT_XBGR4444, 280 + DRM_FORMAT_NV12, 281 + }; 282 + 239 283 /************************************************************* 240 284 * SSPP sub blocks config 241 285 *************************************************************/ 242 286 287 + #define SSPP_SCALER_VER(maj, min) (((maj) << 16) | (min)) 288 + 243 289 /* SSPP common configuration */ 244 - #define _VIG_SBLK(sdma_pri, qseed_ver) \ 290 + #define _VIG_SBLK(scaler_ver) \ 245 291 { \ 246 292 .maxdwnscale = MAX_DOWNSCALE_RATIO, \ 247 293 .maxupscale = MAX_UPSCALE_RATIO, \ 248 - .smart_dma_priority = sdma_pri, \ 249 294 .scaler_blk = {.name = "scaler", \ 250 - .id = qseed_ver, \ 295 + .version = scaler_ver, \ 251 296 .base = 0xa00, .len = 0xa0,}, \ 252 297 .csc_blk = {.name = "csc", \ 253 - .id = DPU_SSPP_CSC_10BIT, \ 254 298 .base = 0x1a00, .len = 0x100,}, \ 255 299 .format_list = plane_formats_yuv, \ 256 300 .num_formats = ARRAY_SIZE(plane_formats_yuv), \ ··· 294 268 .rotation_cfg = NULL, \ 295 269 } 296 270 297 - #define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \ 271 + #define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \ 298 272 { \ 299 273 .maxdwnscale = MAX_DOWNSCALE_RATIO, \ 300 274 .maxupscale = MAX_UPSCALE_RATIO, \ 301 - .smart_dma_priority = sdma_pri, \ 302 275 
.scaler_blk = {.name = "scaler", \ 303 - .id = qseed_ver, \ 276 + .version = scaler_ver, \ 304 277 .base = 0xa00, .len = 0xa0,}, \ 305 278 .csc_blk = {.name = "csc", \ 306 - .id = DPU_SSPP_CSC_10BIT, \ 307 279 .base = 0x1a00, .len = 0x100,}, \ 308 280 .format_list = plane_formats_yuv, \ 309 281 .num_formats = ARRAY_SIZE(plane_formats_yuv), \ ··· 310 286 .rotation_cfg = rot_cfg, \ 311 287 } 312 288 313 - #define _DMA_SBLK(sdma_pri) \ 289 + #define _VIG_SBLK_NOSCALE() \ 314 290 { \ 315 291 .maxdwnscale = SSPP_UNITY_SCALE, \ 316 292 .maxupscale = SSPP_UNITY_SCALE, \ 317 - .smart_dma_priority = sdma_pri, \ 293 + .format_list = plane_formats_yuv, \ 294 + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ 295 + .virt_format_list = plane_formats, \ 296 + .virt_num_formats = ARRAY_SIZE(plane_formats), \ 297 + } 298 + 299 + #define _DMA_SBLK() \ 300 + { \ 301 + .maxdwnscale = SSPP_UNITY_SCALE, \ 302 + .maxupscale = SSPP_UNITY_SCALE, \ 318 303 .format_list = plane_formats, \ 319 304 .num_formats = ARRAY_SIZE(plane_formats), \ 320 305 .virt_format_list = plane_formats, \ 321 306 .virt_num_formats = ARRAY_SIZE(plane_formats), \ 322 307 } 323 - 324 - static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 = 325 - _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3); 326 - static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 = 327 - _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3); 328 - static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 = 329 - _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3); 330 - static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 = 331 - _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3); 332 308 333 309 static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { 334 310 .rot_maxheight = 1088, ··· 336 312 .rot_format_list = rotation_v2_formats, 337 313 }; 338 314 339 - static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = 340 - _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3); 341 - static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = 342 - _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3); 343 - static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = 344 - _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3); 345 - static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = 346 - _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3); 315 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale = 316 + _VIG_SBLK_NOSCALE(); 347 317 348 - static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1); 349 - static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2); 350 - static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3); 351 - static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4); 318 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_2 = 319 + _VIG_SBLK(SSPP_SCALER_VER(1, 2)); 352 320 353 - static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 = 354 - _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4); 321 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_3 = 322 + _VIG_SBLK(SSPP_SCALER_VER(1, 3)); 355 323 356 - static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 = 357 - _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2); 324 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_4 = 325 + _VIG_SBLK(SSPP_SCALER_VER(1, 4)); 358 326 359 - static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 = 360 - _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4); 327 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_2_4 = 328 + _VIG_SBLK(SSPP_SCALER_VER(2, 4)); 361 329 362 - static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 = 363 - _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE); 330 + 
static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0 = 331 + _VIG_SBLK(SSPP_SCALER_VER(3, 0)); 364 332 365 - static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 = 366 - _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4); 367 - static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 = 368 - _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4); 369 - static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 = 370 - _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4); 371 - static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 = 372 - _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4); 333 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0_rot_v2 = 334 + _VIG_SBLK_ROT(SSPP_SCALER_VER(3, 0), 335 + &dpu_rot_sc7280_cfg_v2); 373 336 374 - static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 = 375 - _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4); 376 - static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 = 377 - _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4); 378 - static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 = 379 - _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4); 380 - static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 = 381 - _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4); 382 - static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5); 383 - static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6); 337 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_1 = 338 + _VIG_SBLK(SSPP_SCALER_VER(3, 1)); 384 339 385 - #define _VIG_SBLK_NOSCALE(sdma_pri) \ 386 - { \ 387 - .maxdwnscale = SSPP_UNITY_SCALE, \ 388 - .maxupscale = SSPP_UNITY_SCALE, \ 389 - .smart_dma_priority = sdma_pri, \ 390 - .format_list = plane_formats_yuv, \ 391 - .num_formats = ARRAY_SIZE(plane_formats_yuv), \ 392 - .virt_format_list = plane_formats, \ 393 - .virt_num_formats = ARRAY_SIZE(plane_formats), \ 394 - } 340 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 = 341 + _VIG_SBLK(SSPP_SCALER_VER(3, 2)); 395 342 396 - static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE(2); 397 - static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK(1); 343 + static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 = 344 + _VIG_SBLK(SSPP_SCALER_VER(3, 3)); 345 + 346 + static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK(); 398 347 399 348 /************************************************************* 400 349 * MIXER sub blocks config ··· 419 422 * DSPP sub blocks config 420 423 *************************************************************/ 421 424 static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = { 422 - .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700, 425 + .pcc = {.name = "pcc", .base = 0x1700, 423 426 .len = 0x90, .version = 0x10007}, 424 427 }; 425 428 426 429 static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = { 427 - .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700, 430 + .pcc = {.name = "pcc", .base = 0x1700, 428 431 .len = 0x90, .version = 0x40000}, 429 432 }; 430 433 ··· 432 435 * PINGPONG sub blocks config 433 436 *************************************************************/ 434 437 static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = { 435 - .te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0, 438 + .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, 436 439 .version = 0x1}, 437 - .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0, 440 + .dither = {.name = "dither", .base = 0x30e0, 438 441 .len = 0x20, .version = 0x10000}, 439 442 }; 440 443 441 444 static const struct dpu_pingpong_sub_blks 
sdm845_pp_sblk = { 442 - .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0, 445 + .dither = {.name = "dither", .base = 0x30e0, 443 446 .len = 0x20, .version = 0x10000}, 444 447 }; 445 448 446 449 static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = { 447 - .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0, 450 + .dither = {.name = "dither", .base = 0xe0, 448 451 .len = 0x20, .version = 0x20000}, 449 452 }; 450 453 ··· 462 465 }; 463 466 464 467 /************************************************************* 468 + * CDM block config 469 + *************************************************************/ 470 + static const struct dpu_cdm_cfg sc7280_cdm = { 471 + .name = "cdm_0", 472 + .id = CDM_0, 473 + .len = 0x228, 474 + .base = 0x79200, 475 + }; 476 + 477 + /************************************************************* 465 478 * VBIF sub blocks config 466 479 *************************************************************/ 467 480 /* VBIF QOS remap */ ··· 479 472 static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1}; 480 473 static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6}; 481 474 static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; 475 + static const u32 sm8650_rt_pri_lvl[] = {4, 4, 5, 5, 5, 5, 5, 6}; 482 476 483 477 static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { 484 478 { ··· 556 548 .qos_rt_tbl = { 557 549 .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl), 558 550 .priority_lvl = sdm845_rt_pri_lvl, 551 + }, 552 + .qos_nrt_tbl = { 553 + .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl), 554 + .priority_lvl = sdm845_nrt_pri_lvl, 555 + }, 556 + .memtype_count = 16, 557 + .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, 558 + }, 559 + }; 560 + 561 + static const struct dpu_vbif_cfg sm8650_vbif[] = { 562 + { 563 + .name = "vbif_rt", .id = VBIF_RT, 564 + .base = 0, .len = 0x1074, 565 + .features = BIT(DPU_VBIF_QOS_REMAP), 566 + .xin_halt_timeout = 0x4000, 567 + .qos_rp_remap_size = 0x40, 568 + .qos_rt_tbl = { 569 + .npriority_lvl = ARRAY_SIZE(sm8650_rt_pri_lvl), 570 + .priority_lvl = sm8650_rt_pri_lvl, 559 571 }, 560 572 .qos_nrt_tbl = { 561 573 .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl), ··· 682 654 #include "catalog/dpu_3_0_msm8998.h" 683 655 684 656 #include "catalog/dpu_4_0_sdm845.h" 657 + #include "catalog/dpu_4_1_sdm670.h" 685 658 686 659 #include "catalog/dpu_5_0_sm8150.h" 687 660 #include "catalog/dpu_5_1_sc8180x.h" ··· 702 673 #include "catalog/dpu_8_1_sm8450.h" 703 674 704 675 #include "catalog/dpu_9_0_sm8550.h" 676 + 677 + #include "catalog/dpu_10_0_sm8650.h"
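The per-SoC QSEED3/QSEED3LITE/QSEED4 feature bits in the catalog give way to the single QSEED3_COMPATIBLE flag plus an explicit scaler revision; SSPP_SCALER_VER() simply packs major/minor as (maj << 16) | min, matching the value the hardware reports through its QSEED3_HW_VERSION register. Roughly:

  /*
   * SSPP_SCALER_VER(1, 2) == 0x00010002   QSEED3-era blocks
   * SSPP_SCALER_VER(2, 4) == 0x00020004   QSEED3LITE-era blocks (e.g. the old sm6125 entry)
   * SSPP_SCALER_VER(3, 0) == 0x00030000   QSEED4-era blocks (e.g. the old sc7180/sm8250 entries)
   */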
+40 -32
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
··· 51 51 /** 52 52 * SSPP sub-blocks/features 53 53 * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support 54 - * @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support 55 - * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite alogorithm support 56 - * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support 54 + * @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible alogorithm support (includes QSEED3, QSEED3LITE and QSEED4) 57 55 * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes 58 56 * @DPU_SSPP_CSC, Support of Color space converion 59 57 * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion ··· 69 71 */ 70 72 enum { 71 73 DPU_SSPP_SCALER_QSEED2 = 0x1, 72 - DPU_SSPP_SCALER_QSEED3, 73 - DPU_SSPP_SCALER_QSEED3LITE, 74 - DPU_SSPP_SCALER_QSEED4, 74 + DPU_SSPP_SCALER_QSEED3_COMPATIBLE, 75 75 DPU_SSPP_SCALER_RGB, 76 76 DPU_SSPP_CSC, 77 77 DPU_SSPP_CSC_10BIT, ··· 245 249 unsigned long features 246 250 247 251 /** 248 - * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU 249 - * @name: string name for debug purposes 250 - * @id: enum identifying this sub-block 251 - * @base: offset of this sub-block relative to the block 252 - * offset 253 - * @len register block length of this sub-block 254 - */ 255 - #define DPU_HW_SUBBLK_INFO \ 256 - char name[DPU_HW_BLK_NAME_LEN]; \ 257 - u32 id; \ 258 - u32 base; \ 259 - u32 len 260 - 261 - /** 262 252 * struct dpu_scaler_blk: Scaler information 263 - * @info: HW register and features supported by this sub-blk 264 - * @version: qseed block revision 253 + * @name: string name for debug purposes 254 + * @base: offset of this sub-block relative to the block offset 255 + * @len: register block length of this sub-block 256 + * @version: qseed block revision, on QSEED3+ platforms this is the value of 257 + * scaler_blk.base + QSEED3_HW_VERSION registers. 265 258 */ 266 259 struct dpu_scaler_blk { 267 - DPU_HW_SUBBLK_INFO; 260 + char name[DPU_HW_BLK_NAME_LEN]; 261 + u32 base; 262 + u32 len; 268 263 u32 version; 269 264 }; 270 265 271 266 struct dpu_csc_blk { 272 - DPU_HW_SUBBLK_INFO; 267 + char name[DPU_HW_BLK_NAME_LEN]; 268 + u32 base; 269 + u32 len; 273 270 }; 274 271 275 272 /** 276 273 * struct dpu_pp_blk : Pixel processing sub-blk information 277 - * @info: HW register and features supported by this sub-blk 274 + * @name: string name for debug purposes 275 + * @base: offset of this sub-block relative to the block offset 276 + * @len: register block length of this sub-block 278 277 * @version: HW Algorithm version 279 278 */ 280 279 struct dpu_pp_blk { 281 - DPU_HW_SUBBLK_INFO; 280 + char name[DPU_HW_BLK_NAME_LEN]; 281 + u32 base; 282 + u32 len; 282 283 u32 version; 283 284 }; 284 285 285 286 /** 286 287 * struct dpu_dsc_blk - DSC Encoder sub-blk information 287 - * @info: HW register and features supported by this sub-blk 288 + * @name: string name for debug purposes 289 + * @base: offset of this sub-block relative to the block offset 290 + * @len: register block length of this sub-block 288 291 */ 289 292 struct dpu_dsc_blk { 290 - DPU_HW_SUBBLK_INFO; 293 + char name[DPU_HW_BLK_NAME_LEN]; 294 + u32 base; 295 + u32 len; 291 296 }; 292 297 293 298 /** ··· 338 341 * @max_mixer_width max layer mixer line width support. 339 342 * @max_mixer_blendstages max layer mixer blend stages or 340 343 * supported z order 341 - * @qseed_type qseed2 or qseed3 support. 
342 344 * @has_src_split source split feature status 343 345 * @has_dim_layer dim layer feature status 344 346 * @has_idle_pc indicate if idle power collapse feature is supported ··· 350 354 struct dpu_caps { 351 355 u32 max_mixer_width; 352 356 u32 max_mixer_blendstages; 353 - u32 qseed_type; 354 357 bool has_src_split; 355 358 bool has_dim_layer; 356 359 bool has_idle_pc; ··· 366 371 * common: Pointer to common configurations shared by sub blocks 367 372 * @maxdwnscale: max downscale ratio supported(without DECIMATION) 368 373 * @maxupscale: maxupscale ratio supported 369 - * @smart_dma_priority: hw priority of rect1 of multirect pipe 370 374 * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps 371 375 * @qseed_ver: qseed version 372 376 * @scaler_blk: ··· 379 385 struct dpu_sspp_sub_blks { 380 386 u32 maxdwnscale; 381 387 u32 maxupscale; 382 - u32 smart_dma_priority; 383 388 u32 max_per_pipe_bw; 384 389 u32 qseed_ver; 385 390 struct dpu_scaler_blk scaler_blk; ··· 683 690 }; 684 691 685 692 /** 693 + * struct dpu_cdm_cfg - information of chroma down blocks 694 + * @name string name for debug purposes 695 + * @id enum identifying this block 696 + * @base register offset of this block 697 + * @features bit mask identifying sub-blocks/features 698 + */ 699 + struct dpu_cdm_cfg { 700 + DPU_HW_BLK_INFO; 701 + }; 702 + 703 + /** 686 704 * Define CDP use cases 687 705 * @DPU_PERF_CDP_UDAGE_RT: real-time use cases 688 706 * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD ··· 816 812 u32 wb_count; 817 813 const struct dpu_wb_cfg *wb; 818 814 815 + const struct dpu_cdm_cfg *cdm; 816 + 819 817 u32 ad_count; 820 818 821 819 u32 dspp_count; ··· 833 827 834 828 extern const struct dpu_mdss_cfg dpu_msm8998_cfg; 835 829 extern const struct dpu_mdss_cfg dpu_sdm845_cfg; 830 + extern const struct dpu_mdss_cfg dpu_sdm670_cfg; 836 831 extern const struct dpu_mdss_cfg dpu_sm8150_cfg; 837 832 extern const struct dpu_mdss_cfg dpu_sc8180x_cfg; 838 833 extern const struct dpu_mdss_cfg dpu_sm8250_cfg; ··· 848 841 extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg; 849 842 extern const struct dpu_mdss_cfg dpu_sm8450_cfg; 850 843 extern const struct dpu_mdss_cfg dpu_sm8550_cfg; 844 + extern const struct dpu_mdss_cfg dpu_sm8650_cfg; 851 845 852 846 #endif /* _DPU_HW_CATALOG_H */
+247
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2023, The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include <linux/bitfield.h> 7 + 8 + #include <drm/drm_managed.h> 9 + 10 + #include "dpu_hw_mdss.h" 11 + #include "dpu_hw_util.h" 12 + #include "dpu_hw_catalog.h" 13 + #include "dpu_hw_cdm.h" 14 + #include "dpu_kms.h" 15 + 16 + #define CDM_CSC_10_OPMODE 0x000 17 + #define CDM_CSC_10_BASE 0x004 18 + 19 + #define CDM_CDWN2_OP_MODE 0x100 20 + #define CDM_CDWN2_CLAMP_OUT 0x104 21 + #define CDM_CDWN2_PARAMS_3D_0 0x108 22 + #define CDM_CDWN2_PARAMS_3D_1 0x10C 23 + #define CDM_CDWN2_COEFF_COSITE_H_0 0x110 24 + #define CDM_CDWN2_COEFF_COSITE_H_1 0x114 25 + #define CDM_CDWN2_COEFF_COSITE_H_2 0x118 26 + #define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C 27 + #define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120 28 + #define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124 29 + #define CDM_CDWN2_COEFF_COSITE_V 0x128 30 + #define CDM_CDWN2_COEFF_OFFSITE_V 0x12C 31 + #define CDM_CDWN2_OUT_SIZE 0x130 32 + 33 + #define CDM_HDMI_PACK_OP_MODE 0x200 34 + #define CDM_CSC_10_MATRIX_COEFF_0 0x004 35 + 36 + #define CDM_MUX 0x224 37 + 38 + /* CDM CDWN2 sub-block bit definitions */ 39 + #define CDM_CDWN2_OP_MODE_EN BIT(0) 40 + #define CDM_CDWN2_OP_MODE_ENABLE_H BIT(1) 41 + #define CDM_CDWN2_OP_MODE_ENABLE_V BIT(2) 42 + #define CDM_CDWN2_OP_MODE_BITS_OUT_8BIT BIT(7) 43 + #define CDM_CDWN2_V_PIXEL_METHOD_MASK GENMASK(6, 5) 44 + #define CDM_CDWN2_H_PIXEL_METHOD_MASK GENMASK(4, 3) 45 + 46 + /* CDM CSC10 sub-block bit definitions */ 47 + #define CDM_CSC10_OP_MODE_EN BIT(0) 48 + #define CDM_CSC10_OP_MODE_SRC_FMT_YUV BIT(1) 49 + #define CDM_CSC10_OP_MODE_DST_FMT_YUV BIT(2) 50 + 51 + /* CDM HDMI pack sub-block bit definitions */ 52 + #define CDM_HDMI_PACK_OP_MODE_EN BIT(0) 53 + 54 + /* 55 + * Horizontal coefficients for cosite chroma downscale 56 + * s13 representation of coefficients 57 + */ 58 + static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e}; 59 + 60 + /* 61 + * Horizontal coefficients for offsite chroma downscale 62 + */ 63 + static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046}; 64 + 65 + /* 66 + * Vertical coefficients for cosite chroma downscale 67 + */ 68 + static u32 cosite_v_coeff[] = {0x00080004}; 69 + /* 70 + * Vertical coefficients for offsite chroma downscale 71 + */ 72 + static u32 offsite_v_coeff[] = {0x00060002}; 73 + 74 + static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cfg) 75 + { 76 + struct dpu_hw_blk_reg_map *c = &ctx->hw; 77 + u32 opmode; 78 + u32 out_size; 79 + 80 + switch (cfg->h_cdwn_type) { 81 + case CDM_CDWN_DISABLE: 82 + opmode = 0; 83 + break; 84 + case CDM_CDWN_PIXEL_DROP: 85 + opmode = CDM_CDWN2_OP_MODE_ENABLE_H | 86 + FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, 87 + CDM_CDWN2_METHOD_PIXEL_DROP); 88 + break; 89 + case CDM_CDWN_AVG: 90 + opmode = CDM_CDWN2_OP_MODE_ENABLE_H | 91 + FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, 92 + CDM_CDWN2_METHOD_AVG); 93 + break; 94 + case CDM_CDWN_COSITE: 95 + opmode = CDM_CDWN2_OP_MODE_ENABLE_H | 96 + FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, 97 + CDM_CDWN2_METHOD_COSITE); 98 + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0, 99 + cosite_h_coeff[0]); 100 + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1, 101 + cosite_h_coeff[1]); 102 + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2, 103 + cosite_h_coeff[2]); 104 + break; 105 + case CDM_CDWN_OFFSITE: 106 + opmode = CDM_CDWN2_OP_MODE_ENABLE_H | 107 + FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, CDM_CDWN2_METHOD_OFFSITE); 108 + DPU_REG_WRITE(c, 
CDM_CDWN2_COEFF_OFFSITE_H_0, 109 + offsite_h_coeff[0]); 110 + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1, 111 + offsite_h_coeff[1]); 112 + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2, 113 + offsite_h_coeff[2]); 114 + break; 115 + default: 116 + DPU_ERROR("%s invalid horz down sampling type\n", __func__); 117 + return -EINVAL; 118 + } 119 + 120 + switch (cfg->v_cdwn_type) { 121 + case CDM_CDWN_DISABLE: 122 + /* if its only Horizontal downsample, we dont need to do anything here */ 123 + break; 124 + case CDM_CDWN_PIXEL_DROP: 125 + opmode |= CDM_CDWN2_OP_MODE_ENABLE_V | 126 + FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK, 127 + CDM_CDWN2_METHOD_PIXEL_DROP); 128 + break; 129 + case CDM_CDWN_AVG: 130 + opmode |= CDM_CDWN2_OP_MODE_ENABLE_V | 131 + FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK, 132 + CDM_CDWN2_METHOD_AVG); 133 + break; 134 + case CDM_CDWN_COSITE: 135 + opmode |= CDM_CDWN2_OP_MODE_ENABLE_V | 136 + FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK, 137 + CDM_CDWN2_METHOD_COSITE); 138 + DPU_REG_WRITE(c, 139 + CDM_CDWN2_COEFF_COSITE_V, 140 + cosite_v_coeff[0]); 141 + break; 142 + case CDM_CDWN_OFFSITE: 143 + opmode |= CDM_CDWN2_OP_MODE_ENABLE_V | 144 + FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK, 145 + CDM_CDWN2_METHOD_OFFSITE); 146 + DPU_REG_WRITE(c, 147 + CDM_CDWN2_COEFF_OFFSITE_V, 148 + offsite_v_coeff[0]); 149 + break; 150 + default: 151 + return -EINVAL; 152 + } 153 + 154 + if (cfg->output_bit_depth != CDM_CDWN_OUTPUT_10BIT) 155 + opmode |= CDM_CDWN2_OP_MODE_BITS_OUT_8BIT; 156 + 157 + if (cfg->v_cdwn_type || cfg->h_cdwn_type) 158 + opmode |= CDM_CDWN2_OP_MODE_EN; /* EN CDWN module */ 159 + else 160 + opmode &= ~CDM_CDWN2_OP_MODE_EN; 161 + 162 + out_size = (cfg->output_width & 0xFFFF) | ((cfg->output_height & 0xFFFF) << 16); 163 + DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size); 164 + DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode); 165 + DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT, ((0x3FF << 16) | 0x0)); 166 + 167 + return 0; 168 + } 169 + 170 + static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm) 171 + { 172 + struct dpu_hw_blk_reg_map *c = &ctx->hw; 173 + const struct dpu_format *fmt; 174 + u32 opmode = 0; 175 + u32 csc = 0; 176 + 177 + if (!ctx || !cdm) 178 + return -EINVAL; 179 + 180 + fmt = cdm->output_fmt; 181 + 182 + if (!DPU_FORMAT_IS_YUV(fmt)) 183 + return -EINVAL; 184 + 185 + dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, cdm->csc_cfg, true); 186 + dpu_hw_cdm_setup_cdwn(ctx, cdm); 187 + 188 + if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) { 189 + if (fmt->chroma_sample != DPU_CHROMA_H1V2) 190 + return -EINVAL; /*unsupported format */ 191 + opmode = CDM_HDMI_PACK_OP_MODE_EN; 192 + opmode |= (fmt->chroma_sample << 1); 193 + } 194 + 195 + csc |= CDM_CSC10_OP_MODE_DST_FMT_YUV; 196 + csc &= ~CDM_CSC10_OP_MODE_SRC_FMT_YUV; 197 + csc |= CDM_CSC10_OP_MODE_EN; 198 + 199 + if (ctx && ctx->ops.bind_pingpong_blk) 200 + ctx->ops.bind_pingpong_blk(ctx, cdm->pp_id); 201 + 202 + DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc); 203 + DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode); 204 + return 0; 205 + } 206 + 207 + static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_pingpong pp) 208 + { 209 + struct dpu_hw_blk_reg_map *c; 210 + int mux_cfg; 211 + 212 + c = &ctx->hw; 213 + 214 + mux_cfg = DPU_REG_READ(c, CDM_MUX); 215 + mux_cfg &= ~0xf; 216 + 217 + if (pp) 218 + mux_cfg |= (pp - PINGPONG_0) & 0x7; 219 + else 220 + mux_cfg |= 0xf; 221 + 222 + DPU_REG_WRITE(c, CDM_MUX, mux_cfg); 223 + } 224 + 225 + struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, 226 
+ const struct dpu_cdm_cfg *cfg, void __iomem *addr, 227 + const struct dpu_mdss_version *mdss_rev) 228 + { 229 + struct dpu_hw_cdm *c; 230 + 231 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 232 + if (!c) 233 + return ERR_PTR(-ENOMEM); 234 + 235 + c->hw.blk_addr = addr + cfg->base; 236 + c->hw.log_mask = DPU_DBG_MASK_CDM; 237 + 238 + /* Assign ops */ 239 + c->idx = cfg->id; 240 + c->caps = cfg; 241 + 242 + c->ops.enable = dpu_hw_cdm_enable; 243 + if (mdss_rev->core_major_ver >= 5) 244 + c->ops.bind_pingpong_blk = dpu_hw_cdm_bind_pingpong_blk; 245 + 246 + return c; 247 + }
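Worked example of the CDWN2 register writes above for a 1920x1080 output (values only; 0x3FF and 0x0 presumably being the max/min clamp of the 10-bit pipeline):

  /*
   * CDM_CDWN2_OUT_SIZE  = (1920 & 0xFFFF) | ((1080 & 0xFFFF) << 16)
   *                     = 0x0780 | 0x04380000 = 0x04380780
   * CDM_CDWN2_CLAMP_OUT = (0x3FF << 16) | 0x0 = 0x03FF0000
   */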
+142
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2023, The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #ifndef _DPU_HW_CDM_H 7 + #define _DPU_HW_CDM_H 8 + 9 + #include "dpu_hw_mdss.h" 10 + #include "dpu_hw_top.h" 11 + 12 + struct dpu_hw_cdm; 13 + 14 + /** 15 + * struct dpu_hw_cdm_cfg : current configuration of CDM block 16 + * 17 + * @output_width: output ROI width of CDM block 18 + * @output_height: output ROI height of CDM block 19 + * @output_bit_depth: output bit-depth of CDM block 20 + * @h_cdwn_type: downsample type used for horizontal pixels 21 + * @v_cdwn_type: downsample type used for vertical pixels 22 + * @output_fmt: handle to dpu_format of CDM block 23 + * @csc_cfg: handle to CSC matrix programmed for CDM block 24 + * @output_type: interface to which CDM is paired (HDMI/WB) 25 + * @pp_id: ping-pong block to which CDM is bound to 26 + */ 27 + struct dpu_hw_cdm_cfg { 28 + u32 output_width; 29 + u32 output_height; 30 + u32 output_bit_depth; 31 + u32 h_cdwn_type; 32 + u32 v_cdwn_type; 33 + const struct dpu_format *output_fmt; 34 + const struct dpu_csc_cfg *csc_cfg; 35 + u32 output_type; 36 + int pp_id; 37 + }; 38 + 39 + /* 40 + * These values are used indicate which type of downsample is used 41 + * in the horizontal/vertical direction for the CDM block. 42 + */ 43 + enum dpu_hw_cdwn_type { 44 + CDM_CDWN_DISABLE, 45 + CDM_CDWN_PIXEL_DROP, 46 + CDM_CDWN_AVG, 47 + CDM_CDWN_COSITE, 48 + CDM_CDWN_OFFSITE, 49 + }; 50 + 51 + /* 52 + * CDM block can be paired with WB or HDMI block. These values match 53 + * the input with which the CDM block is paired. 54 + */ 55 + enum dpu_hw_cdwn_output_type { 56 + CDM_CDWN_OUTPUT_HDMI, 57 + CDM_CDWN_OUTPUT_WB, 58 + }; 59 + 60 + /* 61 + * CDM block can give an 8-bit or 10-bit output. These values 62 + * are used to indicate the output bit depth of CDM block 63 + */ 64 + enum dpu_hw_cdwn_output_bit_depth { 65 + CDM_CDWN_OUTPUT_8BIT, 66 + CDM_CDWN_OUTPUT_10BIT, 67 + }; 68 + 69 + /* 70 + * CDM block can downsample using different methods. These values 71 + * are used to indicate the downsample method which can be used 72 + * either in the horizontal or vertical direction. 73 + */ 74 + enum dpu_hw_cdwn_op_mode_method_h_v { 75 + CDM_CDWN2_METHOD_PIXEL_DROP, 76 + CDM_CDWN2_METHOD_AVG, 77 + CDM_CDWN2_METHOD_COSITE, 78 + CDM_CDWN2_METHOD_OFFSITE 79 + }; 80 + 81 + /** 82 + * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions 83 + * Assumption is these functions will be called after 84 + * clocks are enabled 85 + * @enable: Enables the output to interface and programs the 86 + * output packer 87 + * @bind_pingpong_blk: enable/disable the connection with pingpong which 88 + * will feed pixels to this cdm 89 + */ 90 + struct dpu_hw_cdm_ops { 91 + /** 92 + * Enable the CDM module 93 + * @cdm Pointer to chroma down context 94 + */ 95 + int (*enable)(struct dpu_hw_cdm *cdm, struct dpu_hw_cdm_cfg *cfg); 96 + 97 + /** 98 + * Enable/disable the connection with pingpong 99 + * @cdm Pointer to chroma down context 100 + * @pp pingpong block id. 
101 + */ 102 + void (*bind_pingpong_blk)(struct dpu_hw_cdm *cdm, const enum dpu_pingpong pp); 103 + }; 104 + 105 + /** 106 + * struct dpu_hw_cdm - cdm description 107 + * @base: Hardware block base structure 108 + * @hw: Block hardware details 109 + * @idx: CDM index 110 + * @caps: Pointer to cdm_cfg 111 + * @ops: handle to operations possible for this CDM 112 + */ 113 + struct dpu_hw_cdm { 114 + struct dpu_hw_blk base; 115 + struct dpu_hw_blk_reg_map hw; 116 + 117 + /* chroma down */ 118 + const struct dpu_cdm_cfg *caps; 119 + enum dpu_cdm idx; 120 + 121 + /* ops */ 122 + struct dpu_hw_cdm_ops ops; 123 + }; 124 + 125 + /** 126 + * dpu_hw_cdm_init - initializes the cdm hw driver object. 127 + * should be called once before accessing every cdm. 128 + * @dev: DRM device handle 129 + * @cdm: CDM catalog entry for which driver object is required 130 + * @addr : mapped register io address of MDSS 131 + * @mdss_rev: mdss hw core revision 132 + */ 133 + struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, 134 + const struct dpu_cdm_cfg *cdm, void __iomem *addr, 135 + const struct dpu_mdss_version *mdss_rev); 136 + 137 + static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw) 138 + { 139 + return container_of(hw, struct dpu_hw_cdm, base); 140 + } 141 + 142 + #endif /*_DPU_HW_CDM_H */
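Hedged usage sketch of the new CDM interface declared above; it assumes the catalog's cdm entry and the usual dpu_kms mmio/mdss_ver fields are available, trims error handling, and uses my_cdm_example as a placeholder name:

  static void my_cdm_example(struct drm_device *dev, struct dpu_kms *dpu_kms,
                             struct dpu_hw_cdm_cfg *cdm_cfg)
  {
          struct dpu_hw_cdm *cdm;

          cdm = dpu_hw_cdm_init(dev, dpu_kms->catalog->cdm, dpu_kms->mmio,
                                dpu_kms->catalog->mdss_ver);
          if (IS_ERR(cdm))
                  return;

          /* programs the CSC and CDWN2 sub-blocks and binds the pingpong */
          if (cdm->ops.enable)
                  cdm->ops.enable(cdm, cdm_cfg);

          /* detach again by binding to PINGPONG_NONE when the job is done */
          if (cdm->ops.bind_pingpong_blk)
                  cdm->ops.bind_pingpong_blk(cdm, PINGPONG_NONE);
  }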
+42 -10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
··· 4 4 */ 5 5 6 6 #include <linux/delay.h> 7 + 8 + #include <drm/drm_managed.h> 9 + 7 10 #include "dpu_hwio.h" 8 11 #include "dpu_hw_ctl.h" 9 12 #include "dpu_kms.h" ··· 32 29 #define CTL_DSC_ACTIVE 0x0E8 33 30 #define CTL_WB_ACTIVE 0x0EC 34 31 #define CTL_INTF_ACTIVE 0x0F4 32 + #define CTL_CDM_ACTIVE 0x0F8 35 33 #define CTL_FETCH_PIPE_ACTIVE 0x0FC 36 34 #define CTL_MERGE_3D_FLUSH 0x100 37 35 #define CTL_DSC_FLUSH 0x104 38 36 #define CTL_WB_FLUSH 0x108 39 37 #define CTL_INTF_FLUSH 0x110 38 + #define CTL_CDM_FLUSH 0x114 40 39 #define CTL_INTF_MASTER 0x134 41 40 #define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4)) 42 41 ··· 48 43 #define DPU_REG_RESET_TIMEOUT_US 2000 49 44 #define MERGE_3D_IDX 23 50 45 #define DSC_IDX 22 46 + #define CDM_IDX 26 51 47 #define INTF_IDX 31 52 48 #define WB_IDX 16 53 49 #define DSPP_IDX 29 /* From DPU hw rev 7.x.x */ ··· 110 104 ctx->pending_wb_flush_mask = 0; 111 105 ctx->pending_merge_3d_flush_mask = 0; 112 106 ctx->pending_dsc_flush_mask = 0; 107 + ctx->pending_cdm_flush_mask = 0; 113 108 114 109 memset(ctx->pending_dspp_flush_mask, 0, 115 110 sizeof(ctx->pending_dspp_flush_mask)); ··· 154 147 if (ctx->pending_flush_mask & BIT(DSC_IDX)) 155 148 DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, 156 149 ctx->pending_dsc_flush_mask); 150 + 151 + if (ctx->pending_flush_mask & BIT(CDM_IDX)) 152 + DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH, 153 + ctx->pending_cdm_flush_mask); 157 154 158 155 DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); 159 156 } ··· 290 279 } 291 280 } 292 281 282 + static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num) 283 + { 284 + /* update pending flush only if CDM_0 is flushed */ 285 + if (cdm_num == CDM_0) 286 + ctx->pending_flush_mask |= BIT(CDM_IDX); 287 + } 288 + 293 289 static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx, 294 290 enum dpu_wb wb) 295 291 { ··· 323 305 { 324 306 ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0); 325 307 ctx->pending_flush_mask |= BIT(DSC_IDX); 308 + } 309 + 310 + static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num) 311 + { 312 + ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0); 313 + ctx->pending_flush_mask |= BIT(CDM_IDX); 326 314 } 327 315 328 316 static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx, ··· 564 540 565 541 if (cfg->dsc) 566 542 DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc); 543 + 544 + if (cfg->cdm) 545 + DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm); 567 546 } 568 547 569 548 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, ··· 610 583 u32 wb_active = 0; 611 584 u32 merge3d_active = 0; 612 585 u32 dsc_active; 586 + u32 cdm_active; 613 587 614 588 /* 615 589 * This API resets each portion of the CTL path namely, ··· 646 618 dsc_active &= ~cfg->dsc; 647 619 DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active); 648 620 } 621 + 622 + if (cfg->cdm) { 623 + cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE); 624 + cdm_active &= ~cfg->cdm; 625 + DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active); 626 + } 649 627 } 650 628 651 629 static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx, ··· 685 651 ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1; 686 652 ops->update_pending_flush_dsc = 687 653 dpu_hw_ctl_update_pending_flush_dsc_v1; 654 + ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1; 688 655 } else { 689 656 ops->trigger_flush = dpu_hw_ctl_trigger_flush; 690 657 ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg; 691 658 
ops->update_pending_flush_intf = 692 659 dpu_hw_ctl_update_pending_flush_intf; 693 660 ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb; 661 + ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm; 694 662 } 695 663 ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush; 696 664 ops->update_pending_flush = dpu_hw_ctl_update_pending_flush; ··· 716 680 ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active; 717 681 }; 718 682 719 - struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg, 720 - void __iomem *addr, 721 - u32 mixer_count, 722 - const struct dpu_lm_cfg *mixer) 683 + struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev, 684 + const struct dpu_ctl_cfg *cfg, 685 + void __iomem *addr, 686 + u32 mixer_count, 687 + const struct dpu_lm_cfg *mixer) 723 688 { 724 689 struct dpu_hw_ctl *c; 725 690 726 - c = kzalloc(sizeof(*c), GFP_KERNEL); 691 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 727 692 if (!c) 728 693 return ERR_PTR(-ENOMEM); 729 694 ··· 738 701 c->mixer_hw_caps = mixer; 739 702 740 703 return c; 741 - } 742 - 743 - void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx) 744 - { 745 - kfree(ctx); 746 704 }
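For context, a short illustrative sketch (not taken from this series) of how the new CDM flush plumbing above is meant to be driven: the encoder marks CDM_0 in the pending masks, and the next flush writes CTL_CDM_FLUSH before CTL_FLUSH. The ctl handle is assumed to come from the resource manager.

static void example_flush_cdm(struct dpu_hw_ctl *ctl)
{
	/* sets BIT(CDM_IDX) in pending_flush_mask, and on active-CTL
	 * hardware (v1 callback) the CDM_0 bit in pending_cdm_flush_mask */
	if (ctl->ops.update_pending_flush_cdm)
		ctl->ops.update_pending_flush_cdm(ctl, CDM_0);

	/* writes CTL_CDM_FLUSH with the pending CDM mask, then CTL_FLUSH */
	ctl->ops.trigger_flush(ctl);
}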
+18 -10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
··· 39 39 * @mode_3d: 3d mux configuration 40 40 * @merge_3d: 3d merge block used 41 41 * @intf_mode_sel: Interface mode, cmd / vid 42 + * @cdm: CDM block used 42 43 * @stream_sel: Stream selection for multi-stream interfaces 43 44 * @dsc: DSC BIT masks used 44 45 */ ··· 49 48 enum dpu_3d_blend_mode mode_3d; 50 49 enum dpu_merge_3d merge_3d; 51 50 enum dpu_ctl_mode_sel intf_mode_sel; 51 + enum dpu_cdm cdm; 52 52 int stream_sel; 53 53 unsigned int dsc; 54 54 }; ··· 169 167 enum dpu_dsc blk); 170 168 171 169 /** 170 + * OR in the given flushbits to the cached pending_(cdm_)flush_mask 171 + * No effect on hardware 172 + * @ctx: ctl path ctx pointer 173 + * @cdm_num: idx of cdm to be flushed 174 + */ 175 + void (*update_pending_flush_cdm)(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num); 176 + 177 + /** 172 178 * Write the value of the pending_flush_mask to hardware 173 179 * @ctx : ctl path ctx pointer 174 180 */ ··· 249 239 * @pending_intf_flush_mask: pending INTF flush 250 240 * @pending_wb_flush_mask: pending WB flush 251 241 * @pending_dsc_flush_mask: pending DSC flush 242 + * @pending_cdm_flush_mask: pending CDM flush 252 243 * @ops: operation list 253 244 */ 254 245 struct dpu_hw_ctl { ··· 267 256 u32 pending_merge_3d_flush_mask; 268 257 u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0]; 269 258 u32 pending_dsc_flush_mask; 259 + u32 pending_cdm_flush_mask; 270 260 271 261 /* ops */ 272 262 struct dpu_hw_ctl_ops ops; ··· 286 274 /** 287 275 * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object. 288 276 * Should be called before accessing any ctl_path register. 277 + * @dev: Corresponding device for devres management 289 278 * @cfg: ctl_path catalog entry for which driver object is required 290 279 * @addr: mapped register io address of MDP 291 280 * @mixer_count: Number of mixers in @mixer 292 281 * @mixer: Pointer to an array of Layer Mixers defined in the catalog 293 282 */ 294 - struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg, 295 - void __iomem *addr, 296 - u32 mixer_count, 297 - const struct dpu_lm_cfg *mixer); 298 - 299 - /** 300 - * dpu_hw_ctl_destroy(): Destroys ctl driver context 301 - * should be called to free the context 302 - */ 303 - void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx); 283 + struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev, 284 + const struct dpu_ctl_cfg *cfg, 285 + void __iomem *addr, 286 + u32 mixer_count, 287 + const struct dpu_lm_cfg *mixer); 304 288 305 289 #endif /*_DPU_HW_CTL_H */
+5 -7
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
··· 3 3 * Copyright (c) 2020-2022, Linaro Limited 4 4 */ 5 5 6 + #include <drm/drm_managed.h> 7 + 6 8 #include <drm/display/drm_dsc_helper.h> 7 9 8 10 #include "dpu_kms.h" ··· 190 188 ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk; 191 189 }; 192 190 193 - struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg, 191 + struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev, 192 + const struct dpu_dsc_cfg *cfg, 194 193 void __iomem *addr) 195 194 { 196 195 struct dpu_hw_dsc *c; 197 196 198 - c = kzalloc(sizeof(*c), GFP_KERNEL); 197 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 199 198 if (!c) 200 199 return ERR_PTR(-ENOMEM); 201 200 ··· 208 205 _setup_dsc_ops(&c->ops, c->caps->features); 209 206 210 207 return c; 211 - } 212 - 213 - void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc) 214 - { 215 - kfree(dsc); 216 208 }
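The kzalloc()/kfree() to drmm_kzalloc() change above is the same devres conversion applied to nearly every dpu_hw_* block in this series: the allocation now lives exactly as long as the drm_device, so each dpu_hw_*_destroy() helper and its call sites in dpu_kms.c / dpu_rm.c can simply be deleted. Reduced to a sketch with hypothetical names:

#include <linux/err.h>
#include <linux/slab.h>

#include <drm/drm_device.h>
#include <drm/drm_managed.h>

struct example_blk {
	void __iomem *base;
};

/* Hypothetical block constructor following the drm-managed pattern. */
static struct example_blk *example_blk_init(struct drm_device *dev,
					    void __iomem *addr)
{
	struct example_blk *blk;

	/* freed automatically when the drm_device is released */
	blk = drmm_kzalloc(dev, sizeof(*blk), GFP_KERNEL);
	if (!blk)
		return ERR_PTR(-ENOMEM);

	blk->base = addr;
	return blk;
}
/* ...and the matching example_blk_destroy()/kfree() helper goes away. */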
+7 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
··· 64 64 65 65 /** 66 66 * dpu_hw_dsc_init() - Initializes the DSC hw driver object. 67 + * @dev: Corresponding device for devres management 67 68 * @cfg: DSC catalog entry for which driver object is required 68 69 * @addr: Mapped register io address of MDP 69 70 * Return: Error code or allocated dpu_hw_dsc context 70 71 */ 71 - struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg, 72 - void __iomem *addr); 72 + struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev, 73 + const struct dpu_dsc_cfg *cfg, 74 + void __iomem *addr); 73 75 74 76 /** 75 77 * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object 78 + * @dev: Corresponding device for devres management 76 79 * @cfg: DSC catalog entry for which driver object is required 77 80 * @addr: Mapped register io address of MDP 78 81 * Returns: Error code or allocated dpu_hw_dsc context 79 82 */ 80 - struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg, 83 + struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, 84 + const struct dpu_dsc_cfg *cfg, 81 85 void __iomem *addr); 82 86 83 87 /**
+5 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
··· 4 4 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved 5 5 */ 6 6 7 + #include <drm/drm_managed.h> 8 + 7 9 #include <drm/display/drm_dsc_helper.h> 8 10 9 11 #include "dpu_kms.h" ··· 369 367 ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2; 370 368 } 371 369 372 - struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg, 370 + struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, 371 + const struct dpu_dsc_cfg *cfg, 373 372 void __iomem *addr) 374 373 { 375 374 struct dpu_hw_dsc *c; 376 375 377 - c = kzalloc(sizeof(*c), GFP_KERNEL); 376 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 378 377 if (!c) 379 378 return ERR_PTR(-ENOMEM); 380 379
+6 -10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
··· 2 2 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 3 3 */ 4 4 5 + #include <drm/drm_managed.h> 6 + 5 7 #include "dpu_hwio.h" 6 8 #include "dpu_hw_catalog.h" 7 9 #include "dpu_hw_lm.h" ··· 70 68 c->ops.setup_pcc = dpu_setup_dspp_pcc; 71 69 } 72 70 73 - struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg, 74 - void __iomem *addr) 71 + struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev, 72 + const struct dpu_dspp_cfg *cfg, 73 + void __iomem *addr) 75 74 { 76 75 struct dpu_hw_dspp *c; 77 76 78 77 if (!addr) 79 78 return ERR_PTR(-EINVAL); 80 79 81 - c = kzalloc(sizeof(*c), GFP_KERNEL); 80 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 82 81 if (!c) 83 82 return ERR_PTR(-ENOMEM); 84 83 ··· 93 90 94 91 return c; 95 92 } 96 - 97 - void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp) 98 - { 99 - kfree(dspp); 100 - } 101 - 102 -
+4 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
··· 81 81 /** 82 82 * dpu_hw_dspp_init() - Initializes the DSPP hw driver object. 83 83 * should be called once before accessing every DSPP. 84 + * @dev: Corresponding device for devres management 84 85 * @cfg: DSPP catalog entry for which driver object is required 85 86 * @addr: Mapped register io address of MDP 86 87 * Return: pointer to structure or ERR_PTR 87 88 */ 88 - struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg, 89 - void __iomem *addr); 90 - 91 - /** 92 - * dpu_hw_dspp_destroy(): Destroys DSPP driver context 93 - * @dspp: Pointer to DSPP driver context 94 - */ 95 - void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp); 89 + struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev, 90 + const struct dpu_dspp_cfg *cfg, 91 + void __iomem *addr); 96 92 97 93 #endif /*_DPU_HW_DSPP_H */ 98 94
+6 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
··· 6 6 #include <linux/debugfs.h> 7 7 #include <linux/slab.h> 8 8 9 + #include <drm/drm_managed.h> 10 + 9 11 #include "dpu_core_irq.h" 10 12 #include "dpu_kms.h" 11 13 #include "dpu_hw_interrupts.h" ··· 474 472 return intr_status; 475 473 } 476 474 477 - struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, 478 - const struct dpu_mdss_cfg *m) 475 + struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, 476 + void __iomem *addr, 477 + const struct dpu_mdss_cfg *m) 479 478 { 480 479 struct dpu_hw_intr *intr; 481 480 unsigned int i; ··· 484 481 if (!addr || !m) 485 482 return ERR_PTR(-EINVAL); 486 483 487 - intr = kzalloc(sizeof(*intr), GFP_KERNEL); 484 + intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL); 488 485 if (!intr) 489 486 return ERR_PTR(-ENOMEM); 490 487 ··· 513 510 spin_lock_init(&intr->irq_lock); 514 511 515 512 return intr; 516 - } 517 - 518 - void dpu_hw_intr_destroy(struct dpu_hw_intr *intr) 519 - { 520 - kfree(intr); 521 513 } 522 514 523 515 int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
+4 -7
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
··· 70 70 71 71 /** 72 72 * dpu_hw_intr_init(): Initializes the interrupts hw object 73 + * @dev: Corresponding device for devres management 73 74 * @addr: mapped register io address of MDP 74 75 * @m: pointer to MDSS catalog data 75 76 */ 76 - struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, 77 - const struct dpu_mdss_cfg *m); 77 + struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, 78 + void __iomem *addr, 79 + const struct dpu_mdss_cfg *m); 78 80 79 - /** 80 - * dpu_hw_intr_destroy(): Cleanup interrutps hw object 81 - * @intr: pointer to interrupts hw object 82 - */ 83 - void dpu_hw_intr_destroy(struct dpu_hw_intr *intr); 84 81 #endif
+10 -12
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 3 + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 4 * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 5 5 */ 6 6 ··· 11 11 #include "dpu_trace.h" 12 12 13 13 #include <linux/iopoll.h> 14 + 15 + #include <drm/drm_managed.h> 14 16 15 17 #define INTF_TIMING_ENGINE_EN 0x000 16 18 #define INTF_CONFIG 0x004 ··· 320 318 return DPU_REG_READ(c, INTF_LINE_COUNT); 321 319 } 322 320 323 - static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, bool enable, u32 frame_count) 321 + static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf) 324 322 { 325 - dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, enable, frame_count); 323 + dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1); 326 324 } 327 325 328 326 static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value) ··· 529 527 DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2); 530 528 } 531 529 532 - struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg, 533 - void __iomem *addr, const struct dpu_mdss_version *mdss_rev) 530 + struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, 531 + const struct dpu_intf_cfg *cfg, 532 + void __iomem *addr, 533 + const struct dpu_mdss_version *mdss_rev) 534 534 { 535 535 struct dpu_hw_intf *c; 536 536 ··· 541 537 return NULL; 542 538 } 543 539 544 - c = kzalloc(sizeof(*c), GFP_KERNEL); 540 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 545 541 if (!c) 546 542 return ERR_PTR(-ENOMEM); 547 543 ··· 585 581 586 582 return c; 587 583 } 588 - 589 - void dpu_hw_intf_destroy(struct dpu_hw_intf *intf) 590 - { 591 - kfree(intf); 592 - } 593 -
+7 -10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 3 + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 4 * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 5 5 */ 6 6 ··· 95 95 96 96 void (*bind_pingpong_blk)(struct dpu_hw_intf *intf, 97 97 const enum dpu_pingpong pp); 98 - void (*setup_misr)(struct dpu_hw_intf *intf, bool enable, u32 frame_count); 98 + void (*setup_misr)(struct dpu_hw_intf *intf); 99 99 int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value); 100 100 101 101 // Tearcheck on INTF since DPU 5.0.0 ··· 131 131 /** 132 132 * dpu_hw_intf_init() - Initializes the INTF driver for the passed 133 133 * interface catalog entry. 134 + * @dev: Corresponding device for devres management 134 135 * @cfg: interface catalog entry for which driver object is required 135 136 * @addr: mapped register io address of MDP 136 137 * @mdss_rev: dpu core's major and minor versions 137 138 */ 138 - struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg, 139 - void __iomem *addr, const struct dpu_mdss_version *mdss_rev); 140 - 141 - /** 142 - * dpu_hw_intf_destroy(): Destroys INTF driver context 143 - * @intf: Pointer to INTF driver context 144 - */ 145 - void dpu_hw_intf_destroy(struct dpu_hw_intf *intf); 139 + struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, 140 + const struct dpu_intf_cfg *cfg, 141 + void __iomem *addr, 142 + const struct dpu_mdss_version *mdss_rev); 146 143 147 144 #endif /*_DPU_HW_INTF_H */
+9 -11
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 3 + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 4 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 5 5 */ 6 + 7 + #include <drm/drm_managed.h> 6 8 7 9 #include "dpu_kms.h" 8 10 #include "dpu_hw_catalog.h" ··· 83 81 } 84 82 } 85 83 86 - static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count) 84 + static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx) 87 85 { 88 - dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, enable, frame_count); 86 + dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0); 89 87 } 90 88 91 89 static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value) ··· 158 156 ops->collect_misr = dpu_hw_lm_collect_misr; 159 157 } 160 158 161 - struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg, 162 - void __iomem *addr) 159 + struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev, 160 + const struct dpu_lm_cfg *cfg, 161 + void __iomem *addr) 163 162 { 164 163 struct dpu_hw_mixer *c; 165 164 ··· 169 166 return NULL; 170 167 } 171 168 172 - c = kzalloc(sizeof(*c), GFP_KERNEL); 169 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 173 170 if (!c) 174 171 return ERR_PTR(-ENOMEM); 175 172 ··· 182 179 _setup_mixer_ops(&c->ops, c->cap->features); 183 180 184 181 return c; 185 - } 186 - 187 - void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm) 188 - { 189 - kfree(lm); 190 182 }
+6 -9
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 3 4 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 4 5 */ 5 6 ··· 58 57 /** 59 58 * setup_misr: Enable/disable MISR 60 59 */ 61 - void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count); 60 + void (*setup_misr)(struct dpu_hw_mixer *ctx); 62 61 63 62 /** 64 63 * collect_misr: Read MISR signature ··· 96 95 /** 97 96 * dpu_hw_lm_init() - Initializes the mixer hw driver object. 98 97 * should be called once before accessing every mixer. 98 + * @dev: Corresponding device for devres management 99 99 * @cfg: mixer catalog entry for which driver object is required 100 100 * @addr: mapped register io address of MDP 101 101 */ 102 - struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg, 103 - void __iomem *addr); 104 - 105 - /** 106 - * dpu_hw_lm_destroy(): Destroys layer mixer driver context 107 - * @lm: Pointer to LM driver context 108 - */ 109 - void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm); 102 + struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev, 103 + const struct dpu_lm_cfg *cfg, 104 + void __iomem *addr); 110 105 111 106 #endif /*_DPU_HW_LM_H */
+10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
··· 98 98 DPU_HW_BLK_DSPP, 99 99 DPU_HW_BLK_MERGE_3D, 100 100 DPU_HW_BLK_DSC, 101 + DPU_HW_BLK_CDM, 101 102 DPU_HW_BLK_MAX, 102 103 }; 103 104 ··· 186 185 DSC_MAX 187 186 }; 188 187 188 + enum dpu_cdm { 189 + CDM_0 = 1, 190 + CDM_MAX 191 + }; 192 + 189 193 enum dpu_pingpong { 190 194 PINGPONG_NONE, 191 195 PINGPONG_0, ··· 201 195 PINGPONG_5, 202 196 PINGPONG_6, 203 197 PINGPONG_7, 198 + PINGPONG_8, 199 + PINGPONG_9, 204 200 PINGPONG_S0, 205 201 PINGPONG_MAX 206 202 }; ··· 212 204 MERGE_3D_1, 213 205 MERGE_3D_2, 214 206 MERGE_3D_3, 207 + MERGE_3D_4, 215 208 MERGE_3D_MAX 216 209 }; 217 210 ··· 467 458 #define DPU_DBG_MASK_ROT (1 << 9) 468 459 #define DPU_DBG_MASK_DSPP (1 << 10) 469 460 #define DPU_DBG_MASK_DSC (1 << 11) 461 + #define DPU_DBG_MASK_CDM (1 << 12) 470 462 471 463 /** 472 464 * struct dpu_hw_tear_check - Struct contains parameters to configure
+6 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
··· 4 4 5 5 #include <linux/iopoll.h> 6 6 7 + #include <drm/drm_managed.h> 8 + 7 9 #include "dpu_hw_mdss.h" 8 10 #include "dpu_hwio.h" 9 11 #include "dpu_hw_catalog.h" ··· 39 37 c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode; 40 38 }; 41 39 42 - struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg, 43 - void __iomem *addr) 40 + struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev, 41 + const struct dpu_merge_3d_cfg *cfg, 42 + void __iomem *addr) 44 43 { 45 44 struct dpu_hw_merge_3d *c; 46 45 47 - c = kzalloc(sizeof(*c), GFP_KERNEL); 46 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 48 47 if (!c) 49 48 return ERR_PTR(-ENOMEM); 50 49 ··· 57 54 _setup_merge_3d_ops(c, c->caps->features); 58 55 59 56 return c; 60 - } 61 - 62 - void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw) 63 - { 64 - kfree(hw); 65 57 }
+4 -9
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
··· 48 48 /** 49 49 * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed 50 50 * merge3d catalog entry. 51 + * @dev: Corresponding device for devres management 51 52 * @cfg: Pingpong catalog entry for which driver object is required 52 53 * @addr: Mapped register io address of MDP 53 54 * Return: Error code or allocated dpu_hw_merge_3d context 54 55 */ 55 - struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg, 56 - void __iomem *addr); 57 - 58 - /** 59 - * dpu_hw_merge_3d_destroy - destroys merge_3d driver context 60 - * should be called to free the context 61 - * @pp: Pointer to PP driver context returned by dpu_hw_merge_3d_init 62 - */ 63 - void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp); 56 + struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev, 57 + const struct dpu_merge_3d_cfg *cfg, 58 + void __iomem *addr); 64 59 65 60 #endif /*_DPU_HW_MERGE3D_H */
+7 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
··· 4 4 5 5 #include <linux/iopoll.h> 6 6 7 + #include <drm/drm_managed.h> 8 + 7 9 #include "dpu_hw_mdss.h" 8 10 #include "dpu_hwio.h" 9 11 #include "dpu_hw_catalog.h" ··· 283 281 return 0; 284 282 } 285 283 286 - struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg, 287 - void __iomem *addr, const struct dpu_mdss_version *mdss_rev) 284 + struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev, 285 + const struct dpu_pingpong_cfg *cfg, 286 + void __iomem *addr, 287 + const struct dpu_mdss_version *mdss_rev) 288 288 { 289 289 struct dpu_hw_pingpong *c; 290 290 291 - c = kzalloc(sizeof(*c), GFP_KERNEL); 291 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 292 292 if (!c) 293 293 return ERR_PTR(-ENOMEM); 294 294 ··· 320 316 c->ops.setup_dither = dpu_hw_pp_setup_dither; 321 317 322 318 return c; 323 - } 324 - 325 - void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp) 326 - { 327 - kfree(pp); 328 319 }
+5 -9
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
··· 121 121 /** 122 122 * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed 123 123 * pingpong catalog entry. 124 + * @dev: Corresponding device for devres management 124 125 * @cfg: Pingpong catalog entry for which driver object is required 125 126 * @addr: Mapped register io address of MDP 126 127 * @mdss_rev: dpu core's major and minor versions 127 128 * Return: Error code or allocated dpu_hw_pingpong context 128 129 */ 129 - struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg, 130 - void __iomem *addr, const struct dpu_mdss_version *mdss_rev); 131 - 132 - /** 133 - * dpu_hw_pingpong_destroy - destroys pingpong driver context 134 - * should be called to free the context 135 - * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init 136 - */ 137 - void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp); 130 + struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev, 131 + const struct dpu_pingpong_cfg *cfg, 132 + void __iomem *addr, 133 + const struct dpu_mdss_version *mdss_rev); 138 134 139 135 #endif /*_DPU_HW_PINGPONG_H */
+9 -28
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
··· 11 11 #include "msm_mdss.h" 12 12 13 13 #include <drm/drm_file.h> 14 + #include <drm/drm_managed.h> 14 15 15 16 #define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087 16 17 ··· 396 395 format); 397 396 } 398 397 399 - static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_sspp *ctx) 400 - { 401 - if (!ctx) 402 - return 0; 403 - 404 - return dpu_hw_get_scaler3_ver(&ctx->hw, 405 - ctx->cap->sblk->scaler_blk.base); 406 - } 407 - 408 398 /* 409 399 * dpu_hw_sspp_setup_rects() 410 400 */ ··· 606 614 test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features)) 607 615 c->ops.setup_multirect = dpu_hw_sspp_setup_multirect; 608 616 609 - if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) || 610 - test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) || 611 - test_bit(DPU_SSPP_SCALER_QSEED4, &features)) { 617 + if (test_bit(DPU_SSPP_SCALER_QSEED3_COMPATIBLE, &features)) 612 618 c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3; 613 - c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver; 614 - } 615 619 616 620 if (test_bit(DPU_SSPP_CDP, &features)) 617 621 c->ops.setup_cdp = dpu_hw_sspp_setup_cdp; ··· 642 654 cfg->len, 643 655 kms); 644 656 645 - if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || 646 - cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) || 647 - cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || 648 - cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) 657 + if (sblk->scaler_blk.len) 649 658 dpu_debugfs_create_regset32("scaler_blk", 0400, 650 659 debugfs_root, 651 660 sblk->scaler_blk.base + cfg->base, ··· 670 685 } 671 686 #endif 672 687 673 - struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg, 674 - void __iomem *addr, const struct msm_mdss_data *mdss_data, 675 - const struct dpu_mdss_version *mdss_rev) 688 + struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev, 689 + const struct dpu_sspp_cfg *cfg, 690 + void __iomem *addr, 691 + const struct msm_mdss_data *mdss_data, 692 + const struct dpu_mdss_version *mdss_rev) 676 693 { 677 694 struct dpu_hw_sspp *hw_pipe; 678 695 679 696 if (!addr) 680 697 return ERR_PTR(-EINVAL); 681 698 682 - hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL); 699 + hw_pipe = drmm_kzalloc(dev, sizeof(*hw_pipe), GFP_KERNEL); 683 700 if (!hw_pipe) 684 701 return ERR_PTR(-ENOMEM); 685 702 ··· 696 709 697 710 return hw_pipe; 698 711 } 699 - 700 - void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx) 701 - { 702 - kfree(ctx); 703 - } 704 -
+6 -31
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
··· 22 22 #define DPU_SSPP_SOLID_FILL BIT(4) 23 23 24 24 /** 25 - * Define all scaler feature bits in catalog 26 - */ 27 - #define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \ 28 - BIT(DPU_SSPP_SCALER_QSEED2) | \ 29 - BIT(DPU_SSPP_SCALER_QSEED3) | \ 30 - BIT(DPU_SSPP_SCALER_QSEED3LITE) | \ 31 - BIT(DPU_SSPP_SCALER_QSEED4)) 32 - 33 - /* 34 - * Define all CSC feature bits in catalog 35 - */ 36 - #define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \ 37 - BIT(DPU_SSPP_CSC_10BIT)) 38 - 39 - /** 40 25 * Component indices 41 26 */ 42 27 enum { ··· 282 297 const struct dpu_format *format); 283 298 284 299 /** 285 - * get_scaler_ver - get scaler h/w version 286 - * @ctx: Pointer to pipe context 287 - */ 288 - u32 (*get_scaler_ver)(struct dpu_hw_sspp *ctx); 289 - 290 - /** 291 300 * setup_cdp - setup client driven prefetch 292 301 * @pipe: Pointer to software pipe context 293 302 * @fmt: format used by the sw pipe ··· 318 339 /** 319 340 * dpu_hw_sspp_init() - Initializes the sspp hw driver object. 320 341 * Should be called once before accessing every pipe. 342 + * @dev: Corresponding device for devres management 321 343 * @cfg: Pipe catalog entry for which driver object is required 322 344 * @addr: Mapped register io address of MDP 323 345 * @mdss_data: UBWC / MDSS configuration data 324 346 * @mdss_rev: dpu core's major and minor versions 325 347 */ 326 - struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg, 327 - void __iomem *addr, const struct msm_mdss_data *mdss_data, 328 - const struct dpu_mdss_version *mdss_rev); 329 - 330 - /** 331 - * dpu_hw_sspp_destroy(): Destroys SSPP driver context 332 - * should be called during Hw pipe cleanup. 333 - * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init 334 - */ 335 - void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx); 348 + struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev, 349 + const struct dpu_sspp_cfg *cfg, 350 + void __iomem *addr, 351 + const struct msm_mdss_data *mdss_data, 352 + const struct dpu_mdss_version *mdss_rev); 336 353 337 354 int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms, 338 355 struct dentry *entry);
+7 -10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
··· 2 2 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 3 3 */ 4 4 5 + #include <drm/drm_managed.h> 6 + 5 7 #include "dpu_hwio.h" 6 8 #include "dpu_hw_catalog.h" 7 9 #include "dpu_hw_top.h" ··· 249 247 ops->intf_audio_select = dpu_hw_intf_audio_select; 250 248 } 251 249 252 - struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg, 253 - void __iomem *addr, 254 - const struct dpu_mdss_cfg *m) 250 + struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, 251 + const struct dpu_mdp_cfg *cfg, 252 + void __iomem *addr, 253 + const struct dpu_mdss_cfg *m) 255 254 { 256 255 struct dpu_hw_mdp *mdp; 257 256 258 257 if (!addr) 259 258 return ERR_PTR(-EINVAL); 260 259 261 - mdp = kzalloc(sizeof(*mdp), GFP_KERNEL); 260 + mdp = drmm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL); 262 261 if (!mdp) 263 262 return ERR_PTR(-ENOMEM); 264 263 ··· 274 271 275 272 return mdp; 276 273 } 277 - 278 - void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp) 279 - { 280 - kfree(mdp); 281 - } 282 -
+5 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
··· 145 145 146 146 /** 147 147 * dpu_hw_mdptop_init - initializes the top driver for the passed config 148 + * @dev: Corresponding device for devres management 148 149 * @cfg: MDP TOP configuration from catalog 149 150 * @addr: Mapped register io address of MDP 150 151 * @m: Pointer to mdss catalog data 151 152 */ 152 - struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg, 153 - void __iomem *addr, 154 - const struct dpu_mdss_cfg *m); 153 + struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, 154 + const struct dpu_mdp_cfg *cfg, 155 + void __iomem *addr, 156 + const struct dpu_mdss_cfg *m); 155 157 156 158 void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp); 157 159
+52 -18
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 3 + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 4 * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 5 5 */ 6 6 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ ··· 381 381 DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode); 382 382 } 383 383 384 - u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, 385 - u32 scaler_offset) 386 - { 387 - return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset); 388 - } 389 - 390 384 void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, 391 385 u32 csc_reg_off, 392 386 const struct dpu_csc_cfg *data, bool csc10) ··· 475 481 cfg->danger_safe_en ? QOS_QOS_CTRL_DANGER_SAFE_EN : 0); 476 482 } 477 483 484 + /* 485 + * note: Aside from encoders, input_sel should be set to 0x0 by default 486 + */ 478 487 void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c, 479 - u32 misr_ctrl_offset, 480 - bool enable, u32 frame_count) 488 + u32 misr_ctrl_offset, u8 input_sel) 481 489 { 482 490 u32 config = 0; 483 491 ··· 488 492 /* Clear old MISR value (in case it's read before a new value is calculated)*/ 489 493 wmb(); 490 494 491 - if (enable) { 492 - config = (frame_count & MISR_FRAME_COUNT_MASK) | 493 - MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK; 494 - 495 - DPU_REG_WRITE(c, misr_ctrl_offset, config); 496 - } else { 497 - DPU_REG_WRITE(c, misr_ctrl_offset, 0); 498 - } 499 - 495 + config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK | 496 + ((input_sel & 0xF) << 24); 497 + DPU_REG_WRITE(c, misr_ctrl_offset, config); 500 498 } 501 499 502 500 int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c, ··· 557 567 558 568 return clk_forced_on; 559 569 } 570 + 571 + #define TO_S15D16(_x_)((_x_) << 7) 572 + 573 + const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = { 574 + { 575 + /* S15.16 format */ 576 + 0x00012A00, 0x00000000, 0x00019880, 577 + 0x00012A00, 0xFFFF9B80, 0xFFFF3000, 578 + 0x00012A00, 0x00020480, 0x00000000, 579 + }, 580 + /* signed bias */ 581 + { 0xfff0, 0xff80, 0xff80,}, 582 + { 0x0, 0x0, 0x0,}, 583 + /* unsigned clamp */ 584 + { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, 585 + { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,}, 586 + }; 587 + 588 + const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = { 589 + { 590 + /* S15.16 format */ 591 + 0x00012A00, 0x00000000, 0x00019880, 592 + 0x00012A00, 0xFFFF9B80, 0xFFFF3000, 593 + 0x00012A00, 0x00020480, 0x00000000, 594 + }, 595 + /* signed bias */ 596 + { 0xffc0, 0xfe00, 0xfe00,}, 597 + { 0x0, 0x0, 0x0,}, 598 + /* unsigned clamp */ 599 + { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, 600 + { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,}, 601 + }; 602 + 603 + const struct dpu_csc_cfg dpu_csc10_rgb2yuv_601l = { 604 + { 605 + TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032), 606 + TO_S15D16(0x1fb5), TO_S15D16(0x1f6c), TO_S15D16(0x00e1), 607 + TO_S15D16(0x00e1), TO_S15D16(0x1f45), TO_S15D16(0x1fdc) 608 + }, 609 + { 0x00, 0x00, 0x00 }, 610 + { 0x0040, 0x0200, 0x0200 }, 611 + { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff }, 612 + { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 }, 613 + };
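The two YUV-to-RGB tables added here are the ones previously private to dpu_plane.c (see its diff below), and the new RGB-to-YUV table for the CDM/writeback path is written with TO_S15D16(). A quick standalone sanity check of what those constants encode; the fixed-point interpretation is an assumption for illustration, based on the "S15.16 format" comments in the tables themselves:

#include <stdio.h>

#define TO_S15D16(x)	((x) << 7)

int main(void)
{
	/* S15.16: 0x00012A00 / 2^16 = 1.1640625, i.e. the BT.601
	 * limited-range luma gain (255/219 ~= 1.164) used in
	 * dpu_csc_YUV2RGB_601L. */
	printf("0x00012A00 -> %f\n", 0x00012A00 / 65536.0);

	/* TO_S15D16(0x0083) = 0x0083 << 7 = 0x4180; read as S15.16 this
	 * is 131/512 ~= 0.256, the R contribution to Y in
	 * dpu_csc10_rgb2yuv_601l (assumes 9 fractional bits in the
	 * macro argument). */
	printf("TO_S15D16(0x0083) = 0x%04x -> %f\n",
	       TO_S15D16(0x0083), TO_S15D16(0x0083) / 65536.0);

	return 0;
}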
+9 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 3 + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 4 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 5 5 */ 6 6 ··· 13 13 #include "dpu_hw_catalog.h" 14 14 15 15 #define REG_MASK(n) ((BIT(n)) - 1) 16 - #define MISR_FRAME_COUNT_MASK 0xFF 16 + #define MISR_FRAME_COUNT 0x1 17 17 #define MISR_CTRL_ENABLE BIT(8) 18 18 #define MISR_CTRL_STATUS BIT(9) 19 19 #define MISR_CTRL_STATUS_CLEAR BIT(10) 20 20 #define MISR_CTRL_FREE_RUN_MASK BIT(31) 21 + 22 + #define TO_S15D16(_x_)((_x_) << 7) 23 + 24 + extern const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L; 25 + extern const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L; 26 + extern const struct dpu_csc_cfg dpu_csc10_rgb2yuv_601l; 21 27 22 28 /* 23 29 * This is the common struct maintained by each sub block ··· 346 340 u32 scaler_offset, u32 scaler_version, 347 341 const struct dpu_format *format); 348 342 349 - u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, 350 - u32 scaler_offset); 351 - 352 343 void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, 353 344 u32 csc_reg_off, 354 345 const struct dpu_csc_cfg *data, bool csc10); ··· 361 358 const struct dpu_hw_qos_cfg *cfg); 362 359 363 360 void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c, 364 - u32 misr_ctrl_offset, 365 - bool enable, 366 - u32 frame_count); 361 + u32 misr_ctrl_offset, u8 input_sel); 367 362 368 363 int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c, 369 364 u32 misr_ctrl_offset,
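One behavioural note on the MISR rework above: the enable/frame_count parameters are gone, so dpu_hw_setup_misr() now always enables the MISR in free-run mode with a one-frame window (MISR_FRAME_COUNT is now the literal count 0x1 rather than a mask), and the new input_sel nibble lands in bits [27:24] of the MISR_CTRL register. The callers earlier in this series pass 0x0 for layer mixers and 0x1 for interfaces. A standalone illustration of the resulting register value, using the defines from dpu_hw_util.h above:

#include <stdio.h>

#define MISR_FRAME_COUNT	0x1
#define MISR_CTRL_ENABLE	(1u << 8)
#define MISR_CTRL_FREE_RUN_MASK	(1u << 31)

int main(void)
{
	unsigned int input_sel = 0x1;	/* INTF case; LM passes 0x0 */
	unsigned int config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE |
			      MISR_CTRL_FREE_RUN_MASK |
			      ((input_sel & 0xF) << 24);

	printf("MISR_CTRL = 0x%08x\n", config);	/* prints 0x81000101 */
	return 0;
}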
+6 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
··· 2 2 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 3 3 */ 4 4 5 + #include <drm/drm_managed.h> 6 + 5 7 #include "dpu_hwio.h" 6 8 #include "dpu_hw_catalog.h" 7 9 #include "dpu_hw_vbif.h" ··· 213 211 ops->set_write_gather_en = dpu_hw_set_write_gather_en; 214 212 } 215 213 216 - struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg, 217 - void __iomem *addr) 214 + struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, 215 + const struct dpu_vbif_cfg *cfg, 216 + void __iomem *addr) 218 217 { 219 218 struct dpu_hw_vbif *c; 220 219 221 - c = kzalloc(sizeof(*c), GFP_KERNEL); 220 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 222 221 if (!c) 223 222 return ERR_PTR(-ENOMEM); 224 223 ··· 236 233 /* no need to register sub-range in dpu dbg, dump entire vbif io base */ 237 234 238 235 return c; 239 - } 240 - 241 - void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif) 242 - { 243 - kfree(vbif); 244 236 }
+4 -4
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
··· 108 108 /** 109 109 * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed 110 110 * VBIF catalog entry. 111 + * @dev: Corresponding device for devres management 111 112 * @cfg: VBIF catalog entry for which driver object is required 112 113 * @addr: Mapped register io address of MDSS 113 114 */ 114 - struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg, 115 - void __iomem *addr); 116 - 117 - void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif); 115 + struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, 116 + const struct dpu_vbif_cfg *cfg, 117 + void __iomem *addr); 118 118 119 119 #endif /*_DPU_HW_VBIF_H */
+10 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
··· 3 3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved 4 4 */ 5 5 6 + #include <drm/drm_managed.h> 7 + 6 8 #include "dpu_hw_mdss.h" 7 9 #include "dpu_hwio.h" 8 10 #include "dpu_hw_catalog.h" ··· 88 86 !(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA))) 89 87 dst_format |= BIT(14); /* DST_ALPHA_X */ 90 88 } 89 + 90 + if (DPU_FORMAT_IS_YUV(fmt)) 91 + dst_format |= BIT(15); 91 92 92 93 pattern = (fmt->element[3] << 24) | 93 94 (fmt->element[2] << 16) | ··· 213 208 ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl; 214 209 } 215 210 216 - struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg, 217 - void __iomem *addr, const struct dpu_mdss_version *mdss_rev) 211 + struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev, 212 + const struct dpu_wb_cfg *cfg, 213 + void __iomem *addr, 214 + const struct dpu_mdss_version *mdss_rev) 218 215 { 219 216 struct dpu_hw_wb *c; 220 217 221 218 if (!addr) 222 219 return ERR_PTR(-EINVAL); 223 220 224 - c = kzalloc(sizeof(*c), GFP_KERNEL); 221 + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); 225 222 if (!c) 226 223 return ERR_PTR(-ENOMEM); 227 224 ··· 236 229 _setup_wb_ops(&c->ops, c->caps->features, mdss_rev); 237 230 238 231 return c; 239 - } 240 - 241 - void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb) 242 - { 243 - kfree(hw_wb); 244 232 }
+5 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
··· 76 76 77 77 /** 78 78 * dpu_hw_wb_init() - Initializes the writeback hw driver object. 79 + * @dev: Corresponding device for devres management 79 80 * @cfg: wb_path catalog entry for which driver object is required 80 81 * @addr: mapped register io address of MDP 81 82 * @mdss_rev: dpu core's major and minor versions 82 83 * Return: Error code or allocated dpu_hw_wb context 83 84 */ 84 - struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg, 85 - void __iomem *addr, const struct dpu_mdss_version *mdss_rev); 86 - 87 - /** 88 - * dpu_hw_wb_destroy(): Destroy writeback hw driver object. 89 - * @hw_wb: Pointer to writeback hw driver object 90 - */ 91 - void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb); 85 + struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev, 86 + const struct dpu_wb_cfg *cfg, 87 + void __iomem *addr, 88 + const struct dpu_mdss_version *mdss_rev); 92 89 93 90 #endif /*_DPU_HW_WB_H */
+25 -54
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 274 274 struct dpu_kms *dpu_kms = to_dpu_kms(kms); 275 275 void *p = dpu_hw_util_get_log_mask_ptr(); 276 276 struct dentry *entry; 277 - struct drm_device *dev; 278 - struct msm_drm_private *priv; 279 - int i; 280 277 281 278 if (!p) 282 279 return -EINVAL; ··· 281 284 /* Only create a set of debugfs for the primary node, ignore render nodes */ 282 285 if (minor->type != DRM_MINOR_PRIMARY) 283 286 return 0; 284 - 285 - dev = dpu_kms->dev; 286 - priv = dev->dev_private; 287 287 288 288 entry = debugfs_create_dir("debug", minor->debugfs_root); 289 289 ··· 290 296 dpu_debugfs_vbif_init(dpu_kms, entry); 291 297 dpu_debugfs_core_irq_init(dpu_kms, entry); 292 298 dpu_debugfs_sspp_init(dpu_kms, entry); 293 - 294 - for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { 295 - if (priv->dp[i]) 296 - msm_dp_debugfs_init(priv->dp[i], minor); 297 - } 298 299 299 300 return dpu_core_perf_debugfs_init(dpu_kms, entry); 300 301 } ··· 586 597 rc = msm_dp_modeset_init(priv->dp[i], dev, encoder); 587 598 if (rc) { 588 599 DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); 589 - drm_encoder_cleanup(encoder); 590 600 return rc; 591 601 } 592 602 } ··· 618 630 rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); 619 631 if (rc) { 620 632 DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); 621 - drm_encoder_cleanup(encoder); 622 633 return rc; 623 634 } 624 635 ··· 649 662 n_formats); 650 663 if (rc) { 651 664 DPU_ERROR("dpu_writeback_init, rc = %d\n", rc); 652 - drm_encoder_cleanup(encoder); 653 665 return rc; 654 666 } 655 667 ··· 792 806 { 793 807 int i; 794 808 795 - if (dpu_kms->hw_intr) 796 - dpu_hw_intr_destroy(dpu_kms->hw_intr); 797 809 dpu_kms->hw_intr = NULL; 798 810 799 811 /* safe to call these more than once during shutdown */ 800 812 _dpu_kms_mmu_destroy(dpu_kms); 801 813 802 - if (dpu_kms->catalog) { 803 - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { 804 - if (dpu_kms->hw_vbif[i]) { 805 - dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]); 806 - dpu_kms->hw_vbif[i] = NULL; 807 - } 808 - } 814 + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { 815 + dpu_kms->hw_vbif[i] = NULL; 809 816 } 810 - 811 - if (dpu_kms->rm_init) 812 - dpu_rm_destroy(&dpu_kms->rm); 813 - dpu_kms->rm_init = false; 814 817 815 818 dpu_kms->catalog = NULL; 816 819 817 - if (dpu_kms->hw_mdp) 818 - dpu_hw_mdp_destroy(dpu_kms->hw_mdp); 819 820 dpu_kms->hw_mdp = NULL; 820 821 } 821 822 ··· 829 856 { 830 857 struct msm_drm_private *priv; 831 858 struct dpu_kms *dpu_kms = to_dpu_kms(kms); 832 - int i; 833 859 834 860 if (!dpu_kms || !dpu_kms->dev) 835 861 return -EINVAL; ··· 836 864 priv = dpu_kms->dev->dev_private; 837 865 if (!priv) 838 866 return -EINVAL; 839 - 840 - for (i = 0; i < ARRAY_SIZE(priv->dp); i++) 841 - msm_dp_irq_postinstall(priv->dp[i]); 842 867 843 868 return 0; 844 869 } ··· 944 975 } 945 976 } 946 977 978 + if (cat->cdm) 979 + msm_disp_snapshot_add_block(disp_state, cat->cdm->len, 980 + dpu_kms->mmio + cat->cdm->base, cat->cdm->name); 981 + 947 982 pm_runtime_put_sync(&dpu_kms->pdev->dev); 948 983 } 949 984 ··· 1051 1078 if (!dpu_kms->catalog) { 1052 1079 DPU_ERROR("device config not known!\n"); 1053 1080 rc = -EINVAL; 1054 - goto power_error; 1081 + goto err_pm_put; 1055 1082 } 1056 1083 1057 1084 /* ··· 1061 1088 rc = _dpu_kms_mmu_init(dpu_kms); 1062 1089 if (rc) { 1063 1090 DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc); 1064 - goto power_error; 1091 + goto err_pm_put; 1065 1092 } 1066 1093 1067 1094 dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent); 1068 1095 if (IS_ERR(dpu_kms->mdss)) { 
1069 1096 rc = PTR_ERR(dpu_kms->mdss); 1070 1097 DPU_ERROR("failed to get MDSS data: %d\n", rc); 1071 - goto power_error; 1098 + goto err_pm_put; 1072 1099 } 1073 1100 1074 1101 if (!dpu_kms->mdss) { 1075 1102 rc = -EINVAL; 1076 1103 DPU_ERROR("NULL MDSS data\n"); 1077 - goto power_error; 1104 + goto err_pm_put; 1078 1105 } 1079 1106 1080 - rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio); 1107 + rc = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio); 1081 1108 if (rc) { 1082 1109 DPU_ERROR("rm init failed: %d\n", rc); 1083 - goto power_error; 1110 + goto err_pm_put; 1084 1111 } 1085 1112 1086 - dpu_kms->rm_init = true; 1087 - 1088 - dpu_kms->hw_mdp = dpu_hw_mdptop_init(dpu_kms->catalog->mdp, 1113 + dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev, 1114 + dpu_kms->catalog->mdp, 1089 1115 dpu_kms->mmio, 1090 1116 dpu_kms->catalog); 1091 1117 if (IS_ERR(dpu_kms->hw_mdp)) { 1092 1118 rc = PTR_ERR(dpu_kms->hw_mdp); 1093 1119 DPU_ERROR("failed to get hw_mdp: %d\n", rc); 1094 1120 dpu_kms->hw_mdp = NULL; 1095 - goto power_error; 1121 + goto err_pm_put; 1096 1122 } 1097 1123 1098 1124 for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { 1099 1125 struct dpu_hw_vbif *hw; 1100 1126 const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; 1101 1127 1102 - hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]); 1128 + hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif[vbif->id]); 1103 1129 if (IS_ERR(hw)) { 1104 1130 rc = PTR_ERR(hw); 1105 1131 DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc); 1106 - goto power_error; 1132 + goto err_pm_put; 1107 1133 } 1108 1134 1109 1135 dpu_kms->hw_vbif[vbif->id] = hw; ··· 1118 1146 rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate); 1119 1147 if (rc) { 1120 1148 DPU_ERROR("failed to init perf %d\n", rc); 1121 - goto perf_err; 1149 + goto err_pm_put; 1122 1150 } 1123 1151 1124 - dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog); 1125 - if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) { 1152 + dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog); 1153 + if (IS_ERR(dpu_kms->hw_intr)) { 1126 1154 rc = PTR_ERR(dpu_kms->hw_intr); 1127 1155 DPU_ERROR("hw_intr init failed: %d\n", rc); 1128 1156 dpu_kms->hw_intr = NULL; 1129 - goto hw_intr_init_err; 1157 + goto err_pm_put; 1130 1158 } 1131 1159 1132 1160 dev->mode_config.min_width = 0; ··· 1151 1179 rc = _dpu_kms_drm_obj_init(dpu_kms); 1152 1180 if (rc) { 1153 1181 DPU_ERROR("modeset init failed: %d\n", rc); 1154 - goto drm_obj_init_err; 1182 + goto err_pm_put; 1155 1183 } 1156 1184 1157 1185 dpu_vbif_init_memtypes(dpu_kms); ··· 1160 1188 1161 1189 return 0; 1162 1190 1163 - drm_obj_init_err: 1164 - hw_intr_init_err: 1165 - perf_err: 1166 - power_error: 1191 + err_pm_put: 1167 1192 pm_runtime_put_sync(&dpu_kms->pdev->dev); 1168 1193 error: 1169 1194 _dpu_kms_hw_destroy(dpu_kms); ··· 1318 1349 static const struct of_device_id dpu_dt_match[] = { 1319 1350 { .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, }, 1320 1351 { .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, }, 1352 + { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, }, 1321 1353 { .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, }, 1322 1354 { .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, }, 1323 1355 { .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, }, ··· 1333 1363 { .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, }, 1334 1364 { .compatible = "qcom,sm8450-dpu", .data = 
&dpu_sm8450_cfg, }, 1335 1365 { .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, }, 1366 + { .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, }, 1336 1367 {} 1337 1368 }; 1338 1369 MODULE_DEVICE_TABLE(of, dpu_dt_match);
+2 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
··· 51 51 } while (0) 52 52 53 53 #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__) 54 + #define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__) 54 55 55 56 /** 56 57 * ktime_compare_safe - compare two ktime structures ··· 89 88 struct drm_private_obj global_state; 90 89 91 90 struct dpu_rm rm; 92 - bool rm_init; 93 91 94 92 struct dpu_hw_vbif *hw_vbif[VBIF_MAX]; 95 93 struct dpu_hw_mdp *hw_mdp; ··· 136 136 uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; 137 137 uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0]; 138 138 uint32_t dsc_to_enc_id[DSC_MAX - DSC_0]; 139 + uint32_t cdm_to_enc_id; 139 140 }; 140 141 141 142 struct dpu_global_state
+20 -85
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 21 21 #include "dpu_kms.h" 22 22 #include "dpu_formats.h" 23 23 #include "dpu_hw_sspp.h" 24 + #include "dpu_hw_util.h" 24 25 #include "dpu_trace.h" 25 26 #include "dpu_crtc.h" 26 27 #include "dpu_vbif.h" ··· 78 77 */ 79 78 struct dpu_plane { 80 79 struct drm_plane base; 81 - 82 - struct mutex lock; 83 80 84 81 enum dpu_sspp pipe; 85 82 ··· 469 470 scale_cfg->src_height[i] /= chroma_subsmpl_v; 470 471 } 471 472 472 - if (pipe_hw->cap->features & 473 - BIT(DPU_SSPP_SCALER_QSEED4)) { 473 + if (pipe_hw->cap->sblk->scaler_blk.version >= 0x3000) { 474 474 scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H; 475 475 scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V; 476 476 } else { ··· 508 510 pixel_ext->num_ext_pxls_left[i] = src_w; 509 511 } 510 512 } 511 - 512 - static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = { 513 - { 514 - /* S15.16 format */ 515 - 0x00012A00, 0x00000000, 0x00019880, 516 - 0x00012A00, 0xFFFF9B80, 0xFFFF3000, 517 - 0x00012A00, 0x00020480, 0x00000000, 518 - }, 519 - /* signed bias */ 520 - { 0xfff0, 0xff80, 0xff80,}, 521 - { 0x0, 0x0, 0x0,}, 522 - /* unsigned clamp */ 523 - { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, 524 - { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,}, 525 - }; 526 - 527 - static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = { 528 - { 529 - /* S15.16 format */ 530 - 0x00012A00, 0x00000000, 0x00019880, 531 - 0x00012A00, 0xFFFF9B80, 0xFFFF3000, 532 - 0x00012A00, 0x00020480, 0x00000000, 533 - }, 534 - /* signed bias */ 535 - { 0xffc0, 0xfe00, 0xfe00,}, 536 - { 0x0, 0x0, 0x0,}, 537 - /* unsigned clamp */ 538 - { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, 539 - { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,}, 540 - }; 541 513 542 514 static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe, 543 515 const struct dpu_format *fmt) ··· 742 774 min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1; 743 775 744 776 if (DPU_FORMAT_IS_YUV(fmt) && 745 - (!(pipe->sspp->cap->features & DPU_SSPP_SCALER) || 746 - !(pipe->sspp->cap->features & DPU_SSPP_CSC_ANY))) { 777 + (!pipe->sspp->cap->sblk->scaler_blk.len || 778 + !pipe->sspp->cap->sblk->csc_blk.len)) { 747 779 DPU_DEBUG_PLANE(pdpu, 748 780 "plane doesn't have scaler/csc for yuv\n"); 749 781 return -EINVAL; ··· 792 824 plane); 793 825 int ret = 0, min_scale; 794 826 struct dpu_plane *pdpu = to_dpu_plane(plane); 827 + struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); 828 + u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate; 795 829 struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); 796 830 struct dpu_sw_pipe *pipe = &pstate->pipe; 797 831 struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; ··· 862 892 863 893 max_linewidth = pdpu->catalog->caps->max_linewidth; 864 894 865 - if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) { 895 + if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || 896 + _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { 866 897 /* 867 898 * In parallel multirect case only the half of the usual width 868 899 * is supported for tiled formats. If we are here, we know that 869 900 * full width is more than max_linewidth, thus each rect is 870 901 * wider than allowed. 
871 902 */ 872 - if (DPU_FORMAT_IS_UBWC(fmt)) { 903 + if (DPU_FORMAT_IS_UBWC(fmt) && 904 + drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) { 873 905 DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n", 874 906 DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); 875 907 return -E2BIG; ··· 1185 1213 } 1186 1214 } 1187 1215 1188 - static void dpu_plane_destroy(struct drm_plane *plane) 1189 - { 1190 - struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL; 1191 - struct dpu_plane_state *pstate; 1192 - 1193 - DPU_DEBUG_PLANE(pdpu, "\n"); 1194 - 1195 - if (pdpu) { 1196 - pstate = to_dpu_plane_state(plane->state); 1197 - _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false); 1198 - 1199 - if (pstate->r_pipe.sspp) 1200 - _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false); 1201 - 1202 - mutex_destroy(&pdpu->lock); 1203 - 1204 - /* this will destroy the states as well */ 1205 - drm_plane_cleanup(plane); 1206 - 1207 - kfree(pdpu); 1208 - } 1209 - } 1210 - 1211 1216 static void dpu_plane_destroy_state(struct drm_plane *plane, 1212 1217 struct drm_plane_state *state) 1213 1218 { ··· 1354 1405 static const struct drm_plane_funcs dpu_plane_funcs = { 1355 1406 .update_plane = drm_atomic_helper_update_plane, 1356 1407 .disable_plane = drm_atomic_helper_disable_plane, 1357 - .destroy = dpu_plane_destroy, 1358 1408 .reset = dpu_plane_reset, 1359 1409 .atomic_duplicate_state = dpu_plane_duplicate_state, 1360 1410 .atomic_destroy_state = dpu_plane_destroy_state, ··· 1381 1433 struct dpu_hw_sspp *pipe_hw; 1382 1434 uint32_t num_formats; 1383 1435 uint32_t supported_rotations; 1384 - int ret = -EINVAL; 1385 - 1386 - /* create and zero local structure */ 1387 - pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL); 1388 - if (!pdpu) { 1389 - DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe); 1390 - ret = -ENOMEM; 1391 - return ERR_PTR(ret); 1392 - } 1393 - 1394 - /* cache local stuff for later */ 1395 - plane = &pdpu->base; 1396 - pdpu->pipe = pipe; 1436 + int ret; 1397 1437 1398 1438 /* initialize underlying h/w driver */ 1399 1439 pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe); 1400 1440 if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) { 1401 1441 DPU_ERROR("[%u]SSPP is invalid\n", pipe); 1402 - goto clean_plane; 1442 + return ERR_PTR(-EINVAL); 1403 1443 } 1404 1444 1405 1445 format_list = pipe_hw->cap->sblk->format_list; 1406 1446 num_formats = pipe_hw->cap->sblk->num_formats; 1407 1447 1408 - ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, 1448 + pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base, 1449 + 0xff, &dpu_plane_funcs, 1409 1450 format_list, num_formats, 1410 1451 supported_format_modifiers, type, NULL); 1411 - if (ret) 1412 - goto clean_plane; 1452 + if (IS_ERR(pdpu)) 1453 + return ERR_CAST(pdpu); 1454 + 1455 + /* cache local stuff for later */ 1456 + plane = &pdpu->base; 1457 + pdpu->pipe = pipe; 1413 1458 1414 1459 pdpu->catalog = kms->catalog; 1415 1460 ··· 1429 1488 /* success! finalize initialization */ 1430 1489 drm_plane_helper_add(plane, &dpu_plane_helper_funcs); 1431 1490 1432 - mutex_init(&pdpu->lock); 1433 - 1434 1491 DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name, 1435 1492 pipe, plane->base.id); 1436 1493 return plane; 1437 - 1438 - clean_plane: 1439 - kfree(pdpu); 1440 - return ERR_PTR(ret); 1441 1494 }
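Besides the scaler-version switch, the atomic_check change above makes the plane fall back to two SSPP rectangles when the required pixel rate exceeds the maximum MDP core clock, not only when the source width exceeds max_linewidth. A rough back-of-envelope illustration (this is not the driver's _dpu_plane_calc_clk() math, only an order-of-magnitude example):

#include <stdio.h>

int main(void)
{
	/* A 3840x2160@60 source needs on the order of 500 MHz of pixel
	 * throughput, which can exceed the MDP core clock on some SoCs
	 * even though 3840 fits within max_linewidth. */
	unsigned long long rate = 3840ULL * 2160 * 60;

	printf("approx pixel rate: %llu Hz\n", rate);	/* 497664000 */
	return 0;
}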
+61 -80
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
··· 8 8 #include "dpu_kms.h" 9 9 #include "dpu_hw_lm.h" 10 10 #include "dpu_hw_ctl.h" 11 + #include "dpu_hw_cdm.h" 11 12 #include "dpu_hw_pingpong.h" 12 13 #include "dpu_hw_sspp.h" 13 14 #include "dpu_hw_intf.h" ··· 35 34 struct msm_display_topology topology; 36 35 }; 37 36 38 - int dpu_rm_destroy(struct dpu_rm *rm) 39 - { 40 - int i; 41 - 42 - for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) { 43 - struct dpu_hw_dspp *hw; 44 - 45 - if (rm->dspp_blks[i]) { 46 - hw = to_dpu_hw_dspp(rm->dspp_blks[i]); 47 - dpu_hw_dspp_destroy(hw); 48 - } 49 - } 50 - for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { 51 - struct dpu_hw_pingpong *hw; 52 - 53 - if (rm->pingpong_blks[i]) { 54 - hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]); 55 - dpu_hw_pingpong_destroy(hw); 56 - } 57 - } 58 - for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) { 59 - struct dpu_hw_merge_3d *hw; 60 - 61 - if (rm->merge_3d_blks[i]) { 62 - hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]); 63 - dpu_hw_merge_3d_destroy(hw); 64 - } 65 - } 66 - for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) { 67 - struct dpu_hw_mixer *hw; 68 - 69 - if (rm->mixer_blks[i]) { 70 - hw = to_dpu_hw_mixer(rm->mixer_blks[i]); 71 - dpu_hw_lm_destroy(hw); 72 - } 73 - } 74 - for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) { 75 - struct dpu_hw_ctl *hw; 76 - 77 - if (rm->ctl_blks[i]) { 78 - hw = to_dpu_hw_ctl(rm->ctl_blks[i]); 79 - dpu_hw_ctl_destroy(hw); 80 - } 81 - } 82 - for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++) 83 - dpu_hw_intf_destroy(rm->hw_intf[i]); 84 - 85 - for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) { 86 - struct dpu_hw_dsc *hw; 87 - 88 - if (rm->dsc_blks[i]) { 89 - hw = to_dpu_hw_dsc(rm->dsc_blks[i]); 90 - dpu_hw_dsc_destroy(hw); 91 - } 92 - } 93 - 94 - for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++) 95 - dpu_hw_wb_destroy(rm->hw_wb[i]); 96 - 97 - for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) 98 - dpu_hw_sspp_destroy(rm->hw_sspp[i]); 99 - 100 - return 0; 101 - } 102 - 103 - int dpu_rm_init(struct dpu_rm *rm, 37 + int dpu_rm_init(struct drm_device *dev, 38 + struct dpu_rm *rm, 104 39 const struct dpu_mdss_cfg *cat, 105 40 const struct msm_mdss_data *mdss_data, 106 41 void __iomem *mmio) ··· 56 119 struct dpu_hw_mixer *hw; 57 120 const struct dpu_lm_cfg *lm = &cat->mixer[i]; 58 121 59 - hw = dpu_hw_lm_init(lm, mmio); 122 + hw = dpu_hw_lm_init(dev, lm, mmio); 60 123 if (IS_ERR(hw)) { 61 124 rc = PTR_ERR(hw); 62 125 DPU_ERROR("failed lm object creation: err %d\n", rc); ··· 69 132 struct dpu_hw_merge_3d *hw; 70 133 const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i]; 71 134 72 - hw = dpu_hw_merge_3d_init(merge_3d, mmio); 135 + hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio); 73 136 if (IS_ERR(hw)) { 74 137 rc = PTR_ERR(hw); 75 138 DPU_ERROR("failed merge_3d object creation: err %d\n", ··· 83 146 struct dpu_hw_pingpong *hw; 84 147 const struct dpu_pingpong_cfg *pp = &cat->pingpong[i]; 85 148 86 - hw = dpu_hw_pingpong_init(pp, mmio, cat->mdss_ver); 149 + hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver); 87 150 if (IS_ERR(hw)) { 88 151 rc = PTR_ERR(hw); 89 152 DPU_ERROR("failed pingpong object creation: err %d\n", ··· 99 162 struct dpu_hw_intf *hw; 100 163 const struct dpu_intf_cfg *intf = &cat->intf[i]; 101 164 102 - hw = dpu_hw_intf_init(intf, mmio, cat->mdss_ver); 165 + hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver); 103 166 if (IS_ERR(hw)) { 104 167 rc = PTR_ERR(hw); 105 168 DPU_ERROR("failed intf object creation: err %d\n", rc); ··· 112 175 struct dpu_hw_wb *hw; 113 176 const struct dpu_wb_cfg *wb = &cat->wb[i]; 114 177 115 - hw = 
dpu_hw_wb_init(wb, mmio, cat->mdss_ver); 178 + hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver); 116 179 if (IS_ERR(hw)) { 117 180 rc = PTR_ERR(hw); 118 181 DPU_ERROR("failed wb object creation: err %d\n", rc); ··· 125 188 struct dpu_hw_ctl *hw; 126 189 const struct dpu_ctl_cfg *ctl = &cat->ctl[i]; 127 190 128 - hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer); 191 + hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer); 129 192 if (IS_ERR(hw)) { 130 193 rc = PTR_ERR(hw); 131 194 DPU_ERROR("failed ctl object creation: err %d\n", rc); ··· 138 201 struct dpu_hw_dspp *hw; 139 202 const struct dpu_dspp_cfg *dspp = &cat->dspp[i]; 140 203 141 - hw = dpu_hw_dspp_init(dspp, mmio); 204 + hw = dpu_hw_dspp_init(dev, dspp, mmio); 142 205 if (IS_ERR(hw)) { 143 206 rc = PTR_ERR(hw); 144 207 DPU_ERROR("failed dspp object creation: err %d\n", rc); ··· 152 215 const struct dpu_dsc_cfg *dsc = &cat->dsc[i]; 153 216 154 217 if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features)) 155 - hw = dpu_hw_dsc_init_1_2(dsc, mmio); 218 + hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio); 156 219 else 157 - hw = dpu_hw_dsc_init(dsc, mmio); 220 + hw = dpu_hw_dsc_init(dev, dsc, mmio); 158 221 159 222 if (IS_ERR(hw)) { 160 223 rc = PTR_ERR(hw); ··· 168 231 struct dpu_hw_sspp *hw; 169 232 const struct dpu_sspp_cfg *sspp = &cat->sspp[i]; 170 233 171 - hw = dpu_hw_sspp_init(sspp, mmio, mdss_data, cat->mdss_ver); 234 + hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver); 172 235 if (IS_ERR(hw)) { 173 236 rc = PTR_ERR(hw); 174 237 DPU_ERROR("failed sspp object creation: err %d\n", rc); ··· 177 240 rm->hw_sspp[sspp->id - SSPP_NONE] = hw; 178 241 } 179 242 243 + if (cat->cdm) { 244 + struct dpu_hw_cdm *hw; 245 + 246 + hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver); 247 + if (IS_ERR(hw)) { 248 + rc = PTR_ERR(hw); 249 + DPU_ERROR("failed cdm object creation: err %d\n", rc); 250 + goto fail; 251 + } 252 + rm->cdm_blk = &hw->base; 253 + } 254 + 180 255 return 0; 181 256 182 257 fail: 183 - dpu_rm_destroy(rm); 184 - 185 258 return rc ? 
rc : -EFAULT; 186 259 } 187 260 ··· 435 488 return 0; 436 489 } 437 490 491 + static int _dpu_rm_reserve_cdm(struct dpu_rm *rm, 492 + struct dpu_global_state *global_state, 493 + struct drm_encoder *enc) 494 + { 495 + /* try allocating only one CDM block */ 496 + if (!rm->cdm_blk) { 497 + DPU_ERROR("CDM block does not exist\n"); 498 + return -EIO; 499 + } 500 + 501 + if (global_state->cdm_to_enc_id) { 502 + DPU_ERROR("CDM_0 is already allocated\n"); 503 + return -EIO; 504 + } 505 + 506 + global_state->cdm_to_enc_id = enc->base.id; 507 + 508 + return 0; 509 + } 510 + 438 511 static int _dpu_rm_make_reservation( 439 512 struct dpu_rm *rm, 440 513 struct dpu_global_state *global_state, ··· 480 513 if (ret) 481 514 return ret; 482 515 516 + if (reqs->topology.needs_cdm) { 517 + ret = _dpu_rm_reserve_cdm(rm, global_state, enc); 518 + if (ret) { 519 + DPU_ERROR("unable to find CDM blk\n"); 520 + return ret; 521 + } 522 + } 523 + 483 524 return ret; 484 525 } 485 526 ··· 498 523 { 499 524 reqs->topology = req_topology; 500 525 501 - DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n", 526 + DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n", 502 527 reqs->topology.num_lm, reqs->topology.num_dsc, 503 - reqs->topology.num_intf); 528 + reqs->topology.num_intf, reqs->topology.needs_cdm); 504 529 505 530 return 0; 506 531 } ··· 529 554 ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id); 530 555 _dpu_rm_clear_mapping(global_state->dspp_to_enc_id, 531 556 ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id); 557 + _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id); 532 558 } 533 559 534 560 int dpu_rm_reserve( ··· 602 626 hw_blks = rm->dsc_blks; 603 627 hw_to_enc_id = global_state->dsc_to_enc_id; 604 628 max_blks = ARRAY_SIZE(rm->dsc_blks); 629 + break; 630 + case DPU_HW_BLK_CDM: 631 + hw_blks = &rm->cdm_blk; 632 + hw_to_enc_id = &global_state->cdm_to_enc_id; 633 + max_blks = 1; 605 634 break; 606 635 default: 607 636 DPU_ERROR("blk type %d not managed by rm\n", type);
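The dpu_rm hunk above removes the hand-rolled dpu_rm_destroy() teardown and threads a struct drm_device into every dpu_hw_*_init() call so the hardware-block wrappers can be allocated as DRM-managed objects (it also wires up the new CDM block and its single-instance reservation). As a rough, illustrative sketch of the allocation pattern such an init helper ends up following — the struct and function names here are made up, not the driver's own:

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_managed.h>

struct example_hw_block {
	void __iomem *regs;
};

static struct example_hw_block *example_hw_block_init(struct drm_device *dev,
						      void __iomem *mmio)
{
	struct example_hw_block *blk;

	/* drmm_kzalloc() ties the allocation to the drm_device lifetime,
	 * so the resource manager no longer needs an explicit destroy path */
	blk = drmm_kzalloc(dev, sizeof(*blk), GFP_KERNEL);
	if (!blk)
		return ERR_PTR(-ENOMEM);

	blk->regs = mmio;
	return blk;
}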
+5 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
··· 22 22 * @hw_wb: array of wb hardware resources 23 23 * @dspp_blks: array of dspp hardware resources 24 24 * @hw_sspp: array of sspp hardware resources 25 + * @cdm_blk: cdm hardware resource 25 26 */ 26 27 struct dpu_rm { 27 28 struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0]; ··· 34 33 struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0]; 35 34 struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0]; 36 35 struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE]; 36 + struct dpu_hw_blk *cdm_blk; 37 37 }; 38 38 39 39 /** 40 40 * dpu_rm_init - Read hardware catalog and create reservation tracking objects 41 41 * for all HW blocks. 42 + * @dev: Corresponding device for devres management 42 43 * @rm: DPU Resource Manager handle 43 44 * @cat: Pointer to hardware catalog 44 45 * @mdss_data: Pointer to MDSS / UBWC configuration 45 46 * @mmio: mapped register io address of MDP 46 47 * @Return: 0 on Success otherwise -ERROR 47 48 */ 48 - int dpu_rm_init(struct dpu_rm *rm, 49 + int dpu_rm_init(struct drm_device *dev, 50 + struct dpu_rm *rm, 49 51 const struct dpu_mdss_cfg *cat, 50 52 const struct msm_mdss_data *mdss_data, 51 53 void __iomem *mmio); 52 - 53 - /** 54 - * dpu_rm_destroy - Free all memory allocated by dpu_rm_init 55 - * @rm: DPU Resource Manager handle 56 - * @Return: 0 on Success otherwise -ERROR 57 - */ 58 - int dpu_rm_destroy(struct dpu_rm *rm); 59 54 60 55 /** 61 56 * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+26 -16
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
··· 6 6 7 7 #include <drm/drm_crtc.h> 8 8 #include <drm/drm_flip_work.h> 9 + #include <drm/drm_managed.h> 9 10 #include <drm/drm_mode.h> 10 11 #include <drm/drm_probe_helper.h> 11 12 #include <drm/drm_vblank.h> ··· 122 121 123 122 msm_gem_unpin_iova(val, kms->aspace); 124 123 drm_gem_object_put(val); 125 - } 126 - 127 - static void mdp4_crtc_destroy(struct drm_crtc *crtc) 128 - { 129 - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 130 - 131 - drm_crtc_cleanup(crtc); 132 - drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); 133 - 134 - kfree(mdp4_crtc); 135 124 } 136 125 137 126 /* statically (for now) map planes to mixer stage (z-order): */ ··· 260 269 { 261 270 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 262 271 struct mdp4_kms *mdp4_kms = get_kms(crtc); 272 + unsigned long flags; 263 273 264 274 DBG("%s", mdp4_crtc->name); 265 275 ··· 272 280 273 281 mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); 274 282 mdp4_disable(mdp4_kms); 283 + 284 + if (crtc->state->event && !crtc->state->active) { 285 + WARN_ON(mdp4_crtc->event); 286 + spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags); 287 + drm_crtc_send_vblank_event(crtc, crtc->state->event); 288 + crtc->state->event = NULL; 289 + spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags); 290 + } 275 291 276 292 mdp4_crtc->enabled = false; 277 293 } ··· 475 475 476 476 static const struct drm_crtc_funcs mdp4_crtc_funcs = { 477 477 .set_config = drm_atomic_helper_set_config, 478 - .destroy = mdp4_crtc_destroy, 479 478 .page_flip = drm_atomic_helper_page_flip, 480 479 .cursor_set = mdp4_crtc_cursor_set, 481 480 .cursor_move = mdp4_crtc_cursor_move, ··· 615 616 "DMA_P", "DMA_S", "DMA_E", 616 617 }; 617 618 619 + static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr) 620 + { 621 + struct mdp4_crtc *mdp4_crtc = ptr; 622 + 623 + drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); 624 + } 625 + 618 626 /* initialize crtc */ 619 627 struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, 620 628 struct drm_plane *plane, int id, int ovlp_id, ··· 629 623 { 630 624 struct drm_crtc *crtc = NULL; 631 625 struct mdp4_crtc *mdp4_crtc; 626 + int ret; 632 627 633 - mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL); 634 - if (!mdp4_crtc) 635 - return ERR_PTR(-ENOMEM); 628 + mdp4_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp4_crtc, base, 629 + plane, NULL, 630 + &mdp4_crtc_funcs, NULL); 631 + if (IS_ERR(mdp4_crtc)) 632 + return ERR_CAST(mdp4_crtc); 636 633 637 634 crtc = &mdp4_crtc->base; 638 635 ··· 657 648 658 649 drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 659 650 "unref cursor", unref_cursor_worker); 651 + ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc); 652 + if (ret) 653 + return ERR_PTR(ret); 660 654 661 - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs, 662 - NULL); 663 655 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); 664 656 665 657 return crtc;
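In the mdp4_crtc.c hunk the CRTC moves to drmm_crtc_alloc_with_planes(), its flip-work cleanup is registered as a DRM-managed action instead of living in a .destroy callback, and a pending event is now completed when the CRTC is disabled while inactive. A minimal sketch of the managed-cleanup part of that pattern, using hypothetical names:

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_managed.h>

struct example_crtc {
	struct drm_crtc base;
	struct drm_flip_work unref_cursor_work;
};

static void example_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
{
	struct example_crtc *ec = ptr;

	drm_flip_work_cleanup(&ec->unref_cursor_work);
}

static int example_crtc_setup_cleanup(struct drm_device *dev,
				      struct example_crtc *ec)
{
	/* runs example_crtc_flip_cleanup() automatically when the
	 * drm_device is released, replacing a manual .destroy hook */
	return drmm_add_action_or_reset(dev, example_crtc_flip_cleanup, ec);
}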
+5 -27
drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
··· 26 26 return to_mdp4_kms(to_mdp_kms(priv->kms)); 27 27 } 28 28 29 - static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder) 30 - { 31 - struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); 32 - 33 - drm_encoder_cleanup(encoder); 34 - kfree(mdp4_dsi_encoder); 35 - } 36 - 37 - static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = { 38 - .destroy = mdp4_dsi_encoder_destroy, 39 - }; 40 - 41 29 static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder, 42 30 struct drm_display_mode *mode, 43 31 struct drm_display_mode *adjusted_mode) ··· 136 148 /* initialize encoder */ 137 149 struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) 138 150 { 139 - struct drm_encoder *encoder = NULL; 151 + struct drm_encoder *encoder; 140 152 struct mdp4_dsi_encoder *mdp4_dsi_encoder; 141 - int ret; 142 153 143 - mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL); 144 - if (!mdp4_dsi_encoder) { 145 - ret = -ENOMEM; 146 - goto fail; 147 - } 154 + mdp4_dsi_encoder = drmm_encoder_alloc(dev, struct mdp4_dsi_encoder, base, 155 + NULL, DRM_MODE_ENCODER_DSI, NULL); 156 + if (IS_ERR(mdp4_dsi_encoder)) 157 + return ERR_CAST(mdp4_dsi_encoder); 148 158 149 159 encoder = &mdp4_dsi_encoder->base; 150 160 151 - drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs, 152 - DRM_MODE_ENCODER_DSI, NULL); 153 161 drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs); 154 162 155 163 return encoder; 156 - 157 - fail: 158 - if (encoder) 159 - mdp4_dsi_encoder_destroy(encoder); 160 - 161 - return ERR_PTR(ret); 162 164 } 163 165 #endif /* CONFIG_DRM_MSM_DSI */
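This hunk (and the DTV, LCDC and MDP5 encoder hunks that follow) replaces the kzalloc() + drm_encoder_init() + custom .destroy combination with drmm_encoder_alloc(), which allocates the encoder as a DRM-managed object and cleans it up when the drm_device goes away. A hedged sketch of the pattern with placeholder names:

#include <linux/err.h>
#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>

struct example_encoder {
	struct drm_encoder base;
};

static struct drm_encoder *example_encoder_init(struct drm_device *dev)
{
	struct example_encoder *enc;

	/* no drm_encoder_funcs and no .destroy: the DRM core frees the
	 * structure and calls drm_encoder_cleanup() on device release */
	enc = drmm_encoder_alloc(dev, struct example_encoder, base,
				 NULL, DRM_MODE_ENCODER_DSI, NULL);
	if (IS_ERR(enc))
		return ERR_CAST(enc);

	return &enc->base;
}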
+7 -30
drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
··· 25 25 return to_mdp4_kms(to_mdp_kms(priv->kms)); 26 26 } 27 27 28 - static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder) 29 - { 30 - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); 31 - drm_encoder_cleanup(encoder); 32 - kfree(mdp4_dtv_encoder); 33 - } 34 - 35 - static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { 36 - .destroy = mdp4_dtv_encoder_destroy, 37 - }; 38 - 39 28 static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, 40 29 struct drm_display_mode *mode, 41 30 struct drm_display_mode *adjusted_mode) ··· 162 173 /* initialize encoder */ 163 174 struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) 164 175 { 165 - struct drm_encoder *encoder = NULL; 176 + struct drm_encoder *encoder; 166 177 struct mdp4_dtv_encoder *mdp4_dtv_encoder; 167 - int ret; 168 178 169 - mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL); 170 - if (!mdp4_dtv_encoder) { 171 - ret = -ENOMEM; 172 - goto fail; 173 - } 179 + mdp4_dtv_encoder = drmm_encoder_alloc(dev, struct mdp4_dtv_encoder, base, 180 + NULL, DRM_MODE_ENCODER_TMDS, NULL); 181 + if (IS_ERR(mdp4_dtv_encoder)) 182 + return ERR_CAST(mdp4_dtv_encoder); 174 183 175 184 encoder = &mdp4_dtv_encoder->base; 176 185 177 - drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, 178 - DRM_MODE_ENCODER_TMDS, NULL); 179 186 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); 180 187 181 188 mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); 182 189 if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { 183 190 DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n"); 184 - ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk); 185 - goto fail; 191 + return ERR_CAST(mdp4_dtv_encoder->hdmi_clk); 186 192 } 187 193 188 194 mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk"); 189 195 if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { 190 196 DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n"); 191 - ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); 192 - goto fail; 197 + return ERR_CAST(mdp4_dtv_encoder->mdp_clk); 193 198 } 194 199 195 200 return encoder; 196 - 197 - fail: 198 - if (encoder) 199 - mdp4_dtv_encoder_destroy(encoder); 200 - 201 - return ERR_PTR(ret); 202 201 }
+22 -65
drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
··· 18 18 struct drm_panel *panel; 19 19 struct clk *lcdc_clk; 20 20 unsigned long int pixclock; 21 - struct regulator *regs[3]; 21 + struct regulator_bulk_data regs[3]; 22 22 bool enabled; 23 23 uint32_t bsc; 24 24 }; ··· 29 29 struct msm_drm_private *priv = encoder->dev->dev_private; 30 30 return to_mdp4_kms(to_mdp_kms(priv->kms)); 31 31 } 32 - 33 - static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder) 34 - { 35 - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = 36 - to_mdp4_lcdc_encoder(encoder); 37 - drm_encoder_cleanup(encoder); 38 - kfree(mdp4_lcdc_encoder); 39 - } 40 - 41 - static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = { 42 - .destroy = mdp4_lcdc_encoder_destroy, 43 - }; 44 32 45 33 /* this should probably be a helper: */ 46 34 static struct drm_connector *get_connector(struct drm_encoder *encoder) ··· 259 271 260 272 static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) 261 273 { 262 - struct drm_device *dev = encoder->dev; 263 274 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = 264 275 to_mdp4_lcdc_encoder(encoder); 265 276 struct mdp4_kms *mdp4_kms = get_kms(encoder); 266 277 struct drm_panel *panel; 267 - int i, ret; 268 278 269 279 if (WARN_ON(!mdp4_lcdc_encoder->enabled)) 270 280 return; ··· 287 301 288 302 clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); 289 303 290 - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { 291 - ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); 292 - if (ret) 293 - DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret); 294 - } 304 + regulator_bulk_disable(ARRAY_SIZE(mdp4_lcdc_encoder->regs), 305 + mdp4_lcdc_encoder->regs); 295 306 296 307 mdp4_lcdc_encoder->enabled = false; 297 308 } ··· 302 319 struct mdp4_kms *mdp4_kms = get_kms(encoder); 303 320 struct drm_panel *panel; 304 321 uint32_t config; 305 - int i, ret; 322 + int ret; 306 323 307 324 if (WARN_ON(mdp4_lcdc_encoder->enabled)) 308 325 return; ··· 322 339 mdp4_crtc_set_config(encoder->crtc, config); 323 340 mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); 324 341 325 - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { 326 - ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); 327 - if (ret) 328 - DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret); 329 - } 342 + ret = regulator_bulk_enable(ARRAY_SIZE(mdp4_lcdc_encoder->regs), 343 + mdp4_lcdc_encoder->regs); 344 + if (ret) 345 + DRM_DEV_ERROR(dev->dev, "failed to enable regulators: %d\n", ret); 330 346 331 347 DBG("setting lcdc_clk=%lu", pc); 332 348 ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); ··· 365 383 struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, 366 384 struct device_node *panel_node) 367 385 { 368 - struct drm_encoder *encoder = NULL; 386 + struct drm_encoder *encoder; 369 387 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder; 370 - struct regulator *reg; 371 388 int ret; 372 389 373 - mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL); 374 - if (!mdp4_lcdc_encoder) { 375 - ret = -ENOMEM; 376 - goto fail; 377 - } 390 + mdp4_lcdc_encoder = drmm_encoder_alloc(dev, struct mdp4_lcdc_encoder, base, 391 + NULL, DRM_MODE_ENCODER_LVDS, NULL); 392 + if (IS_ERR(mdp4_lcdc_encoder)) 393 + return ERR_CAST(mdp4_lcdc_encoder); 378 394 379 395 mdp4_lcdc_encoder->panel_node = panel_node; 380 396 381 397 encoder = &mdp4_lcdc_encoder->base; 382 398 383 - drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs, 384 - DRM_MODE_ENCODER_LVDS, NULL); 385 399 drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs); 386 
400 387 401 /* TODO: do we need different pll in other cases? */ 388 402 mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev); 389 403 if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) { 390 404 DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n"); 391 - ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk); 392 - goto fail; 405 + return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk); 393 406 } 394 407 395 408 /* TODO: different regulators in other cases? */ 396 - reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v"); 397 - if (IS_ERR(reg)) { 398 - ret = PTR_ERR(reg); 399 - DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret); 400 - goto fail; 401 - } 402 - mdp4_lcdc_encoder->regs[0] = reg; 409 + mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v"; 410 + mdp4_lcdc_encoder->regs[1].supply = "lvds-vccs-3p3v"; 411 + mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda"; 403 412 404 - reg = devm_regulator_get(dev->dev, "lvds-pll-vdda"); 405 - if (IS_ERR(reg)) { 406 - ret = PTR_ERR(reg); 407 - DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret); 408 - goto fail; 409 - } 410 - mdp4_lcdc_encoder->regs[1] = reg; 411 - 412 - reg = devm_regulator_get(dev->dev, "lvds-vdda"); 413 - if (IS_ERR(reg)) { 414 - ret = PTR_ERR(reg); 415 - DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret); 416 - goto fail; 417 - } 418 - mdp4_lcdc_encoder->regs[2] = reg; 413 + ret = devm_regulator_bulk_get(dev->dev, 414 + ARRAY_SIZE(mdp4_lcdc_encoder->regs), 415 + mdp4_lcdc_encoder->regs); 416 + if (ret) 417 + return ERR_PTR(ret); 419 418 420 419 return encoder; 421 - 422 - fail: 423 - if (encoder) 424 - mdp4_lcdc_encoder_destroy(encoder); 425 - 426 - return ERR_PTR(ret); 427 420 }
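Besides moving to drmm_encoder_alloc(), the LCDC encoder swaps three individually requested regulators for a regulator_bulk_data array, so get/enable/disable collapse into single calls. A short sketch of the bulk-regulator API as used by this kind of conversion — the supply names below are placeholders, not the LVDS supplies from the hunk:

#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_regs[] = {
	{ .supply = "vdda" },
	{ .supply = "vccs" },
	{ .supply = "vddio" },
};

static int example_get_supplies(struct device *dev)
{
	/* request all supplies at probe time; the devm_ variant releases
	 * them automatically when the device is unbound */
	return devm_regulator_bulk_get(dev, ARRAY_SIZE(example_regs),
				       example_regs);
}

static int example_power_on(void)
{
	return regulator_bulk_enable(ARRAY_SIZE(example_regs), example_regs);
}

static void example_power_off(void)
{
	regulator_bulk_disable(ARRAY_SIZE(example_regs), example_regs);
}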
+5 -19
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
··· 1350 1350 return cfg_handler->revision; 1351 1351 } 1352 1352 1353 - void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler) 1354 - { 1355 - kfree(cfg_handler); 1356 - } 1357 - 1358 1353 struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, 1359 1354 uint32_t major, uint32_t minor) 1360 1355 { 1361 1356 struct drm_device *dev = mdp5_kms->dev; 1362 1357 struct mdp5_cfg_handler *cfg_handler; 1363 1358 const struct mdp5_cfg_handler *cfg_handlers; 1364 - int i, ret = 0, num_handlers; 1359 + int i, num_handlers; 1365 1360 1366 - cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL); 1361 + cfg_handler = devm_kzalloc(dev->dev, sizeof(*cfg_handler), GFP_KERNEL); 1367 1362 if (unlikely(!cfg_handler)) { 1368 - ret = -ENOMEM; 1369 - goto fail; 1363 + return ERR_PTR(-ENOMEM); 1370 1364 } 1371 1365 1372 1366 switch (major) { ··· 1375 1381 default: 1376 1382 DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n", 1377 1383 major, minor); 1378 - ret = -ENXIO; 1379 - goto fail; 1384 + return ERR_PTR(-ENXIO); 1380 1385 } 1381 1386 1382 1387 /* only after mdp5_cfg global pointer's init can we access the hw */ ··· 1389 1396 if (unlikely(!mdp5_cfg)) { 1390 1397 DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n", 1391 1398 major, minor); 1392 - ret = -ENXIO; 1393 - goto fail; 1399 + return ERR_PTR(-ENXIO); 1394 1400 } 1395 1401 1396 1402 cfg_handler->revision = minor; ··· 1398 1406 DBG("MDP5: %s hw config selected", mdp5_cfg->name); 1399 1407 1400 1408 return cfg_handler; 1401 - 1402 - fail: 1403 - if (cfg_handler) 1404 - mdp5_cfg_destroy(cfg_handler); 1405 - 1406 - return ERR_PTR(ret); 1407 1409 }
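The mdp5_cfg handler — like the CTL manager, SMP, mixers, pipes and interface structs in the hunks below — drops its kzalloc()/kfree() pairing in favour of devm_kzalloc(), which removes the dedicated *_destroy() helpers and the goto-based error unwinding. An illustrative sketch of that device-managed allocation style (hypothetical names):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_handler {
	unsigned int revision;
};

static struct example_handler *example_handler_init(struct device *dev,
						    unsigned int revision)
{
	struct example_handler *handler;

	/* freed automatically when dev is unbound; error paths can simply
	 * return instead of jumping to a cleanup label */
	handler = devm_kzalloc(dev, sizeof(*handler), GFP_KERNEL);
	if (!handler)
		return ERR_PTR(-ENOMEM);

	handler->revision = revision;
	return handler;
}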
-1
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
··· 121 121 122 122 struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, 123 123 uint32_t major, uint32_t minor); 124 - void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd); 125 124 126 125 #endif /* __MDP5_CFG_H__ */
+15 -15
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
··· 13 13 #include <drm/drm_crtc.h> 14 14 #include <drm/drm_flip_work.h> 15 15 #include <drm/drm_fourcc.h> 16 + #include <drm/drm_managed.h> 16 17 #include <drm/drm_probe_helper.h> 17 18 #include <drm/drm_vblank.h> 18 19 ··· 173 172 drm_gem_object_put(val); 174 173 } 175 174 176 - static void mdp5_crtc_destroy(struct drm_crtc *crtc) 175 + static void mdp5_crtc_flip_cleanup(struct drm_device *dev, void *ptr) 177 176 { 178 - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 177 + struct mdp5_crtc *mdp5_crtc = ptr; 179 178 180 - drm_crtc_cleanup(crtc); 181 179 drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work); 182 - 183 - kfree(mdp5_crtc); 184 180 } 185 181 186 182 static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) ··· 1145 1147 1146 1148 static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { 1147 1149 .set_config = drm_atomic_helper_set_config, 1148 - .destroy = mdp5_crtc_destroy, 1149 1150 .page_flip = drm_atomic_helper_page_flip, 1150 1151 .reset = mdp5_crtc_reset, 1151 1152 .atomic_duplicate_state = mdp5_crtc_duplicate_state, ··· 1158 1161 1159 1162 static const struct drm_crtc_funcs mdp5_crtc_funcs = { 1160 1163 .set_config = drm_atomic_helper_set_config, 1161 - .destroy = mdp5_crtc_destroy, 1162 1164 .page_flip = drm_atomic_helper_page_flip, 1163 1165 .reset = mdp5_crtc_reset, 1164 1166 .atomic_duplicate_state = mdp5_crtc_duplicate_state, ··· 1323 1327 { 1324 1328 struct drm_crtc *crtc = NULL; 1325 1329 struct mdp5_crtc *mdp5_crtc; 1330 + int ret; 1326 1331 1327 - mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); 1328 - if (!mdp5_crtc) 1329 - return ERR_PTR(-ENOMEM); 1332 + mdp5_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp5_crtc, base, 1333 + plane, cursor_plane, 1334 + cursor_plane ? 1335 + &mdp5_crtc_no_lm_cursor_funcs : 1336 + &mdp5_crtc_funcs, 1337 + NULL); 1338 + if (IS_ERR(mdp5_crtc)) 1339 + return ERR_CAST(mdp5_crtc); 1330 1340 1331 1341 crtc = &mdp5_crtc->base; 1332 1342 ··· 1348 1346 1349 1347 mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true; 1350 1348 1351 - drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, 1352 - cursor_plane ? 1353 - &mdp5_crtc_no_lm_cursor_funcs : 1354 - &mdp5_crtc_funcs, NULL); 1355 - 1356 1349 drm_flip_work_init(&mdp5_crtc->unref_cursor_work, 1357 1350 "unref cursor", unref_cursor_worker); 1351 + ret = drmm_add_action_or_reset(dev, mdp5_crtc_flip_cleanup, mdp5_crtc); 1352 + if (ret) 1353 + return ERR_PTR(ret); 1358 1354 1359 1355 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); 1360 1356
+4 -17
drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
··· 681 681 } 682 682 } 683 683 684 - void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) 685 - { 686 - kfree(ctl_mgr); 687 - } 688 - 689 684 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, 690 685 void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd) 691 686 { ··· 692 697 unsigned long flags; 693 698 int c, ret; 694 699 695 - ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL); 700 + ctl_mgr = devm_kzalloc(dev->dev, sizeof(*ctl_mgr), GFP_KERNEL); 696 701 if (!ctl_mgr) { 697 702 DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n"); 698 - ret = -ENOMEM; 699 - goto fail; 703 + return ERR_PTR(-ENOMEM); 700 704 } 701 705 702 706 if (WARN_ON(ctl_cfg->count > MAX_CTL)) { 703 707 DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n", 704 708 ctl_cfg->count); 705 - ret = -ENOSPC; 706 - goto fail; 709 + return ERR_PTR(-ENOSPC); 707 710 } 708 711 709 712 /* initialize the CTL manager: */ ··· 720 727 DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c); 721 728 ret = -EINVAL; 722 729 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); 723 - goto fail; 730 + return ERR_PTR(ret); 724 731 } 725 732 ctl->ctlm = ctl_mgr; 726 733 ctl->id = c; ··· 748 755 DBG("Pool of %d CTLs created.", ctl_mgr->nctl); 749 756 750 757 return ctl_mgr; 751 - 752 - fail: 753 - if (ctl_mgr) 754 - mdp5_ctlm_destroy(ctl_mgr); 755 - 756 - return ERR_PTR(ret); 757 758 }
-1
drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
··· 17 17 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, 18 18 void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd); 19 19 void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); 20 - void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); 21 20 22 21 /* 23 22 * CTL prototypes:
+4 -25
drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
··· 16 16 return to_mdp5_kms(to_mdp_kms(priv->kms)); 17 17 } 18 18 19 - static void mdp5_encoder_destroy(struct drm_encoder *encoder) 20 - { 21 - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 22 - drm_encoder_cleanup(encoder); 23 - kfree(mdp5_encoder); 24 - } 25 - 26 - static const struct drm_encoder_funcs mdp5_encoder_funcs = { 27 - .destroy = mdp5_encoder_destroy, 28 - }; 29 - 30 19 static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, 31 20 struct drm_display_mode *mode, 32 21 struct drm_display_mode *adjusted_mode) ··· 331 342 struct mdp5_encoder *mdp5_encoder; 332 343 int enc_type = (intf->type == INTF_DSI) ? 333 344 DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS; 334 - int ret; 335 345 336 - mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL); 337 - if (!mdp5_encoder) { 338 - ret = -ENOMEM; 339 - goto fail; 340 - } 346 + mdp5_encoder = drmm_encoder_alloc(dev, struct mdp5_encoder, base, 347 + NULL, enc_type, NULL); 348 + if (IS_ERR(mdp5_encoder)) 349 + return ERR_CAST(mdp5_encoder); 341 350 342 351 encoder = &mdp5_encoder->base; 343 352 mdp5_encoder->ctl = ctl; ··· 343 356 344 357 spin_lock_init(&mdp5_encoder->intf_lock); 345 358 346 - drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL); 347 - 348 359 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); 349 360 350 361 return encoder; 351 - 352 - fail: 353 - if (encoder) 354 - mdp5_encoder_destroy(encoder); 355 - 356 - return ERR_PTR(ret); 357 362 }
+3 -22
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 209 209 { 210 210 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 211 211 struct msm_gem_address_space *aspace = kms->aspace; 212 - int i; 213 - 214 - for (i = 0; i < mdp5_kms->num_hwmixers; i++) 215 - mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); 216 - 217 - for (i = 0; i < mdp5_kms->num_hwpipes; i++) 218 - mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); 219 212 220 213 if (aspace) { 221 214 aspace->mmu->funcs->detach(aspace->mmu); ··· 616 623 617 624 static void mdp5_destroy(struct mdp5_kms *mdp5_kms) 618 625 { 619 - int i; 620 - 621 - if (mdp5_kms->ctlm) 622 - mdp5_ctlm_destroy(mdp5_kms->ctlm); 623 - if (mdp5_kms->smp) 624 - mdp5_smp_destroy(mdp5_kms->smp); 625 - if (mdp5_kms->cfg) 626 - mdp5_cfg_destroy(mdp5_kms->cfg); 627 - 628 - for (i = 0; i < mdp5_kms->num_intfs; i++) 629 - kfree(mdp5_kms->intfs[i]); 630 - 631 626 if (mdp5_kms->rpm_enabled) 632 627 pm_runtime_disable(&mdp5_kms->pdev->dev); 633 628 ··· 633 652 for (i = 0; i < cnt; i++) { 634 653 struct mdp5_hw_pipe *hwpipe; 635 654 636 - hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); 655 + hwpipe = mdp5_pipe_init(dev, pipes[i], offsets[i], caps); 637 656 if (IS_ERR(hwpipe)) { 638 657 ret = PTR_ERR(hwpipe); 639 658 DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n", ··· 705 724 for (i = 0; i < hw_cfg->lm.count; i++) { 706 725 struct mdp5_hw_mixer *mixer; 707 726 708 - mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]); 727 + mixer = mdp5_mixer_init(dev, &hw_cfg->lm.instances[i]); 709 728 if (IS_ERR(mixer)) { 710 729 ret = PTR_ERR(mixer); 711 730 DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n", ··· 736 755 if (intf_types[i] == INTF_DISABLED) 737 756 continue; 738 757 739 - intf = kzalloc(sizeof(*intf), GFP_KERNEL); 758 + intf = devm_kzalloc(dev->dev, sizeof(*intf), GFP_KERNEL); 740 759 if (!intf) { 741 760 DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i); 742 761 return -ENOMEM;
+3 -7
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
··· 140 140 return 0; 141 141 } 142 142 143 - void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer) 144 - { 145 - kfree(mixer); 146 - } 147 - 148 143 static const char * const mixer_names[] = { 149 144 "LM0", "LM1", "LM2", "LM3", "LM4", "LM5", 150 145 }; 151 146 152 - struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm) 147 + struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev, 148 + const struct mdp5_lm_instance *lm) 153 149 { 154 150 struct mdp5_hw_mixer *mixer; 155 151 156 - mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); 152 + mixer = devm_kzalloc(dev->dev, sizeof(*mixer), GFP_KERNEL); 157 153 if (!mixer) 158 154 return ERR_PTR(-ENOMEM); 159 155
+2 -2
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
··· 25 25 struct drm_crtc *hwmixer_to_crtc[8]; 26 26 }; 27 27 28 - struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm); 29 - void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm); 28 + struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev, 29 + const struct mdp5_lm_instance *lm); 30 30 int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, 31 31 uint32_t caps, struct mdp5_hw_mixer **mixer, 32 32 struct mdp5_hw_mixer **r_mixer);
+3 -7
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
··· 151 151 return 0; 152 152 } 153 153 154 - void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe) 155 - { 156 - kfree(hwpipe); 157 - } 158 - 159 - struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, 154 + struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev, 155 + enum mdp5_pipe pipe, 160 156 uint32_t reg_offset, uint32_t caps) 161 157 { 162 158 struct mdp5_hw_pipe *hwpipe; 163 159 164 - hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL); 160 + hwpipe = devm_kzalloc(dev->dev, sizeof(*hwpipe), GFP_KERNEL); 165 161 if (!hwpipe) 166 162 return ERR_PTR(-ENOMEM); 167 163
+2 -2
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
··· 39 39 struct mdp5_hw_pipe **r_hwpipe); 40 40 int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); 41 41 42 - struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, 42 + struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev, 43 + enum mdp5_pipe pipe, 43 44 uint32_t reg_offset, uint32_t caps); 44 - void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe); 45 45 46 46 #endif /* __MDP5_PIPE_H__ */
+4 -15
drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
··· 370 370 drm_modeset_unlock(&mdp5_kms->glob_state_lock); 371 371 } 372 372 373 - void mdp5_smp_destroy(struct mdp5_smp *smp) 374 - { 375 - kfree(smp); 376 - } 377 373 378 374 struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg) 379 375 { 376 + struct drm_device *dev = mdp5_kms->dev; 380 377 struct mdp5_smp_state *state; 381 378 struct mdp5_global_state *global_state; 382 379 struct mdp5_smp *smp; 383 - int ret; 384 380 385 - smp = kzalloc(sizeof(*smp), GFP_KERNEL); 386 - if (unlikely(!smp)) { 387 - ret = -ENOMEM; 388 - goto fail; 389 - } 381 + smp = devm_kzalloc(dev->dev, sizeof(*smp), GFP_KERNEL); 382 + if (unlikely(!smp)) 383 + return ERR_PTR(-ENOMEM); 390 384 391 385 smp->dev = mdp5_kms->dev; 392 386 smp->blk_cnt = cfg->mmb_count; ··· 394 400 memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); 395 401 396 402 return smp; 397 - fail: 398 - if (smp) 399 - mdp5_smp_destroy(smp); 400 - 401 - return ERR_PTR(ret); 402 403 }
-1
drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
··· 68 68 69 69 struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, 70 70 const struct mdp5_smp_block *cfg); 71 - void mdp5_smp_destroy(struct mdp5_smp *smp); 72 71 73 72 void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p); 74 73
+32 -7
drivers/gpu/drm/msm/dp/dp_aux.c
··· 291 291 return -EINVAL; 292 292 } 293 293 294 + ret = pm_runtime_resume_and_get(dp_aux->dev); 295 + if (ret) 296 + return ret; 297 + 294 298 mutex_lock(&aux->mutex); 295 299 if (!aux->initted) { 296 300 ret = -EIO; ··· 368 364 369 365 exit: 370 366 mutex_unlock(&aux->mutex); 367 + pm_runtime_put_sync(dp_aux->dev); 371 368 372 369 return ret; 373 370 } ··· 479 474 480 475 int dp_aux_register(struct drm_dp_aux *dp_aux) 481 476 { 482 - struct dp_aux_private *aux; 483 477 int ret; 484 478 485 479 if (!dp_aux) { ··· 486 482 return -EINVAL; 487 483 } 488 484 489 - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); 490 - 491 - aux->dp_aux.name = "dpu_dp_aux"; 492 - aux->dp_aux.dev = aux->dev; 493 - aux->dp_aux.transfer = dp_aux_transfer; 494 - ret = drm_dp_aux_register(&aux->dp_aux); 485 + ret = drm_dp_aux_register(dp_aux); 495 486 if (ret) { 496 487 DRM_ERROR("%s: failed to register drm aux: %d\n", __func__, 497 488 ret); ··· 499 500 void dp_aux_unregister(struct drm_dp_aux *dp_aux) 500 501 { 501 502 drm_dp_aux_unregister(dp_aux); 503 + } 504 + 505 + static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux, 506 + unsigned long wait_us) 507 + { 508 + int ret; 509 + struct dp_aux_private *aux; 510 + 511 + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); 512 + 513 + pm_runtime_get_sync(aux->dev); 514 + ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog); 515 + pm_runtime_put_sync(aux->dev); 516 + 517 + return ret; 502 518 } 503 519 504 520 struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, ··· 538 524 aux->dev = dev; 539 525 aux->catalog = catalog; 540 526 aux->retry_cnt = 0; 527 + 528 + /* 529 + * Use the drm_dp_aux_init() to use the aux adapter 530 + * before registering AUX with the DRM device so that 531 + * msm eDP panel can be detected by generic_dep_panel_probe(). 532 + */ 533 + aux->dp_aux.name = "dpu_dp_aux"; 534 + aux->dp_aux.dev = dev; 535 + aux->dp_aux.transfer = dp_aux_transfer; 536 + aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted; 537 + drm_dp_aux_init(&aux->dp_aux); 541 538 542 539 return &aux->dp_aux; 543 540 }
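Two related changes land in dp_aux.c: every AUX transfer (and the new wait_hpd_asserted callback) now takes a runtime-PM reference around the hardware access, and the adapter is set up with drm_dp_aux_init() at allocation time so a panel on the AUX bus can be probed before drm_dp_aux_register() runs against the DRM device. A hedged sketch of the transfer-side idea, with placeholder names and the actual register programming elided:

#include <linux/pm_runtime.h>
#include <drm/display/drm_dp_helper.h>

static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg)
{
	ssize_t ret;

	/* keep the controller powered for the duration of the access */
	ret = pm_runtime_resume_and_get(aux->dev);
	if (ret)
		return ret;

	/* ... program the AUX hardware and wait for completion here ... */
	ret = msg->size;

	pm_runtime_put_sync(aux->dev);
	return ret;
}

static void example_aux_setup(struct drm_dp_aux *aux, struct device *dev)
{
	aux->name = "example_dp_aux";
	aux->dev = dev;
	aux->transfer = example_aux_transfer;
	/* init (not register): usable for panel probing before the DRM
	 * device exists; drm_dp_aux_register() happens later at bind time */
	drm_dp_aux_init(aux);
}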
+16 -53
drivers/gpu/drm/msm/dp/dp_debug.c
··· 19 19 #define DEBUG_NAME "msm_dp" 20 20 21 21 struct dp_debug_private { 22 - struct dentry *root; 23 - 24 22 struct dp_link *link; 25 23 struct dp_panel *panel; 26 24 struct drm_connector *connector; 27 - struct device *dev; 28 - struct drm_device *drm_dev; 29 25 30 26 struct dp_debug dp_debug; 31 27 }; ··· 200 204 .write = dp_test_active_write 201 205 }; 202 206 203 - static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor) 207 + static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool is_edp) 204 208 { 205 - char path[64]; 206 209 struct dp_debug_private *debug = container_of(dp_debug, 207 210 struct dp_debug_private, dp_debug); 208 211 209 - snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name); 210 - 211 - debug->root = debugfs_create_dir(path, minor->debugfs_root); 212 - 213 - debugfs_create_file("dp_debug", 0444, debug->root, 212 + debugfs_create_file("dp_debug", 0444, root, 214 213 debug, &dp_debug_fops); 215 214 216 - debugfs_create_file("msm_dp_test_active", 0444, 217 - debug->root, 218 - debug, &test_active_fops); 215 + if (!is_edp) { 216 + debugfs_create_file("msm_dp_test_active", 0444, 217 + root, 218 + debug, &test_active_fops); 219 219 220 - debugfs_create_file("msm_dp_test_data", 0444, 221 - debug->root, 222 - debug, &dp_test_data_fops); 220 + debugfs_create_file("msm_dp_test_data", 0444, 221 + root, 222 + debug, &dp_test_data_fops); 223 223 224 - debugfs_create_file("msm_dp_test_type", 0444, 225 - debug->root, 226 - debug, &dp_test_type_fops); 224 + debugfs_create_file("msm_dp_test_type", 0444, 225 + root, 226 + debug, &dp_test_type_fops); 227 + } 227 228 } 228 229 229 230 struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, 230 231 struct dp_link *link, 231 - struct drm_connector *connector, struct drm_minor *minor) 232 + struct drm_connector *connector, 233 + struct dentry *root, bool is_edp) 232 234 { 233 235 struct dp_debug_private *debug; 234 236 struct dp_debug *dp_debug; ··· 247 253 debug->dp_debug.debug_en = false; 248 254 debug->link = link; 249 255 debug->panel = panel; 250 - debug->dev = dev; 251 - debug->drm_dev = minor->dev; 252 - debug->connector = connector; 253 256 254 257 dp_debug = &debug->dp_debug; 255 258 dp_debug->vdisplay = 0; 256 259 dp_debug->hdisplay = 0; 257 260 dp_debug->vrefresh = 0; 258 261 259 - dp_debug_init(dp_debug, minor); 262 + dp_debug_init(dp_debug, root, is_edp); 260 263 261 264 return dp_debug; 262 265 error: 263 266 return ERR_PTR(rc); 264 - } 265 - 266 - static int dp_debug_deinit(struct dp_debug *dp_debug) 267 - { 268 - struct dp_debug_private *debug; 269 - 270 - if (!dp_debug) 271 - return -EINVAL; 272 - 273 - debug = container_of(dp_debug, struct dp_debug_private, dp_debug); 274 - 275 - debugfs_remove_recursive(debug->root); 276 - 277 - return 0; 278 - } 279 - 280 - void dp_debug_put(struct dp_debug *dp_debug) 281 - { 282 - struct dp_debug_private *debug; 283 - 284 - if (!dp_debug) 285 - return; 286 - 287 - debug = container_of(dp_debug, struct dp_debug_private, dp_debug); 288 - 289 - dp_debug_deinit(dp_debug); 290 - 291 - devm_kfree(debug->dev, debug); 292 267 }
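dp_debug no longer creates its own msm_dp-<connector> directory under the DRM minor; instead it is handed the debugfs root that the DRM core passes to the bridge's .debugfs_init hook, and the compliance-test files are only created for non-eDP connectors. A small, illustrative sketch of exposing a file through that hook (all names hypothetical):

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <drm/drm_bridge.h>

static int example_status_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "example status\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_status);

/* the DRM core calls this with the debugfs root it manages for us */
static void example_bridge_debugfs_init(struct drm_bridge *bridge,
					struct dentry *root)
{
	debugfs_create_file("example_status", 0444, root, bridge,
			    &example_status_fops);
}

static const struct drm_bridge_funcs example_bridge_funcs = {
	.debugfs_init = example_bridge_debugfs_init,
};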
+7 -16
drivers/gpu/drm/msm/dp/dp_debug.h
··· 34 34 * @panel: instance of panel module 35 35 * @link: instance of link module 36 36 * @connector: double pointer to display connector 37 - * @minor: pointer to drm minor number after device registration 37 + * @root: connector's debugfs root 38 + * @is_edp: set for eDP connectors / panels 38 39 * return: pointer to allocated debug module data 39 40 * 40 41 * This function sets up the debug module and provides a way ··· 44 43 struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, 45 44 struct dp_link *link, 46 45 struct drm_connector *connector, 47 - struct drm_minor *minor); 48 - 49 - /** 50 - * dp_debug_put() 51 - * 52 - * Cleans up dp_debug instance 53 - * 54 - * @dp_debug: instance of dp_debug 55 - */ 56 - void dp_debug_put(struct dp_debug *dp_debug); 46 + struct dentry *root, 47 + bool is_edp); 57 48 58 49 #else 59 50 60 51 static inline 61 52 struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, 62 53 struct dp_link *link, 63 - struct drm_connector *connector, struct drm_minor *minor) 54 + struct drm_connector *connector, 55 + struct dentry *root, 56 + bool is_edp) 64 57 { 65 58 return ERR_PTR(-EINVAL); 66 - } 67 - 68 - static inline void dp_debug_put(struct dp_debug *dp_debug) 69 - { 70 59 } 71 60 72 61 #endif /* defined(CONFIG_DEBUG_FS) */
+146 -223
drivers/gpu/drm/msm/dp/dp_display.c
··· 49 49 ST_CONNECTED, 50 50 ST_DISCONNECT_PENDING, 51 51 ST_DISPLAY_OFF, 52 - ST_SUSPENDED, 53 52 }; 54 53 55 54 enum { 56 55 EV_NO_EVENT, 57 56 /* hpd events */ 58 - EV_HPD_INIT_SETUP, 59 57 EV_HPD_PLUG_INT, 60 58 EV_IRQ_HPD_INT, 61 59 EV_HPD_UNPLUG_INT, ··· 168 170 {} 169 171 }; 170 172 173 + static const struct msm_dp_desc sm8650_dp_descs[] = { 174 + { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, 175 + {} 176 + }; 177 + 171 178 static const struct of_device_id dp_dt_match[] = { 172 179 { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs }, 173 180 { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs }, ··· 183 180 { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_edp_descs }, 184 181 { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs }, 185 182 { .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_descs }, 183 + { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs }, 186 184 {} 187 185 }; 188 186 ··· 279 275 dp->dp_display.drm_dev = drm; 280 276 priv->dp[dp->id] = &dp->dp_display; 281 277 282 - rc = dp->parser->parse(dp->parser); 283 - if (rc) { 284 - DRM_ERROR("device tree parsing failed\n"); 285 - goto end; 286 - } 287 278 288 279 289 280 dp->drm_dev = drm; ··· 289 290 goto end; 290 291 } 291 292 292 - rc = dp_power_client_init(dp->power); 293 - if (rc) { 294 - DRM_ERROR("Power client create failed\n"); 295 - goto end; 296 - } 297 293 298 294 rc = dp_register_audio_driver(dev, dp->audio); 299 295 if (rc) { ··· 313 319 struct dp_display_private *dp = dev_get_dp_display_private(dev); 314 320 struct msm_drm_private *priv = dev_get_drvdata(master); 315 321 316 - /* disable all HPD interrupts */ 317 - if (dp->core_initialized) 318 - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); 319 - 320 322 kthread_stop(dp->ev_tsk); 321 323 322 324 of_dp_aux_depopulate_bus(dp->aux); 323 325 324 - dp_power_client_deinit(dp->power); 325 326 dp_unregister_audio_driver(dev, dp->audio); 326 327 dp_aux_unregister(dp->aux); 327 328 dp->drm_dev = NULL; ··· 329 340 .unbind = dp_display_unbind, 330 341 }; 331 342 332 - static void dp_display_send_hpd_event(struct msm_dp *dp_display) 333 - { 334 - struct dp_display_private *dp; 335 - struct drm_connector *connector; 336 - 337 - dp = container_of(dp_display, struct dp_display_private, dp_display); 338 - 339 - connector = dp->dp_display.connector; 340 - drm_helper_hpd_irq_event(connector->dev); 341 - } 342 - 343 - 344 343 static int dp_display_send_hpd_notification(struct dp_display_private *dp, 345 344 bool hpd) 346 345 { 347 - if ((hpd && dp->dp_display.is_connected) || 348 - (!hpd && !dp->dp_display.is_connected)) { 349 - drm_dbg_dp(dp->drm_dev, "HPD already %s\n", 350 - (hpd ? 
"on" : "off")); 351 - return 0; 352 - } 346 + struct drm_bridge *bridge = dp->dp_display.bridge; 353 347 354 348 /* reset video pattern flag on disconnect */ 355 349 if (!hpd) { ··· 344 372 dp->panel->downstream_ports); 345 373 } 346 374 347 - dp->dp_display.is_connected = hpd; 375 + dp->dp_display.link_ready = hpd; 348 376 349 377 drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n", 350 378 dp->dp_display.connector_type, hpd); 351 - dp_display_send_hpd_event(&dp->dp_display); 379 + drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready); 352 380 353 381 return 0; 354 382 } ··· 547 575 { 548 576 u32 state; 549 577 int ret; 578 + struct platform_device *pdev = dp->dp_display.pdev; 550 579 551 580 mutex_lock(&dp->event_mutex); 552 581 ··· 555 582 drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", 556 583 dp->dp_display.connector_type, state); 557 584 558 - if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { 585 + if (state == ST_DISPLAY_OFF) { 559 586 mutex_unlock(&dp->event_mutex); 560 587 return 0; 561 588 } ··· 572 599 return 0; 573 600 } 574 601 575 - ret = dp_display_usbpd_configure_cb(&dp->dp_display.pdev->dev); 602 + ret = pm_runtime_resume_and_get(&pdev->dev); 603 + if (ret) { 604 + DRM_ERROR("failed to pm_runtime_resume\n"); 605 + mutex_unlock(&dp->event_mutex); 606 + return ret; 607 + } 608 + 609 + ret = dp_display_usbpd_configure_cb(&pdev->dev); 576 610 if (ret) { /* link train failed */ 577 611 dp->hpd_state = ST_DISCONNECTED; 578 612 } else { ··· 611 631 static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) 612 632 { 613 633 u32 state; 634 + struct platform_device *pdev = dp->dp_display.pdev; 614 635 615 636 mutex_lock(&dp->event_mutex); 616 637 ··· 662 681 dp->dp_display.connector_type, state); 663 682 664 683 /* uevent will complete disconnection part */ 684 + pm_runtime_put_sync(&pdev->dev); 665 685 mutex_unlock(&dp->event_mutex); 666 686 return 0; 667 687 } ··· 678 696 drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", 679 697 dp->dp_display.connector_type, state); 680 698 681 - if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { 699 + if (state == ST_DISPLAY_OFF) { 682 700 mutex_unlock(&dp->event_mutex); 683 701 return 0; 684 702 } ··· 702 720 703 721 static void dp_display_deinit_sub_modules(struct dp_display_private *dp) 704 722 { 705 - dp_debug_put(dp->debug); 706 723 dp_audio_put(dp->audio); 707 724 dp_panel_put(dp->panel); 708 725 dp_aux_put(dp->aux); ··· 899 918 900 919 dp_display->plugged_cb = fn; 901 920 dp_display->codec_dev = codec_dev; 902 - plugged = dp_display->is_connected; 921 + plugged = dp_display->link_ready; 903 922 dp_display_handle_plugged_change(dp_display, plugged); 904 923 905 924 return 0; ··· 1089 1108 spin_unlock_irqrestore(&dp_priv->event_lock, flag); 1090 1109 1091 1110 switch (todo->event_id) { 1092 - case EV_HPD_INIT_SETUP: 1093 - dp_display_host_init(dp_priv); 1094 - break; 1095 1111 case EV_HPD_PLUG_INT: 1096 1112 dp_hpd_plug_handle(dp_priv, todo->data); 1097 1113 break; ··· 1167 1189 return ret; 1168 1190 } 1169 1191 1170 - int dp_display_request_irq(struct msm_dp *dp_display) 1192 + static int dp_display_request_irq(struct dp_display_private *dp) 1171 1193 { 1172 1194 int rc = 0; 1173 - struct dp_display_private *dp; 1195 + struct platform_device *pdev = dp->dp_display.pdev; 1174 1196 1175 - if (!dp_display) { 1176 - DRM_ERROR("invalid input\n"); 1177 - return -EINVAL; 1178 - } 1179 - 1180 - dp = container_of(dp_display, struct dp_display_private, dp_display); 1181 - 1182 - dp->irq = 
irq_of_parse_and_map(dp->dp_display.pdev->dev.of_node, 0); 1183 - if (!dp->irq) { 1197 + dp->irq = platform_get_irq(pdev, 0); 1198 + if (dp->irq < 0) { 1184 1199 DRM_ERROR("failed to get irq\n"); 1185 - return -EINVAL; 1200 + return dp->irq; 1186 1201 } 1187 1202 1188 - rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq, 1189 - dp_display_irq_handler, 1190 - IRQF_TRIGGER_HIGH, "dp_display_isr", dp); 1203 + rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler, 1204 + IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN, 1205 + "dp_display_isr", dp); 1206 + 1191 1207 if (rc < 0) { 1192 1208 DRM_ERROR("failed to request IRQ%u: %d\n", 1193 1209 dp->irq, rc); ··· 1208 1236 1209 1237 dev_err(&pdev->dev, "unknown displayport instance\n"); 1210 1238 return NULL; 1239 + } 1240 + 1241 + static int dp_display_get_next_bridge(struct msm_dp *dp); 1242 + 1243 + static int dp_display_probe_tail(struct device *dev) 1244 + { 1245 + struct msm_dp *dp = dev_get_drvdata(dev); 1246 + int ret; 1247 + 1248 + ret = dp_display_get_next_bridge(dp); 1249 + if (ret) 1250 + return ret; 1251 + 1252 + ret = component_add(dev, &dp_display_comp_ops); 1253 + if (ret) 1254 + DRM_ERROR("component add failed, rc=%d\n", ret); 1255 + 1256 + return ret; 1257 + } 1258 + 1259 + static int dp_auxbus_done_probe(struct drm_dp_aux *aux) 1260 + { 1261 + return dp_display_probe_tail(aux->dev); 1211 1262 } 1212 1263 1213 1264 static int dp_display_probe(struct platform_device *pdev) ··· 1266 1271 return -EPROBE_DEFER; 1267 1272 } 1268 1273 1274 + rc = dp->parser->parse(dp->parser); 1275 + if (rc) { 1276 + DRM_ERROR("device tree parsing failed\n"); 1277 + goto err; 1278 + } 1279 + 1280 + rc = dp_power_client_init(dp->power); 1281 + if (rc) { 1282 + DRM_ERROR("Power client create failed\n"); 1283 + goto err; 1284 + } 1285 + 1269 1286 /* setup event q */ 1270 1287 mutex_init(&dp->event_mutex); 1271 1288 init_waitqueue_head(&dp->event_q); ··· 1290 1283 1291 1284 platform_set_drvdata(pdev, &dp->dp_display); 1292 1285 1293 - rc = component_add(&pdev->dev, &dp_display_comp_ops); 1294 - if (rc) { 1295 - DRM_ERROR("component add failed, rc=%d\n", rc); 1296 - dp_display_deinit_sub_modules(dp); 1286 + rc = devm_pm_runtime_enable(&pdev->dev); 1287 + if (rc) 1288 + goto err; 1289 + 1290 + rc = dp_display_request_irq(dp); 1291 + if (rc) 1292 + goto err; 1293 + 1294 + if (dp->dp_display.is_edp) { 1295 + rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe); 1296 + if (rc) { 1297 + DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc); 1298 + goto err; 1299 + } 1300 + } else { 1301 + rc = dp_display_probe_tail(&pdev->dev); 1302 + if (rc) 1303 + goto err; 1297 1304 } 1298 1305 1306 + return rc; 1307 + 1308 + err: 1309 + dp_display_deinit_sub_modules(dp); 1299 1310 return rc; 1300 1311 } 1301 1312 ··· 1323 1298 1324 1299 component_del(&pdev->dev, &dp_display_comp_ops); 1325 1300 dp_display_deinit_sub_modules(dp); 1326 - 1327 1301 platform_set_drvdata(pdev, NULL); 1328 1302 } 1329 1303 1330 - static int dp_pm_resume(struct device *dev) 1304 + static int dp_pm_runtime_suspend(struct device *dev) 1331 1305 { 1332 - struct platform_device *pdev = to_platform_device(dev); 1333 - struct msm_dp *dp_display = platform_get_drvdata(pdev); 1334 - struct dp_display_private *dp; 1335 - int sink_count = 0; 1306 + struct dp_display_private *dp = dev_get_dp_display_private(dev); 1336 1307 1337 - dp = container_of(dp_display, struct dp_display_private, dp_display); 1308 + disable_irq(dp->irq); 1338 1309 1339 - mutex_lock(&dp->event_mutex); 1340 - 1341 - 
drm_dbg_dp(dp->drm_dev, 1342 - "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n", 1343 - dp->dp_display.connector_type, dp->core_initialized, 1344 - dp->phy_initialized, dp_display->power_on); 1345 - 1346 - /* start from disconnected state */ 1347 - dp->hpd_state = ST_DISCONNECTED; 1348 - 1349 - /* turn on dp ctrl/phy */ 1350 - dp_display_host_init(dp); 1351 - 1352 - if (dp_display->is_edp) 1353 - dp_catalog_ctrl_hpd_enable(dp->catalog); 1354 - 1355 - if (dp_catalog_link_is_connected(dp->catalog)) { 1356 - /* 1357 - * set sink to normal operation mode -- D0 1358 - * before dpcd read 1359 - */ 1360 - dp_display_host_phy_init(dp); 1361 - dp_link_psm_config(dp->link, &dp->panel->link_info, false); 1362 - sink_count = drm_dp_read_sink_count(dp->aux); 1363 - if (sink_count < 0) 1364 - sink_count = 0; 1365 - 1310 + if (dp->dp_display.is_edp) { 1366 1311 dp_display_host_phy_exit(dp); 1312 + dp_catalog_ctrl_hpd_disable(dp->catalog); 1367 1313 } 1368 - 1369 - dp->link->sink_count = sink_count; 1370 - /* 1371 - * can not declared display is connected unless 1372 - * HDMI cable is plugged in and sink_count of 1373 - * dongle become 1 1374 - * also only signal audio when disconnected 1375 - */ 1376 - if (dp->link->sink_count) { 1377 - dp->dp_display.is_connected = true; 1378 - } else { 1379 - dp->dp_display.is_connected = false; 1380 - dp_display_handle_plugged_change(dp_display, false); 1381 - } 1382 - 1383 - drm_dbg_dp(dp->drm_dev, 1384 - "After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n", 1385 - dp->dp_display.connector_type, dp->link->sink_count, 1386 - dp->dp_display.is_connected, dp->core_initialized, 1387 - dp->phy_initialized, dp_display->power_on); 1388 - 1389 - mutex_unlock(&dp->event_mutex); 1314 + dp_display_host_deinit(dp); 1390 1315 1391 1316 return 0; 1392 1317 } 1393 1318 1394 - static int dp_pm_suspend(struct device *dev) 1319 + static int dp_pm_runtime_resume(struct device *dev) 1395 1320 { 1396 - struct platform_device *pdev = to_platform_device(dev); 1397 - struct msm_dp *dp_display = platform_get_drvdata(pdev); 1398 - struct dp_display_private *dp; 1321 + struct dp_display_private *dp = dev_get_dp_display_private(dev); 1399 1322 1400 - dp = container_of(dp_display, struct dp_display_private, dp_display); 1323 + /* 1324 + * for eDP, host cotroller, HPD block and PHY are enabled here 1325 + * but with HPD irq disabled 1326 + * 1327 + * for DP, only host controller is enabled here. 
1328 + * HPD block is enabled at dp_bridge_hpd_enable() 1329 + * PHY will be enabled at plugin handler later 1330 + */ 1331 + dp_display_host_init(dp); 1332 + if (dp->dp_display.is_edp) { 1333 + dp_catalog_ctrl_hpd_enable(dp->catalog); 1334 + dp_display_host_phy_init(dp); 1335 + } 1401 1336 1402 - mutex_lock(&dp->event_mutex); 1403 - 1404 - drm_dbg_dp(dp->drm_dev, 1405 - "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n", 1406 - dp->dp_display.connector_type, dp->core_initialized, 1407 - dp->phy_initialized, dp_display->power_on); 1408 - 1409 - /* mainlink enabled */ 1410 - if (dp_power_clk_status(dp->power, DP_CTRL_PM)) 1411 - dp_ctrl_off_link_stream(dp->ctrl); 1412 - 1413 - dp_display_host_phy_exit(dp); 1414 - 1415 - /* host_init will be called at pm_resume */ 1416 - dp_display_host_deinit(dp); 1417 - 1418 - dp->hpd_state = ST_SUSPENDED; 1419 - 1420 - drm_dbg_dp(dp->drm_dev, 1421 - "After, type=%d core_inited=%d phy_inited=%d power_on=%d\n", 1422 - dp->dp_display.connector_type, dp->core_initialized, 1423 - dp->phy_initialized, dp_display->power_on); 1424 - 1425 - mutex_unlock(&dp->event_mutex); 1426 - 1337 + enable_irq(dp->irq); 1427 1338 return 0; 1428 1339 } 1429 1340 1430 1341 static const struct dev_pm_ops dp_pm_ops = { 1431 - .suspend = dp_pm_suspend, 1432 - .resume = dp_pm_resume, 1342 + SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL) 1343 + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1344 + pm_runtime_force_resume) 1433 1345 }; 1434 1346 1435 1347 static struct platform_driver dp_display_driver = { ··· 1396 1434 platform_driver_unregister(&dp_display_driver); 1397 1435 } 1398 1436 1399 - void msm_dp_irq_postinstall(struct msm_dp *dp_display) 1400 - { 1401 - struct dp_display_private *dp; 1402 - 1403 - if (!dp_display) 1404 - return; 1405 - 1406 - dp = container_of(dp_display, struct dp_display_private, dp_display); 1407 - 1408 - if (!dp_display->is_edp) 1409 - dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 0); 1410 - } 1411 - 1412 1437 bool msm_dp_wide_bus_available(const struct msm_dp *dp_display) 1413 1438 { 1414 1439 struct dp_display_private *dp; ··· 1405 1456 return dp->wide_bus_en; 1406 1457 } 1407 1458 1408 - void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) 1459 + void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp) 1409 1460 { 1410 1461 struct dp_display_private *dp; 1411 1462 struct device *dev; ··· 1416 1467 1417 1468 dp->debug = dp_debug_get(dev, dp->panel, 1418 1469 dp->link, dp->dp_display.connector, 1419 - minor); 1470 + root, is_edp); 1420 1471 if (IS_ERR(dp->debug)) { 1421 1472 rc = PTR_ERR(dp->debug); 1422 1473 DRM_ERROR("failed to initialize debug, rc = %d\n", rc); ··· 1428 1479 { 1429 1480 int rc; 1430 1481 struct dp_display_private *dp_priv; 1431 - struct device_node *aux_bus; 1432 - struct device *dev; 1433 1482 1434 1483 dp_priv = container_of(dp, struct dp_display_private, dp_display); 1435 - dev = &dp_priv->dp_display.pdev->dev; 1436 - aux_bus = of_get_child_by_name(dev->of_node, "aux-bus"); 1437 - 1438 - if (aux_bus && dp->is_edp) { 1439 - dp_display_host_init(dp_priv); 1440 - dp_catalog_ctrl_hpd_enable(dp_priv->catalog); 1441 - dp_display_host_phy_init(dp_priv); 1442 - 1443 - /* 1444 - * The code below assumes that the panel will finish probing 1445 - * by the time devm_of_dp_aux_populate_ep_devices() returns. 
1446 - * This isn't a great assumption since it will fail if the 1447 - * panel driver is probed asynchronously but is the best we 1448 - * can do without a bigger driver reorganization. 1449 - */ 1450 - rc = of_dp_aux_populate_bus(dp_priv->aux, NULL); 1451 - of_node_put(aux_bus); 1452 - if (rc) 1453 - goto error; 1454 - } else if (dp->is_edp) { 1455 - DRM_ERROR("eDP aux_bus not found\n"); 1456 - return -ENODEV; 1457 - } 1458 1484 1459 1485 /* 1460 1486 * External bridges are mandatory for eDP interfaces: one has to ··· 1438 1514 * For DisplayPort interfaces external bridges are optional, so 1439 1515 * silently ignore an error if one is not present (-ENODEV). 1440 1516 */ 1441 - rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser); 1517 + rc = devm_dp_parser_find_next_bridge(&dp->pdev->dev, dp_priv->parser); 1442 1518 if (!dp->is_edp && rc == -ENODEV) 1443 1519 return 0; 1444 1520 1445 - if (!rc) { 1521 + if (!rc) 1446 1522 dp->next_bridge = dp_priv->parser->next_bridge; 1447 - return 0; 1448 - } 1449 1523 1450 - error: 1451 - if (dp->is_edp) { 1452 - of_dp_aux_depopulate_bus(dp_priv->aux); 1453 - dp_display_host_phy_exit(dp_priv); 1454 - dp_display_host_deinit(dp_priv); 1455 - } 1456 1524 return rc; 1457 1525 } 1458 1526 ··· 1457 1541 dp_display->drm_dev = dev; 1458 1542 1459 1543 dp_priv = container_of(dp_display, struct dp_display_private, dp_display); 1460 - 1461 - ret = dp_display_request_irq(dp_display); 1462 - if (ret) { 1463 - DRM_ERROR("request_irq failed, ret=%d\n", ret); 1464 - return ret; 1465 - } 1466 - 1467 - ret = dp_display_get_next_bridge(dp_display); 1468 - if (ret) 1469 - return ret; 1470 1544 1471 1545 ret = dp_bridge_init(dp_display, dev, encoder); 1472 1546 if (ret) { ··· 1499 1593 dp_hpd_plug_handle(dp_display, 0); 1500 1594 1501 1595 mutex_lock(&dp_display->event_mutex); 1596 + if (pm_runtime_resume_and_get(&dp->pdev->dev)) { 1597 + DRM_ERROR("failed to pm_runtime_resume\n"); 1598 + mutex_unlock(&dp_display->event_mutex); 1599 + return; 1600 + } 1502 1601 1503 1602 state = dp_display->hpd_state; 1504 1603 if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) { ··· 1568 1657 mutex_lock(&dp_display->event_mutex); 1569 1658 1570 1659 state = dp_display->hpd_state; 1571 - if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) { 1572 - mutex_unlock(&dp_display->event_mutex); 1573 - return; 1574 - } 1660 + if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) 1661 + drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n", 1662 + dp->connector_type, state); 1575 1663 1576 1664 dp_display_disable(dp_display); 1577 1665 ··· 1583 1673 } 1584 1674 1585 1675 drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); 1676 + 1677 + pm_runtime_put_sync(&dp->pdev->dev); 1586 1678 mutex_unlock(&dp_display->event_mutex); 1587 1679 } 1588 1680 ··· 1623 1711 struct msm_dp *dp_display = dp_bridge->dp_display; 1624 1712 struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); 1625 1713 1714 + /* 1715 + * this is for external DP with hpd irq enabled case, 1716 + * step-1: dp_pm_runtime_resume() enable dp host only 1717 + * step-2: enable hdp block and have hpd irq enabled here 1718 + * step-3: waiting for plugin irq while phy is not initialized 1719 + * step-4: DP PHY is initialized at plugin handler before link training 1720 + * 1721 + */ 1626 1722 mutex_lock(&dp->event_mutex); 1723 + if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) { 1724 + DRM_ERROR("failed to resume power\n"); 1725 + 
mutex_unlock(&dp->event_mutex); 1726 + return; 1727 + } 1728 + 1627 1729 dp_catalog_ctrl_hpd_enable(dp->catalog); 1628 1730 1629 1731 /* enable HDP interrupts */ ··· 1659 1733 dp_catalog_ctrl_hpd_disable(dp->catalog); 1660 1734 1661 1735 dp_display->internal_hpd = false; 1736 + 1737 + pm_runtime_put_sync(&dp_display->pdev->dev); 1662 1738 mutex_unlock(&dp->event_mutex); 1663 1739 } 1664 1740 ··· 1675 1747 if (dp_display->internal_hpd) 1676 1748 return; 1677 1749 1678 - if (!dp->core_initialized) { 1679 - drm_dbg_dp(dp->drm_dev, "not initialized\n"); 1680 - return; 1681 - } 1682 - 1683 - if (!dp_display->is_connected && status == connector_status_connected) 1750 + if (!dp_display->link_ready && status == connector_status_connected) 1684 1751 dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); 1685 - else if (dp_display->is_connected && status == connector_status_disconnected) 1752 + else if (dp_display->link_ready && status == connector_status_disconnected) 1686 1753 dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); 1687 1754 }
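
The dp_display changes above replace the driver's old system suspend/resume hooks with runtime PM: the hpd enable/disable and bridge enable/post_disable paths now take and drop a runtime-PM reference around register access, and system sleep is routed through pm_runtime_force_suspend()/pm_runtime_force_resume(). A minimal sketch of that pattern with a hypothetical example_dp driver (not the msm code itself):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    struct example_dp {
            struct device *dev;
    };

    static int example_runtime_suspend(struct device *dev)
    {
            /* disable link clocks / PHY, deinit the host block */
            return 0;
    }

    static int example_runtime_resume(struct device *dev)
    {
            /* re-enable core clocks and reinit the host block */
            return 0;
    }

    static void example_hpd_enable(struct example_dp *dp)
    {
            /* Take a runtime-PM reference before touching registers; on
             * failure the usage count has already been dropped again. */
            if (pm_runtime_resume_and_get(dp->dev))
                    return;

            /* ... enable the HPD block and unmask its interrupts ... */
    }

    static void example_hpd_disable(struct example_dp *dp)
    {
            /* ... mask interrupts and disable the HPD block ... */

            /* Drop the reference; the device may runtime-suspend right away. */
            pm_runtime_put_sync(dp->dev);
    }

    static const struct dev_pm_ops example_pm_ops = {
            SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
            /* System sleep becomes a forced runtime suspend/resume cycle. */
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
    };

pm_runtime_resume_and_get() already drops the usage count on failure, which is why the hpd handlers in the diff can simply unlock the event mutex and return on error.
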
+2 -2
drivers/gpu/drm/msm/dp/dp_display.h
··· 17 17 struct drm_bridge *bridge; 18 18 struct drm_connector *connector; 19 19 struct drm_bridge *next_bridge; 20 - bool is_connected; 20 + bool link_ready; 21 21 bool audio_enabled; 22 22 bool power_on; 23 23 unsigned int connector_type; ··· 36 36 int dp_display_set_plugged_cb(struct msm_dp *dp_display, 37 37 hdmi_codec_plugged_cb fn, struct device *codec_dev); 38 38 int dp_display_get_modes(struct msm_dp *dp_display); 39 - int dp_display_request_irq(struct msm_dp *dp_display); 40 39 bool dp_display_check_video_test(struct msm_dp *dp_display); 41 40 int dp_display_get_test_bpp(struct msm_dp *dp_display); 42 41 void dp_display_signal_audio_start(struct msm_dp *dp_display); 43 42 void dp_display_signal_audio_complete(struct msm_dp *dp_display); 44 43 void dp_display_set_psr(struct msm_dp *dp, bool enter); 44 + void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp); 45 45 46 46 #endif /* _DP_DISPLAY_H_ */
+23 -7
drivers/gpu/drm/msm/dp/dp_drm.c
··· 24 24 25 25 dp = to_dp_bridge(bridge)->dp_display; 26 26 27 - drm_dbg_dp(dp->drm_dev, "is_connected = %s\n", 28 - (dp->is_connected) ? "true" : "false"); 27 + drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", 28 + (dp->link_ready) ? "true" : "false"); 29 29 30 - return (dp->is_connected) ? connector_status_connected : 30 + return (dp->link_ready) ? connector_status_connected : 31 31 connector_status_disconnected; 32 32 } 33 33 ··· 40 40 41 41 dp = to_dp_bridge(bridge)->dp_display; 42 42 43 - drm_dbg_dp(dp->drm_dev, "is_connected = %s\n", 44 - (dp->is_connected) ? "true" : "false"); 43 + drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", 44 + (dp->link_ready) ? "true" : "false"); 45 45 46 46 /* 47 47 * There is no protection in the DRM framework to check if the display ··· 55 55 * After that this piece of code can be removed. 56 56 */ 57 57 if (bridge->ops & DRM_BRIDGE_OP_HPD) 58 - return (dp->is_connected) ? 0 : -ENOTCONN; 58 + return (dp->link_ready) ? 0 : -ENOTCONN; 59 59 60 60 return 0; 61 61 } ··· 78 78 dp = to_dp_bridge(bridge)->dp_display; 79 79 80 80 /* pluggable case assumes EDID is read when HPD */ 81 - if (dp->is_connected) { 81 + if (dp->link_ready) { 82 82 rc = dp_display_get_modes(dp); 83 83 if (rc <= 0) { 84 84 DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc); ··· 88 88 drm_dbg_dp(connector->dev, "No sink connected\n"); 89 89 } 90 90 return rc; 91 + } 92 + 93 + static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) 94 + { 95 + struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; 96 + 97 + dp_display_debugfs_init(dp, root, false); 91 98 } 92 99 93 100 static const struct drm_bridge_funcs dp_bridge_ops = { ··· 112 105 .hpd_enable = dp_bridge_hpd_enable, 113 106 .hpd_disable = dp_bridge_hpd_disable, 114 107 .hpd_notify = dp_bridge_hpd_notify, 108 + .debugfs_init = dp_bridge_debugfs_init, 115 109 }; 116 110 117 111 static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, ··· 268 260 return MODE_OK; 269 261 } 270 262 263 + static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) 264 + { 265 + struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; 266 + 267 + dp_display_debugfs_init(dp, root, true); 268 + } 269 + 271 270 static const struct drm_bridge_funcs edp_bridge_ops = { 272 271 .atomic_enable = edp_bridge_atomic_enable, 273 272 .atomic_disable = edp_bridge_atomic_disable, ··· 285 270 .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, 286 271 .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, 287 272 .atomic_check = edp_bridge_atomic_check, 273 + .debugfs_init = edp_bridge_debugfs_init, 288 274 }; 289 275 290 276 int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+2 -30
drivers/gpu/drm/msm/dp/dp_power.c
··· 152 152 153 153 power = container_of(dp_power, struct dp_power_private, dp_power); 154 154 155 - pm_runtime_enable(power->dev); 156 - 157 155 return dp_power_clk_init(power); 158 - } 159 - 160 - void dp_power_client_deinit(struct dp_power *dp_power) 161 - { 162 - struct dp_power_private *power; 163 - 164 - power = container_of(dp_power, struct dp_power_private, dp_power); 165 - 166 - pm_runtime_disable(power->dev); 167 156 } 168 157 169 158 int dp_power_init(struct dp_power *dp_power) 170 159 { 171 - int rc = 0; 172 - struct dp_power_private *power = NULL; 173 - 174 - power = container_of(dp_power, struct dp_power_private, dp_power); 175 - 176 - pm_runtime_get_sync(power->dev); 177 - 178 - rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true); 179 - if (rc) 180 - pm_runtime_put_sync(power->dev); 181 - 182 - return rc; 160 + return dp_power_clk_enable(dp_power, DP_CORE_PM, true); 183 161 } 184 162 185 163 int dp_power_deinit(struct dp_power *dp_power) 186 164 { 187 - struct dp_power_private *power; 188 - 189 - power = container_of(dp_power, struct dp_power_private, dp_power); 190 - 191 - dp_power_clk_enable(dp_power, DP_CORE_PM, false); 192 - pm_runtime_put_sync(power->dev); 193 - return 0; 165 + return dp_power_clk_enable(dp_power, DP_CORE_PM, false); 194 166 } 195 167 196 168 struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
-11
drivers/gpu/drm/msm/dp/dp_power.h
··· 81 81 int dp_power_client_init(struct dp_power *power); 82 82 83 83 /** 84 - * dp_power_clinet_deinit() - de-initialize clock and regulator modules 85 - * 86 - * @power: instance of power module 87 - * return: 0 for success, error for failure. 88 - * 89 - * This API will de-initialize the DisplayPort's clocks and regulator 90 - * modules. 91 - */ 92 - void dp_power_client_deinit(struct dp_power *power); 93 - 94 - /** 95 84 * dp_power_get() - configure and get the DisplayPort power module data 96 85 * 97 86 * @parser: instance of parser module
+17
drivers/gpu/drm/msm/dsi/dsi_cfg.c
··· 190 190 }, 191 191 }; 192 192 193 + static const struct regulator_bulk_data sm8650_dsi_regulators[] = { 194 + { .supply = "vdda", .init_load_uA = 16600 }, /* 1.2 V */ 195 + }; 196 + 197 + static const struct msm_dsi_config sm8650_dsi_cfg = { 198 + .io_offset = DSI_6G_REG_SHIFT, 199 + .regulator_data = sm8650_dsi_regulators, 200 + .num_regulators = ARRAY_SIZE(sm8650_dsi_regulators), 201 + .bus_clk_names = dsi_v2_4_clk_names, 202 + .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names), 203 + .io_start = { 204 + { 0xae94000, 0xae96000 }, 205 + }, 206 + }; 207 + 193 208 static const struct regulator_bulk_data sc7280_dsi_regulators[] = { 194 209 { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */ 195 210 { .supply = "refgen" }, ··· 296 281 &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 297 282 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0, 298 283 &sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 284 + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0, 285 + &sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 299 286 }; 300 287 301 288 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+1
drivers/gpu/drm/msm/dsi/dsi_cfg.h
··· 28 28 #define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000 29 29 #define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000 30 30 #define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000 31 + #define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000 31 32 32 33 #define MSM_DSI_V2_VER_MINOR_8064 0x0 33 34
+9 -1
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
··· 516 516 struct device *dev = &phy->pdev->dev; 517 517 int ret; 518 518 519 - pm_runtime_get_sync(dev); 519 + ret = pm_runtime_resume_and_get(dev); 520 + if (ret) 521 + return ret; 520 522 521 523 ret = clk_prepare_enable(phy->ahb_clk); 522 524 if (ret) { ··· 587 585 .data = &dsi_phy_5nm_8450_cfgs }, 588 586 { .compatible = "qcom,sm8550-dsi-phy-4nm", 589 587 .data = &dsi_phy_4nm_8550_cfgs }, 588 + { .compatible = "qcom,sm8650-dsi-phy-4nm", 589 + .data = &dsi_phy_4nm_8650_cfgs }, 590 590 #endif 591 591 {} 592 592 }; ··· 692 688 if (IS_ERR(phy->ahb_clk)) 693 689 return dev_err_probe(dev, PTR_ERR(phy->ahb_clk), 694 690 "Unable to get ahb clk\n"); 691 + 692 + ret = devm_pm_runtime_enable(&pdev->dev); 693 + if (ret) 694 + return ret; 695 695 696 696 /* PLL init will call into clk_register which requires 697 697 * register access, so we need to enable power and ahb clock.
+1
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
··· 62 62 extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs; 63 63 extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs; 64 64 extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs; 65 + extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs; 65 66 66 67 struct msm_dsi_dphy_timing { 67 68 u32 clk_zero;
+27
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
··· 1121 1121 { .supply = "vdds", .init_load_uA = 37550 }, 1122 1122 }; 1123 1123 1124 + static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = { 1125 + { .supply = "vdds", .init_load_uA = 98000 }, 1126 + }; 1127 + 1124 1128 static const struct regulator_bulk_data dsi_phy_7nm_97800uA_regulators[] = { 1125 1129 { .supply = "vdds", .init_load_uA = 97800 }, 1126 1130 }; ··· 1267 1263 .has_phy_lane = true, 1268 1264 .regulator_data = dsi_phy_7nm_98400uA_regulators, 1269 1265 .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98400uA_regulators), 1266 + .ops = { 1267 + .enable = dsi_7nm_phy_enable, 1268 + .disable = dsi_7nm_phy_disable, 1269 + .pll_init = dsi_pll_7nm_init, 1270 + .save_pll_state = dsi_7nm_pll_save_state, 1271 + .restore_pll_state = dsi_7nm_pll_restore_state, 1272 + .set_continuous_clock = dsi_7nm_set_continuous_clock, 1273 + }, 1274 + .min_pll_rate = 600000000UL, 1275 + #ifdef CONFIG_64BIT 1276 + .max_pll_rate = 5000000000UL, 1277 + #else 1278 + .max_pll_rate = ULONG_MAX, 1279 + #endif 1280 + .io_start = { 0xae95000, 0xae97000 }, 1281 + .num_dsi_phy = 2, 1282 + .quirks = DSI_PHY_7NM_QUIRK_V5_2, 1283 + }; 1284 + 1285 + const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = { 1286 + .has_phy_lane = true, 1287 + .regulator_data = dsi_phy_7nm_98000uA_regulators, 1288 + .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators), 1270 1289 .ops = { 1271 1290 .enable = dsi_7nm_phy_enable, 1272 1291 .disable = dsi_7nm_phy_disable,
+25 -16
drivers/gpu/drm/msm/msm_debugfs.c
··· 304 304 return ret; 305 305 } 306 306 307 - void msm_debugfs_init(struct drm_minor *minor) 307 + static void msm_debugfs_gpu_init(struct drm_minor *minor) 308 308 { 309 309 struct drm_device *dev = minor->dev; 310 310 struct msm_drm_private *priv = dev->dev_private; 311 311 struct dentry *gpu_devfreq; 312 312 313 - drm_debugfs_create_files(msm_debugfs_list, 314 - ARRAY_SIZE(msm_debugfs_list), 315 - minor->debugfs_root, minor); 316 - 317 313 debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root, 318 314 dev, &msm_gpu_fops); 319 - 320 - if (priv->kms) { 321 - drm_debugfs_create_files(msm_kms_debugfs_list, 322 - ARRAY_SIZE(msm_kms_debugfs_list), 323 - minor->debugfs_root, minor); 324 - debugfs_create_file("kms", S_IRUSR, minor->debugfs_root, 325 - dev, &msm_kms_fops); 326 - } 327 315 328 316 debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root, 329 317 &priv->hangcheck_period); 330 318 331 319 debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root, 332 320 &priv->disable_err_irq); 333 - 334 - debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root, 335 - dev, &shrink_fops); 336 321 337 322 gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root); 338 323 ··· 329 344 330 345 debugfs_create_u32("downdifferential",0600, gpu_devfreq, 331 346 &priv->gpu_devfreq_config.downdifferential); 347 + } 348 + 349 + void msm_debugfs_init(struct drm_minor *minor) 350 + { 351 + struct drm_device *dev = minor->dev; 352 + struct msm_drm_private *priv = dev->dev_private; 353 + 354 + drm_debugfs_create_files(msm_debugfs_list, 355 + ARRAY_SIZE(msm_debugfs_list), 356 + minor->debugfs_root, minor); 357 + 358 + if (priv->gpu_pdev) 359 + msm_debugfs_gpu_init(minor); 360 + 361 + if (priv->kms) { 362 + drm_debugfs_create_files(msm_kms_debugfs_list, 363 + ARRAY_SIZE(msm_kms_debugfs_list), 364 + minor->debugfs_root, minor); 365 + debugfs_create_file("kms", S_IRUSR, minor->debugfs_root, 366 + dev, &msm_kms_fops); 367 + } 368 + 369 + debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root, 370 + dev, &shrink_fops); 332 371 333 372 if (priv->kms && priv->kms->funcs->debugfs_init) 334 373 priv->kms->funcs->debugfs_init(priv->kms, minor);
+92 -2
drivers/gpu/drm/msm/msm_drv.c
··· 37 37 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN 38 38 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT 39 39 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST) 40 + * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA 40 41 */ 41 42 #define MSM_VERSION_MAJOR 1 42 - #define MSM_VERSION_MINOR 10 43 + #define MSM_VERSION_MINOR 12 43 44 #define MSM_VERSION_PATCHLEVEL 0 44 45 45 46 static void msm_deinit_vram(struct drm_device *ddev); ··· 545 544 return msm_gem_set_iova(obj, ctx->aspace, iova); 546 545 } 547 546 547 + static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj, 548 + __user void *metadata, 549 + u32 metadata_size) 550 + { 551 + struct msm_gem_object *msm_obj = to_msm_bo(obj); 552 + void *buf; 553 + int ret; 554 + 555 + /* Impose a moderate upper bound on metadata size: */ 556 + if (metadata_size > 128) { 557 + return -EOVERFLOW; 558 + } 559 + 560 + /* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */ 561 + buf = memdup_user(metadata, metadata_size); 562 + if (IS_ERR(buf)) 563 + return PTR_ERR(buf); 564 + 565 + ret = msm_gem_lock_interruptible(obj); 566 + if (ret) 567 + goto out; 568 + 569 + msm_obj->metadata = 570 + krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL); 571 + msm_obj->metadata_size = metadata_size; 572 + memcpy(msm_obj->metadata, buf, metadata_size); 573 + 574 + msm_gem_unlock(obj); 575 + 576 + out: 577 + kfree(buf); 578 + 579 + return ret; 580 + } 581 + 582 + static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj, 583 + __user void *metadata, 584 + u32 *metadata_size) 585 + { 586 + struct msm_gem_object *msm_obj = to_msm_bo(obj); 587 + void *buf; 588 + int ret, len; 589 + 590 + if (!metadata) { 591 + /* 592 + * Querying the size is inherently racey, but 593 + * EXT_external_objects expects the app to confirm 594 + * via device and driver UUIDs that the exporter and 595 + * importer versions match. 
All we can do from the 596 + * kernel side is check the length under obj lock 597 + * when userspace tries to retrieve the metadata 598 + */ 599 + *metadata_size = msm_obj->metadata_size; 600 + return 0; 601 + } 602 + 603 + ret = msm_gem_lock_interruptible(obj); 604 + if (ret) 605 + return ret; 606 + 607 + /* Avoid copy_to_user() under gem obj lock: */ 608 + len = msm_obj->metadata_size; 609 + buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL); 610 + 611 + msm_gem_unlock(obj); 612 + 613 + if (*metadata_size < len) { 614 + ret = -ETOOSMALL; 615 + } else if (copy_to_user(metadata, buf, len)) { 616 + ret = -EFAULT; 617 + } else { 618 + *metadata_size = len; 619 + } 620 + 621 + kfree(buf); 622 + 623 + return 0; 624 + } 625 + 548 626 static int msm_ioctl_gem_info(struct drm_device *dev, void *data, 549 627 struct drm_file *file) 550 628 { ··· 646 566 break; 647 567 case MSM_INFO_SET_NAME: 648 568 case MSM_INFO_GET_NAME: 569 + case MSM_INFO_SET_METADATA: 570 + case MSM_INFO_GET_METADATA: 649 571 break; 650 572 default: 651 573 return -EINVAL; ··· 700 618 break; 701 619 case MSM_INFO_GET_NAME: 702 620 if (args->value && (args->len < strlen(msm_obj->name))) { 703 - ret = -EINVAL; 621 + ret = -ETOOSMALL; 704 622 break; 705 623 } 706 624 args->len = strlen(msm_obj->name); ··· 709 627 msm_obj->name, args->len)) 710 628 ret = -EFAULT; 711 629 } 630 + break; 631 + case MSM_INFO_SET_METADATA: 632 + ret = msm_ioctl_gem_info_set_metadata( 633 + obj, u64_to_user_ptr(args->value), args->len); 634 + break; 635 + case MSM_INFO_GET_METADATA: 636 + ret = msm_ioctl_gem_info_get_metadata( 637 + obj, u64_to_user_ptr(args->value), &args->len); 712 638 break; 713 639 } 714 640
+2 -13
drivers/gpu/drm/msm/msm_drv.h
··· 78 78 * enum msm_event_wait - type of HW events to wait for 79 79 * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW 80 80 * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel 81 - * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters) 82 81 */ 83 82 enum msm_event_wait { 84 83 MSM_ENC_COMMIT_DONE = 0, 85 84 MSM_ENC_TX_COMPLETE, 86 - MSM_ENC_VBLANK, 87 85 }; 88 86 89 87 /** ··· 90 92 * @num_intf: number of interfaces the panel is mounted on 91 93 * @num_dspp: number of dspp blocks used 92 94 * @num_dsc: number of Display Stream Compression (DSC) blocks used 95 + * @needs_cdm: indicates whether cdm block is needed for this display topology 93 96 */ 94 97 struct msm_display_topology { 95 98 u32 num_lm; 96 99 u32 num_intf; 97 100 u32 num_dspp; 98 101 u32 num_dsc; 102 + bool needs_cdm; 99 103 }; 100 104 101 105 /* Commit/Event thread specific structure */ ··· 386 386 void __exit msm_dp_unregister(void); 387 387 int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, 388 388 struct drm_encoder *encoder); 389 - void msm_dp_irq_postinstall(struct msm_dp *dp_display); 390 389 void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display); 391 390 392 - void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor); 393 391 bool msm_dp_wide_bus_available(const struct msm_dp *dp_display); 394 392 395 393 #else ··· 405 407 return -EINVAL; 406 408 } 407 409 408 - static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display) 409 - { 410 - } 411 - 412 410 static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display) 413 - { 414 - } 415 - 416 - static inline void msm_dp_debugfs_init(struct msm_dp *dp_display, 417 - struct drm_minor *minor) 418 411 { 419 412 } 420 413
+4 -3
drivers/gpu/drm/msm/msm_gem.c
··· 226 226 227 227 msm_gem_assert_locked(obj); 228 228 229 - if (GEM_WARN_ON(msm_obj->madv > madv)) { 230 - DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", 231 - msm_obj->madv, madv); 229 + if (msm_obj->madv > madv) { 230 + DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n", 231 + msm_obj->madv, madv); 232 232 return ERR_PTR(-EBUSY); 233 233 } 234 234 ··· 1058 1058 1059 1059 drm_gem_object_release(obj); 1060 1060 1061 + kfree(msm_obj->metadata); 1061 1062 kfree(msm_obj); 1062 1063 } 1063 1064
+9 -8
drivers/gpu/drm/msm/msm_gem.h
··· 9 9 10 10 #include <linux/kref.h> 11 11 #include <linux/dma-resv.h> 12 + #include "drm/drm_exec.h" 12 13 #include "drm/gpu_scheduler.h" 13 14 #include "msm_drv.h" 14 15 ··· 108 107 struct drm_mm_node *vram_node; 109 108 110 109 char name[32]; /* Identifier to print for the debugfs files */ 110 + 111 + /* userspace metadata backchannel */ 112 + void *metadata; 113 + u32 metadata_size; 111 114 112 115 /** 113 116 * pin_count: Number of times the pages are pinned ··· 259 254 struct msm_gpu *gpu; 260 255 struct msm_gem_address_space *aspace; 261 256 struct list_head node; /* node in ring submit list */ 262 - struct ww_acquire_ctx ticket; 257 + struct drm_exec exec; 263 258 uint32_t seqno; /* Sequence number of the submit on the ring */ 264 259 265 260 /* Hw fence, which is created when the scheduler executes the job, and ··· 275 270 int fence_id; /* key into queue->fence_idr */ 276 271 struct msm_gpu_submitqueue *queue; 277 272 struct pid *pid; /* submitting process */ 278 - bool fault_dumped; /* Limit devcoredump dumping to one per submit */ 279 - bool valid; /* true if no cmdstream patching needed */ 280 - bool in_rb; /* "sudo" mode, copy cmds into RB */ 273 + bool bos_pinned : 1; 274 + bool fault_dumped:1;/* Limit devcoredump dumping to one per submit */ 275 + bool in_rb : 1; /* "sudo" mode, copy cmds into RB */ 281 276 struct msm_ringbuffer *ring; 282 277 unsigned int nr_cmds; 283 278 unsigned int nr_bos; ··· 292 287 struct drm_msm_gem_submit_reloc *relocs; 293 288 } *cmd; /* array of size nr_cmds */ 294 289 struct { 295 - /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ 296 - #define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */ 297 - #define BO_LOCKED 0x4000 /* obj lock is held */ 298 - #define BO_PINNED 0x2000 /* obj (pages) is pinned and on active list */ 299 290 uint32_t flags; 300 291 union { 301 292 struct drm_gem_object *obj;
+1 -1
drivers/gpu/drm/msm/msm_gem_shrinker.c
··· 75 75 wait_for_idle(struct drm_gem_object *obj) 76 76 { 77 77 enum dma_resv_usage usage = dma_resv_usage_rw(true); 78 - return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0; 78 + return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0; 79 79 } 80 80 81 81 static bool
+69 -164
drivers/gpu/drm/msm/msm_gem_submit.c
··· 17 17 #include "msm_gem.h" 18 18 #include "msm_gpu_trace.h" 19 19 20 + /* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable 21 + * error msgs for debugging, but we don't spam dmesg by default 22 + */ 23 + #define SUBMIT_ERROR(submit, fmt, ...) \ 24 + DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__) 25 + 20 26 /* 21 27 * Cmdstream submission: 22 28 */ ··· 43 37 if (sz > SIZE_MAX) 44 38 return ERR_PTR(-ENOMEM); 45 39 46 - submit = kzalloc(sz, GFP_KERNEL); 40 + submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); 47 41 if (!submit) 48 42 return ERR_PTR(-ENOMEM); 49 43 ··· 142 136 143 137 if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || 144 138 !(submit_bo.flags & MANDATORY_FLAGS)) { 145 - DRM_ERROR("invalid flags: %x\n", submit_bo.flags); 139 + SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags); 146 140 ret = -EINVAL; 147 141 i = 0; 148 142 goto out; ··· 150 144 151 145 submit->bos[i].handle = submit_bo.handle; 152 146 submit->bos[i].flags = submit_bo.flags; 153 - /* in validate_objects() we figure out if this is true: */ 154 - submit->bos[i].iova = submit_bo.presumed; 155 147 } 156 148 157 149 spin_lock(&file->table_lock); ··· 162 158 */ 163 159 obj = idr_find(&file->object_idr, submit->bos[i].handle); 164 160 if (!obj) { 165 - DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i); 161 + SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i); 166 162 ret = -EINVAL; 167 163 goto out_unlock; 168 164 } ··· 206 202 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: 207 203 break; 208 204 default: 209 - DRM_ERROR("invalid type: %08x\n", submit_cmd.type); 205 + SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type); 210 206 return -EINVAL; 211 207 } 212 208 213 209 if (submit_cmd.size % 4) { 214 - DRM_ERROR("non-aligned cmdstream buffer size: %u\n", 215 - submit_cmd.size); 210 + SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n", 211 + submit_cmd.size); 216 212 ret = -EINVAL; 217 213 goto out; 218 214 } ··· 232 228 ret = -ENOMEM; 233 229 goto out; 234 230 } 235 - submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL); 231 + submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN); 236 232 if (!submit->cmd[i].relocs) { 237 233 ret = -ENOMEM; 238 234 goto out; ··· 248 244 return ret; 249 245 } 250 246 251 - /* Unwind bo state, according to cleanup_flags. In the success case, only 252 - * the lock is dropped at the end of the submit (and active/pin ref is dropped 253 - * later when the submit is retired). 254 - */ 255 - static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, 256 - unsigned cleanup_flags) 257 - { 258 - struct drm_gem_object *obj = submit->bos[i].obj; 259 - unsigned flags = submit->bos[i].flags & cleanup_flags; 260 - 261 - /* 262 - * Clear flags bit before dropping lock, so that the msm_job_run() 263 - * path isn't racing with submit_cleanup() (ie. 
the read/modify/ 264 - * write is protected by the obj lock in all paths) 265 - */ 266 - submit->bos[i].flags &= ~cleanup_flags; 267 - 268 - if (flags & BO_PINNED) 269 - msm_gem_unpin_locked(obj); 270 - 271 - if (flags & BO_LOCKED) 272 - dma_resv_unlock(obj->resv); 273 - } 274 - 275 - static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) 276 - { 277 - unsigned cleanup_flags = BO_PINNED | BO_LOCKED; 278 - submit_cleanup_bo(submit, i, cleanup_flags); 279 - 280 - if (!(submit->bos[i].flags & BO_VALID)) 281 - submit->bos[i].iova = 0; 282 - } 283 - 284 247 /* This is where we make sure all the bo's are reserved and pin'd: */ 285 248 static int submit_lock_objects(struct msm_gem_submit *submit) 286 249 { 287 - int contended, slow_locked = -1, i, ret = 0; 250 + int ret; 288 251 289 - retry: 290 - for (i = 0; i < submit->nr_bos; i++) { 291 - struct drm_gem_object *obj = submit->bos[i].obj; 252 + drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos); 292 253 293 - if (slow_locked == i) 294 - slow_locked = -1; 295 - 296 - contended = i; 297 - 298 - if (!(submit->bos[i].flags & BO_LOCKED)) { 299 - ret = dma_resv_lock_interruptible(obj->resv, 300 - &submit->ticket); 254 + drm_exec_until_all_locked (&submit->exec) { 255 + for (unsigned i = 0; i < submit->nr_bos; i++) { 256 + struct drm_gem_object *obj = submit->bos[i].obj; 257 + ret = drm_exec_prepare_obj(&submit->exec, obj, 1); 258 + drm_exec_retry_on_contention(&submit->exec); 301 259 if (ret) 302 - goto fail; 303 - submit->bos[i].flags |= BO_LOCKED; 260 + goto error; 304 261 } 305 262 } 306 - 307 - ww_acquire_done(&submit->ticket); 308 263 309 264 return 0; 310 265 311 - fail: 312 - if (ret == -EALREADY) { 313 - DRM_ERROR("handle %u at index %u already on submit list\n", 314 - submit->bos[i].handle, i); 315 - ret = -EINVAL; 316 - } 317 - 318 - for (; i >= 0; i--) 319 - submit_unlock_unpin_bo(submit, i); 320 - 321 - if (slow_locked > 0) 322 - submit_unlock_unpin_bo(submit, slow_locked); 323 - 324 - if (ret == -EDEADLK) { 325 - struct drm_gem_object *obj = submit->bos[contended].obj; 326 - /* we lost out in a seqno race, lock and retry.. */ 327 - ret = dma_resv_lock_slow_interruptible(obj->resv, 328 - &submit->ticket); 329 - if (!ret) { 330 - submit->bos[contended].flags |= BO_LOCKED; 331 - slow_locked = contended; 332 - goto retry; 333 - } 334 - 335 - /* Not expecting -EALREADY here, if the bo was already 336 - * locked, we should have gotten -EALREADY already from 337 - * the dma_resv_lock_interruptable() call. 338 - */ 339 - WARN_ON_ONCE(ret == -EALREADY); 340 - } 341 - 266 + error: 342 267 return ret; 343 268 } 344 269 345 - static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) 270 + static int submit_fence_sync(struct msm_gem_submit *submit) 346 271 { 347 272 int i, ret = 0; 348 273 349 274 for (i = 0; i < submit->nr_bos; i++) { 350 275 struct drm_gem_object *obj = submit->bos[i].obj; 351 276 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; 352 - 353 - /* NOTE: _reserve_shared() must happen before 354 - * _add_shared_fence(), which makes this a slightly 355 - * strange place to call it. OTOH this is a 356 - * convenient can-fail point to hook it in. 
357 - */ 358 - ret = dma_resv_reserve_fences(obj->resv, 1); 359 - if (ret) 360 - return ret; 361 - 362 - /* If userspace has determined that explicit fencing is 363 - * used, it can disable implicit sync on the entire 364 - * submit: 365 - */ 366 - if (no_implicit) 367 - continue; 368 277 369 278 /* Otherwise userspace can ask for implicit sync to be 370 279 * disabled on specific buffers. This is useful for internal ··· 301 384 struct msm_drm_private *priv = submit->dev->dev_private; 302 385 int i, ret = 0; 303 386 304 - submit->valid = true; 305 - 306 387 for (i = 0; i < submit->nr_bos; i++) { 307 388 struct drm_gem_object *obj = submit->bos[i].obj; 308 389 struct msm_gem_vma *vma; ··· 316 401 if (ret) 317 402 break; 318 403 319 - if (vma->iova == submit->bos[i].iova) { 320 - submit->bos[i].flags |= BO_VALID; 321 - } else { 322 - submit->bos[i].iova = vma->iova; 323 - /* iova changed, so address in cmdstream is not valid: */ 324 - submit->bos[i].flags &= ~BO_VALID; 325 - submit->valid = false; 326 - } 404 + submit->bos[i].iova = vma->iova; 327 405 } 328 406 329 407 /* ··· 329 421 mutex_lock(&priv->lru.lock); 330 422 for (i = 0; i < submit->nr_bos; i++) { 331 423 msm_gem_pin_obj_locked(submit->bos[i].obj); 332 - submit->bos[i].flags |= BO_PINNED; 333 424 } 334 425 mutex_unlock(&priv->lru.lock); 335 426 427 + submit->bos_pinned = true; 428 + 336 429 return ret; 430 + } 431 + 432 + static void submit_unpin_objects(struct msm_gem_submit *submit) 433 + { 434 + if (!submit->bos_pinned) 435 + return; 436 + 437 + for (int i = 0; i < submit->nr_bos; i++) { 438 + struct drm_gem_object *obj = submit->bos[i].obj; 439 + 440 + msm_gem_unpin_locked(obj); 441 + } 442 + 443 + submit->bos_pinned = false; 337 444 } 338 445 339 446 static void submit_attach_object_fences(struct msm_gem_submit *submit) ··· 368 445 } 369 446 370 447 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, 371 - struct drm_gem_object **obj, uint64_t *iova, bool *valid) 448 + struct drm_gem_object **obj, uint64_t *iova) 372 449 { 373 450 if (idx >= submit->nr_bos) { 374 - DRM_ERROR("invalid buffer index: %u (out of %u)\n", 375 - idx, submit->nr_bos); 451 + SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n", 452 + idx, submit->nr_bos); 376 453 return -EINVAL; 377 454 } 378 455 ··· 380 457 *obj = submit->bos[idx].obj; 381 458 if (iova) 382 459 *iova = submit->bos[idx].iova; 383 - if (valid) 384 - *valid = !!(submit->bos[idx].flags & BO_VALID); 385 460 386 461 return 0; 387 462 } ··· 392 471 uint32_t *ptr; 393 472 int ret = 0; 394 473 395 - if (!nr_relocs) 396 - return 0; 397 - 398 474 if (offset % 4) { 399 - DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); 475 + SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset); 400 476 return -EINVAL; 401 477 } 402 478 ··· 412 494 struct drm_msm_gem_submit_reloc submit_reloc = relocs[i]; 413 495 uint32_t off; 414 496 uint64_t iova; 415 - bool valid; 416 497 417 498 if (submit_reloc.submit_offset % 4) { 418 - DRM_ERROR("non-aligned reloc offset: %u\n", 419 - submit_reloc.submit_offset); 499 + SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n", 500 + submit_reloc.submit_offset); 420 501 ret = -EINVAL; 421 502 goto out; 422 503 } ··· 425 508 426 509 if ((off >= (obj->size / 4)) || 427 510 (off < last_offset)) { 428 - DRM_ERROR("invalid offset %u at reloc %u\n", off, i); 511 + SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i); 429 512 ret = -EINVAL; 430 513 goto out; 431 514 } 432 515 433 - ret = submit_bo(submit, submit_reloc.reloc_idx, 
NULL, &iova, &valid); 516 + ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova); 434 517 if (ret) 435 518 goto out; 436 - 437 - if (valid) 438 - continue; 439 519 440 520 iova += submit_reloc.reloc_offset; 441 521 ··· 458 544 */ 459 545 static void submit_cleanup(struct msm_gem_submit *submit, bool error) 460 546 { 461 - unsigned cleanup_flags = BO_LOCKED; 462 - unsigned i; 463 - 464 - if (error) 465 - cleanup_flags |= BO_PINNED; 466 - 467 - for (i = 0; i < submit->nr_bos; i++) { 468 - struct drm_gem_object *obj = submit->bos[i].obj; 469 - submit_cleanup_bo(submit, i, cleanup_flags); 470 - if (error) 471 - drm_gem_object_put(obj); 547 + if (error) { 548 + submit_unpin_objects(submit); 549 + /* job wasn't enqueued to scheduler, so early retirement: */ 550 + msm_submit_retire(submit); 472 551 } 552 + 553 + if (submit->exec.objects) 554 + drm_exec_fini(&submit->exec); 473 555 } 474 556 475 557 void msm_submit_retire(struct msm_gem_submit *submit) ··· 659 749 struct msm_submit_post_dep *post_deps = NULL; 660 750 struct drm_syncobj **syncobjs_to_reset = NULL; 661 751 int out_fence_fd = -1; 662 - bool has_ww_ticket = false; 663 752 unsigned i; 664 753 int ret; 665 754 ··· 764 855 goto out; 765 856 766 857 /* copy_*_user while holding a ww ticket upsets lockdep */ 767 - ww_acquire_init(&submit->ticket, &reservation_ww_class); 768 - has_ww_ticket = true; 769 858 ret = submit_lock_objects(submit); 770 859 if (ret) 771 860 goto out; 772 861 773 - ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); 774 - if (ret) 775 - goto out; 862 + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { 863 + ret = submit_fence_sync(submit); 864 + if (ret) 865 + goto out; 866 + } 776 867 777 868 ret = submit_pin_objects(submit); 778 869 if (ret) ··· 782 873 struct drm_gem_object *obj; 783 874 uint64_t iova; 784 875 785 - ret = submit_bo(submit, submit->cmd[i].idx, 786 - &obj, &iova, NULL); 876 + ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova); 787 877 if (ret) 788 878 goto out; 789 879 790 880 if (!submit->cmd[i].size || 791 881 ((submit->cmd[i].size + submit->cmd[i].offset) > 792 882 obj->size / 4)) { 793 - DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4); 883 + SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4); 794 884 ret = -EINVAL; 795 885 goto out; 796 886 } 797 887 798 888 submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4); 799 889 800 - if (submit->valid) 890 + if (likely(!submit->cmd[i].nr_relocs)) 801 891 continue; 802 892 803 893 if (!gpu->allow_relocs) { 804 - if (submit->cmd[i].nr_relocs) { 805 - DRM_ERROR("relocs not allowed\n"); 806 - ret = -EINVAL; 807 - goto out; 808 - } 809 - 810 - continue; 894 + SUBMIT_ERROR(submit, "relocs not allowed\n"); 895 + ret = -EINVAL; 896 + goto out; 811 897 } 812 898 813 899 ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4, ··· 878 974 } 879 975 } 880 976 977 + if (ret) 978 + goto out; 979 + 881 980 submit_attach_object_fences(submit); 882 981 883 982 /* The scheduler owns a ref now: */ ··· 900 993 901 994 out: 902 995 submit_cleanup(submit, !!ret); 903 - if (has_ww_ticket) 904 - ww_acquire_fini(&submit->ticket); 905 996 out_unlock: 906 997 mutex_unlock(&queue->lock); 907 998 out_post_unlock:
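
The submit path above drops the hand-rolled ww_acquire/dma_resv_lock retry loop in favour of the drm_exec helper now embedded in struct msm_gem_submit. A standalone sketch of the locking loop it follows (hypothetical objs[]/nr arguments, not the actual submit code):

    #include <drm/drm_exec.h>
    #include <drm/drm_gem.h>

    /* Lock every object and reserve one fence slot each, redoing the whole
     * loop if a contended lock forces a back-off. */
    static int example_lock_objects(struct drm_gem_object **objs, unsigned int nr)
    {
            struct drm_exec exec;
            int ret = 0;

            /* The new third argument is a hint for how many objects follow. */
            drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, nr);

            drm_exec_until_all_locked(&exec) {
                    for (unsigned int i = 0; i < nr; i++) {
                            ret = drm_exec_prepare_obj(&exec, objs[i], 1);
                            /* Unwinds all locks and restarts on -EDEADLK. */
                            drm_exec_retry_on_contention(&exec);
                            if (ret)
                                    goto out;
                    }
            }

            /*
             * All objects are locked here: pin pages, patch the cmdstream,
             * attach fences, then queue the job.
             */
    out:
            drm_exec_fini(&exec);   /* drops every lock taken above */
            return ret;
    }

drm_exec_prepare_obj() both locks the object and reserves fence slots, which is why the separate dma_resv_reserve_fences() call disappears from submit_fence_sync() in the diff.
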
+23 -21
drivers/gpu/drm/msm/msm_gpu.c
··· 292 292 /* Set the active crash state to be dumped on failure */ 293 293 gpu->crashstate = state; 294 294 295 - /* FIXME: Release the crashstate if this errors out? */ 296 - dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, 295 + dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, 297 296 msm_gpu_devcoredump_read, msm_gpu_devcoredump_free); 298 297 } 299 298 #else ··· 365 366 DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name); 366 367 367 368 submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); 368 - if (submit) { 369 - /* Increment the fault counts */ 370 - submit->queue->faults++; 371 - if (submit->aspace) 372 - submit->aspace->faults++; 373 369 374 - get_comm_cmdline(submit, &comm, &cmd); 370 + /* 371 + * If the submit retired while we were waiting for the worker to run, 372 + * or waiting to acquire the gpu lock, then nothing more to do. 373 + */ 374 + if (!submit) 375 + goto out_unlock; 375 376 376 - if (comm && cmd) { 377 - DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n", 378 - gpu->name, comm, cmd); 377 + /* Increment the fault counts */ 378 + submit->queue->faults++; 379 + if (submit->aspace) 380 + submit->aspace->faults++; 379 381 380 - msm_rd_dump_submit(priv->hangrd, submit, 381 - "offending task: %s (%s)", comm, cmd); 382 - } else { 383 - msm_rd_dump_submit(priv->hangrd, submit, NULL); 384 - } 382 + get_comm_cmdline(submit, &comm, &cmd); 383 + 384 + if (comm && cmd) { 385 + DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n", 386 + gpu->name, comm, cmd); 387 + 388 + msm_rd_dump_submit(priv->hangrd, submit, 389 + "offending task: %s (%s)", comm, cmd); 385 390 } else { 386 - /* 387 - * We couldn't attribute this fault to any particular context, 388 - * so increment the global fault count instead. 389 - */ 390 - gpu->global_faults++; 391 + DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name); 392 + 393 + msm_rd_dump_submit(priv->hangrd, submit, NULL); 391 394 } 392 395 393 396 /* Record the crash state */ ··· 442 441 443 442 pm_runtime_put(&gpu->pdev->dev); 444 443 444 + out_unlock: 445 445 mutex_unlock(&gpu->lock); 446 446 447 447 msm_gpu_retire(gpu);
+70 -36
drivers/gpu/drm/msm/msm_mdss.c
··· 28 28 29 29 #define MIN_IB_BW 400000000UL /* Min ib vote 400MB */ 30 30 31 + #define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */ 32 + 31 33 struct msm_mdss { 32 34 struct device *dev; 33 35 ··· 42 40 struct irq_domain *domain; 43 41 } irq_controller; 44 42 const struct msm_mdss_data *mdss_data; 45 - struct icc_path *path[2]; 46 - u32 num_paths; 43 + struct icc_path *mdp_path[2]; 44 + u32 num_mdp_paths; 45 + struct icc_path *reg_bus_path; 47 46 }; 48 47 49 48 static int msm_mdss_parse_data_bus_icc_path(struct device *dev, ··· 52 49 { 53 50 struct icc_path *path0; 54 51 struct icc_path *path1; 52 + struct icc_path *reg_bus_path; 55 53 56 - path0 = of_icc_get(dev, "mdp0-mem"); 54 + path0 = devm_of_icc_get(dev, "mdp0-mem"); 57 55 if (IS_ERR_OR_NULL(path0)) 58 56 return PTR_ERR_OR_ZERO(path0); 59 57 60 - msm_mdss->path[0] = path0; 61 - msm_mdss->num_paths = 1; 58 + msm_mdss->mdp_path[0] = path0; 59 + msm_mdss->num_mdp_paths = 1; 62 60 63 - path1 = of_icc_get(dev, "mdp1-mem"); 61 + path1 = devm_of_icc_get(dev, "mdp1-mem"); 64 62 if (!IS_ERR_OR_NULL(path1)) { 65 - msm_mdss->path[1] = path1; 66 - msm_mdss->num_paths++; 63 + msm_mdss->mdp_path[1] = path1; 64 + msm_mdss->num_mdp_paths++; 67 65 } 68 66 67 + reg_bus_path = of_icc_get(dev, "cpu-cfg"); 68 + if (!IS_ERR_OR_NULL(reg_bus_path)) 69 + msm_mdss->reg_bus_path = reg_bus_path; 70 + 69 71 return 0; 70 - } 71 - 72 - static void msm_mdss_put_icc_path(void *data) 73 - { 74 - struct msm_mdss *msm_mdss = data; 75 - int i; 76 - 77 - for (i = 0; i < msm_mdss->num_paths; i++) 78 - icc_put(msm_mdss->path[i]); 79 - } 80 - 81 - static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw) 82 - { 83 - int i; 84 - 85 - for (i = 0; i < msm_mdss->num_paths; i++) 86 - icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw)); 87 72 } 88 73 89 74 static void msm_mdss_irq(struct irq_desc *desc) ··· 227 236 228 237 static int msm_mdss_enable(struct msm_mdss *msm_mdss) 229 238 { 230 - int ret; 239 + int ret, i; 231 240 232 241 /* 233 242 * Several components have AXI clocks that can only be turned on if 234 243 * the interconnect is enabled (non-zero bandwidth). Let's make sure 235 244 * that the interconnects are at least at a minimum amount. 236 245 */ 237 - msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW); 246 + for (i = 0; i < msm_mdss->num_mdp_paths; i++) 247 + icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW)); 248 + 249 + if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw) 250 + icc_set_bw(msm_mdss->reg_bus_path, 0, 251 + msm_mdss->mdss_data->reg_bus_bw); 252 + else 253 + icc_set_bw(msm_mdss->reg_bus_path, 0, 254 + DEFAULT_REG_BW); 238 255 239 256 ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks); 240 257 if (ret) { ··· 294 295 295 296 static int msm_mdss_disable(struct msm_mdss *msm_mdss) 296 297 { 298 + int i; 299 + 297 300 clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks); 298 - msm_mdss_icc_request_bw(msm_mdss, 0); 301 + 302 + for (i = 0; i < msm_mdss->num_mdp_paths; i++) 303 + icc_set_bw(msm_mdss->mdp_path[i], 0, 0); 304 + 305 + if (msm_mdss->reg_bus_path) 306 + icc_set_bw(msm_mdss->reg_bus_path, 0, 0); 299 307 300 308 return 0; 301 309 } ··· 390 384 if (!msm_mdss) 391 385 return ERR_PTR(-ENOMEM); 392 386 387 + msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev); 388 + 393 389 msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? 
"mdss_phys" : "mdss"); 394 390 if (IS_ERR(msm_mdss->mmio)) 395 391 return ERR_CAST(msm_mdss->mmio); ··· 399 391 dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio); 400 392 401 393 ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss); 402 - if (ret) 403 - return ERR_PTR(ret); 404 - ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss); 405 394 if (ret) 406 395 return ERR_PTR(ret); 407 396 ··· 482 477 if (IS_ERR(mdss)) 483 478 return PTR_ERR(mdss); 484 479 485 - mdss->mdss_data = of_device_get_match_data(&pdev->dev); 486 - 487 480 platform_set_drvdata(pdev, mdss); 488 481 489 482 /* ··· 513 510 .ubwc_enc_version = UBWC_1_0, 514 511 .ubwc_dec_version = UBWC_1_0, 515 512 .highest_bank_bit = 2, 513 + .reg_bus_bw = 76800, 516 514 }; 517 515 518 516 static const struct msm_mdss_data qcm2290_data = { 519 517 /* no UBWC */ 520 518 .highest_bank_bit = 0x2, 519 + .reg_bus_bw = 76800, 521 520 }; 522 521 523 522 static const struct msm_mdss_data sc7180_data = { ··· 527 522 .ubwc_dec_version = UBWC_2_0, 528 523 .ubwc_static = 0x1e, 529 524 .highest_bank_bit = 0x3, 525 + .reg_bus_bw = 76800, 530 526 }; 531 527 532 528 static const struct msm_mdss_data sc7280_data = { ··· 537 531 .ubwc_static = 1, 538 532 .highest_bank_bit = 1, 539 533 .macrotile_mode = 1, 534 + .reg_bus_bw = 74000, 540 535 }; 541 536 542 537 static const struct msm_mdss_data sc8180x_data = { ··· 545 538 .ubwc_dec_version = UBWC_3_0, 546 539 .highest_bank_bit = 3, 547 540 .macrotile_mode = 1, 541 + .reg_bus_bw = 76800, 548 542 }; 549 543 550 544 static const struct msm_mdss_data sc8280xp_data = { ··· 553 545 .ubwc_dec_version = UBWC_4_0, 554 546 .ubwc_swizzle = 6, 555 547 .ubwc_static = 1, 556 - .highest_bank_bit = 2, 548 + .highest_bank_bit = 3, 557 549 .macrotile_mode = 1, 550 + .reg_bus_bw = 76800, 551 + }; 552 + 553 + static const struct msm_mdss_data sdm670_data = { 554 + .ubwc_enc_version = UBWC_2_0, 555 + .ubwc_dec_version = UBWC_2_0, 556 + .highest_bank_bit = 1, 558 557 }; 559 558 560 559 static const struct msm_mdss_data sdm845_data = { 561 560 .ubwc_enc_version = UBWC_2_0, 562 561 .ubwc_dec_version = UBWC_2_0, 563 562 .highest_bank_bit = 2, 563 + .reg_bus_bw = 76800, 564 564 }; 565 565 566 566 static const struct msm_mdss_data sm6350_data = { ··· 577 561 .ubwc_swizzle = 6, 578 562 .ubwc_static = 0x1e, 579 563 .highest_bank_bit = 1, 564 + .reg_bus_bw = 76800, 580 565 }; 581 566 582 567 static const struct msm_mdss_data sm8150_data = { 583 568 .ubwc_enc_version = UBWC_3_0, 584 569 .ubwc_dec_version = UBWC_3_0, 585 570 .highest_bank_bit = 2, 571 + .reg_bus_bw = 76800, 586 572 }; 587 573 588 574 static const struct msm_mdss_data sm6115_data = { ··· 593 575 .ubwc_swizzle = 7, 594 576 .ubwc_static = 0x11f, 595 577 .highest_bank_bit = 0x1, 578 + .reg_bus_bw = 76800, 596 579 }; 597 580 598 581 static const struct msm_mdss_data sm6125_data = { ··· 611 592 /* TODO: highest_bank_bit = 2 for LP_DDR4 */ 612 593 .highest_bank_bit = 3, 613 594 .macrotile_mode = 1, 595 + .reg_bus_bw = 76800, 596 + }; 597 + 598 + static const struct msm_mdss_data sm8350_data = { 599 + .ubwc_enc_version = UBWC_4_0, 600 + .ubwc_dec_version = UBWC_4_0, 601 + .ubwc_swizzle = 6, 602 + .ubwc_static = 1, 603 + /* TODO: highest_bank_bit = 2 for LP_DDR4 */ 604 + .highest_bank_bit = 3, 605 + .macrotile_mode = 1, 606 + .reg_bus_bw = 74000, 614 607 }; 615 608 616 609 static const struct msm_mdss_data sm8550_data = { ··· 633 602 /* TODO: highest_bank_bit = 2 for LP_DDR4 */ 634 603 .highest_bank_bit = 3, 635 604 
.macrotile_mode = 1, 605 + .reg_bus_bw = 57000, 636 606 }; 637 607 static const struct of_device_id mdss_dt_match[] = { 638 608 { .compatible = "qcom,mdss" }, 639 609 { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data }, 640 610 { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data }, 611 + { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data }, 641 612 { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data }, 642 613 { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data }, 643 614 { .compatible = "qcom,sc7280-mdss", .data = &sc7280_data }, ··· 651 618 { .compatible = "qcom,sm6375-mdss", .data = &sm6350_data }, 652 619 { .compatible = "qcom,sm8150-mdss", .data = &sm8150_data }, 653 620 { .compatible = "qcom,sm8250-mdss", .data = &sm8250_data }, 654 - { .compatible = "qcom,sm8350-mdss", .data = &sm8250_data }, 655 - { .compatible = "qcom,sm8450-mdss", .data = &sm8250_data }, 621 + { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data }, 622 + { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data }, 656 623 { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data }, 624 + { .compatible = "qcom,sm8650-mdss", .data = &sm8550_data}, 657 625 {} 658 626 }; 659 627 MODULE_DEVICE_TABLE(of, mdss_dt_match);
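
Besides the mdp*-mem data paths, the MDSS driver now also votes on the register bus ("cpu-cfg") using a per-SoC reg_bus_bw value, falling back to DEFAULT_REG_BW. A rough sketch of the interconnect pattern, with made-up names (the 76800 kBps figure mirrors the common per-SoC value above):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/interconnect.h>

    /* Stand-in for the per-SoC reg_bus_bw value, in kBps. */
    #define EXAMPLE_REG_BUS_BW      76800

    static struct icc_path *example_reg_bus_path;

    static int example_reg_bus_enable(struct device *dev)
    {
            /* "cpu-cfg" is the interconnect-names entry for the register bus. */
            example_reg_bus_path = devm_of_icc_get(dev, "cpu-cfg");
            if (IS_ERR(example_reg_bus_path))
                    return PTR_ERR(example_reg_bus_path);

            /* icc_set_bw() takes average and peak bandwidth in kBps and is a
             * no-op for a NULL (optional, not described) path. */
            return icc_set_bw(example_reg_bus_path, 0, EXAMPLE_REG_BUS_BW);
    }

    static void example_reg_bus_disable(void)
    {
            /* Drop the vote again when the block powers down. */
            icc_set_bw(example_reg_bus_path, 0, 0);
    }
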
+1
drivers/gpu/drm/msm/msm_mdss.h
··· 14 14 u32 ubwc_static; 15 15 u32 highest_bank_bit; 16 16 u32 macrotile_mode; 17 + u32 reg_bus_bw; 17 18 }; 18 19 19 20 #define UBWC_1_0 0x10000000
+3
drivers/gpu/drm/msm/msm_rd.c
··· 270 270 struct msm_rd_state *rd; 271 271 int ret; 272 272 273 + if (!priv->gpu_pdev) 274 + return 0; 275 + 273 276 /* only create on first minor: */ 274 277 if (priv->rd) 275 278 return 0;
+2 -1
drivers/gpu/drm/msm/msm_ringbuffer.c
··· 29 29 struct drm_gem_object *obj = submit->bos[i].obj; 30 30 31 31 msm_gem_unpin_active(obj); 32 - submit->bos[i].flags &= ~BO_PINNED; 33 32 } 33 + 34 + submit->bos_pinned = false; 34 35 35 36 mutex_unlock(&priv->lru.lock); 36 37
+1 -1
drivers/gpu/drm/nouveau/nouveau_uvmm.c
··· 1347 1347 } 1348 1348 } 1349 1349 1350 - drm_exec_init(exec, vme->flags); 1350 + drm_exec_init(exec, vme->flags, 0); 1351 1351 drm_exec_until_all_locked(exec) { 1352 1352 ret = bind_lock_validate(job, exec, vme->num_fences); 1353 1353 drm_exec_retry_on_contention(exec);
+8 -8
drivers/gpu/drm/tests/drm_exec_test.c
··· 46 46 { 47 47 struct drm_exec exec; 48 48 49 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 49 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 50 50 drm_exec_fini(&exec); 51 51 KUNIT_SUCCEED(test); 52 52 } ··· 60 60 61 61 drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE); 62 62 63 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 63 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 64 64 drm_exec_until_all_locked(&exec) { 65 65 ret = drm_exec_lock_obj(&exec, &gobj); 66 66 drm_exec_retry_on_contention(&exec); ··· 80 80 81 81 drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE); 82 82 83 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 83 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 84 84 drm_exec_until_all_locked(&exec) { 85 85 ret = drm_exec_lock_obj(&exec, &gobj); 86 86 drm_exec_retry_on_contention(&exec); ··· 107 107 108 108 drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE); 109 109 110 - drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES); 110 + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); 111 111 drm_exec_until_all_locked(&exec) { 112 112 ret = drm_exec_lock_obj(&exec, &gobj); 113 113 drm_exec_retry_on_contention(&exec); ··· 134 134 135 135 drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE); 136 136 137 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 137 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 138 138 drm_exec_until_all_locked(&exec) { 139 139 ret = drm_exec_prepare_obj(&exec, &gobj, 1); 140 140 drm_exec_retry_on_contention(&exec); ··· 159 159 drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE); 160 160 drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE); 161 161 162 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 162 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 163 163 drm_exec_until_all_locked(&exec) 164 164 ret = drm_exec_prepare_array(&exec, array, ARRAY_SIZE(array), 165 165 1); ··· 174 174 { 175 175 struct drm_exec exec; 176 176 177 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 177 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 178 178 drm_exec_until_all_locked(&exec) 179 179 { 180 180 break; 181 181 } 182 182 drm_exec_fini(&exec); 183 183 184 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); 184 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 185 185 drm_exec_until_all_locked(&exec) 186 186 { 187 187 break;
+1 -1
include/drm/drm_exec.h
··· 135 135 return !!exec->contended; 136 136 } 137 137 138 - void drm_exec_init(struct drm_exec *exec, uint32_t flags); 138 + void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr); 139 139 void drm_exec_fini(struct drm_exec *exec); 140 140 bool drm_exec_cleanup(struct drm_exec *exec); 141 141 int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+3
include/uapi/drm/msm_drm.h
··· 86 86 #define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */ 87 87 #define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */ 88 88 #define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */ 89 + #define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */ 89 90 90 91 /* For backwards compat. The original support for preemption was based on 91 92 * a single ring per priority level so # of priority levels equals the # ··· 140 139 #define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */ 141 140 #define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */ 142 141 #define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */ 142 + #define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */ 143 + #define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */ 143 144 144 145 struct drm_msm_gem_info { 145 146 __u32 handle; /* in */
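
For the new metadata UAPI added above, a hedged userspace sketch of the round trip through DRM_IOCTL_MSM_GEM_INFO; the helper names are made up, and it assumes an open DRM fd, a valid GEM handle, libdrm's drmIoctl() wrapper, and the 128-byte kernel-side size cap from this series:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <xf86drm.h>
    #include <drm/msm_drm.h>

    /* Attach an opaque blob (<= 128 bytes per the kernel-side bound) to a BO. */
    static int example_set_metadata(int fd, uint32_t handle,
                                    const void *blob, uint32_t size)
    {
            struct drm_msm_gem_info req = {
                    .handle = handle,
                    .info = MSM_INFO_SET_METADATA,
                    .value = (uintptr_t)blob,
                    .len = size,
            };

            return drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
    }

    /* Read the blob back: size query first, then the actual copy. */
    static int example_get_metadata(int fd, uint32_t handle,
                                    void *buf, uint32_t bufsize, uint32_t *out_len)
    {
            struct drm_msm_gem_info req = {
                    .handle = handle,
                    .info = MSM_INFO_GET_METADATA,
                    /* value == 0: the kernel only reports the size in .len */
            };
            int ret;

            ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
            if (ret)
                    return ret;
            if (req.len > bufsize)
                    return -1;

            req.value = (uintptr_t)buf;
            req.len = bufsize;
            ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
            if (!ret)
                    *out_len = req.len;
            return ret;
    }

Querying with value == 0 only returns the current metadata size in len, which is how userspace sizes its buffer before the second call.
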