Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-msm-next-2019-06-25' of https://gitlab.freedesktop.org/drm/msm into drm-next

+ usual progress on cleanups
+ dsi vs EPROBE_DEFER fixes
+ msm8998 (snapdragon 835 support)
+ a540 gpu support (mesa support already landed)
+ dsi, dsi-phy support
+ mdp5 and dpu interconnect (bus/memory scaling) support
+ initial prep work for per-context pagetables (at least the parts that
don't have external dependencies like iommu/arm-smmu)

There is one more patch for fixing DSI cmd mode panels (part of a set of
patches to get things working on nexus5), but it would conflict with
1cff7440a86e04a613665803b42034 in drm-next without rebasing or back-merge,
and since it doesn't conflict with anything in msm-next, I think it best
if Sean merges that through drm-misc-fixes instead.

(In other news, I've been making some progress w/ getting efifb working
properly on sdm850 laptop without horrible hacks, and drm/msm + clk stuff
not totally falling over when bootloader enables display and things are
already running when driver probes.. but not quite ready yet, hopefully
we can post some of that for 5.4.. should help for both the sdm835 and
sdm850 laptops.)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsj3N4XzDLSDoa+4RHZ9wXObYmhcep0M3LjnRg48BeLvg@mail.gmail.com

+717 -540
+10
Documentation/devicetree/bindings/display/msm/dpu.txt
··· 28 28 - #address-cells: number of address cells for the MDSS children. Should be 1. 29 29 - #size-cells: Should be 1. 30 30 - ranges: parent bus address space is the same as the child bus address space. 31 + - interconnects : interconnect path specifier for MDSS according to 32 + Documentation/devicetree/bindings/interconnect/interconnect.txt. Should be 33 + 2 paths corresponding to 2 AXI ports. 34 + - interconnect-names : MDSS will have 2 port names to differentiate between the 35 + 2 interconnect paths defined with interconnect specifier. 31 36 32 37 Optional properties: 33 38 - assigned-clocks: list of clock specifiers for clocks needing rate assignment ··· 90 85 interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; 91 86 interrupt-controller; 92 87 #interrupt-cells = <1>; 88 + 89 + interconnects = <&rsc_hlos MASTER_MDP0 &rsc_hlos SLAVE_EBI1>, 90 + <&rsc_hlos MASTER_MDP1 &rsc_hlos SLAVE_EBI1>; 91 + 92 + interconnect-names = "mdp0-mem", "mdp1-mem"; 93 93 94 94 iommus = <&apps_iommu 0>; 95 95
+1
Documentation/devicetree/bindings/display/msm/dsi.txt
··· 88 88 * "qcom,dsi-phy-28nm-8960" 89 89 * "qcom,dsi-phy-14nm" 90 90 * "qcom,dsi-phy-10nm" 91 + * "qcom,dsi-phy-10nm-8998" 91 92 - reg: Physical base address and length of the registers of PLL, PHY. Some 92 93 revisions require the PHY regulator base address, whereas others require the 93 94 PHY lane base address. See below for each PHY revision.
+11 -13
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
··· 395 395 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e, 396 396 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8, 397 397 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7, 398 - 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356, 399 - 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d, 400 - 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472, 401 - 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef, 402 - 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511, 403 - 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed, 404 - 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a, 405 - 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce, 406 - 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec, 407 - 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749, 408 - 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d, 409 - 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036, 410 - 0x303c, 0x303c, 0x305e, 0x305f, 398 + 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444, 399 + 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 400 + 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 401 + 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 402 + 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 403 + 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 404 + 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 405 + 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 406 + 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 407 + 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d, 408 + 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f, 411 409 ~0 /* sentinel */ 412 410 }; 413 411
+14 -12
drivers/gpu/drm/msm/adreno/a5xx.xml.h
··· 8 8 git clone https://github.com/freedreno/envytools.git 9 9 10 10 The rules-ng-ng source files this header was generated from are: 11 - - /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13) 12 - - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13) 13 - - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03) 14 - - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54) 15 - - /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54) 16 - - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) 17 - - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) 18 - - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54) 19 - - /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54) 20 - - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07) 21 - - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) 11 + - /home/ubuntu/envytools/envytools/rnndb/./adreno.xml ( 501 bytes, from 2019-05-29 01:28:15) 12 + - /home/ubuntu/envytools/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2019-05-29 01:28:15) 13 + - /home/ubuntu/envytools/envytools/rnndb/adreno/a2xx.xml ( 79608 bytes, from 2019-05-29 01:28:15) 14 + - /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_common.xml ( 14239 bytes, from 2019-05-29 01:28:15) 15 + - /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_pm4.xml ( 43155 bytes, from 2019-05-29 01:28:15) 16 + - /home/ubuntu/envytools/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2019-05-29 01:28:15) 17 + - /home/ubuntu/envytools/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2019-05-29 01:28:15) 18 + - 
/home/ubuntu/envytools/envytools/rnndb/adreno/a5xx.xml ( 147291 bytes, from 2019-05-29 14:51:41) 19 + - /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx.xml ( 148461 bytes, from 2019-05-29 01:28:15) 20 + - /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2019-05-29 01:28:15) 21 + - /home/ubuntu/envytools/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2019-05-29 01:28:15) 22 22 23 - Copyright (C) 2013-2018 by the following authors: 23 + Copyright (C) 2013-2019 by the following authors: 24 24 - Rob Clark <robdclark@gmail.com> (robclark) 25 25 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 26 26 ··· 2147 2147 #define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00 2148 2148 2149 2149 #define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01 2150 + 2151 + #define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04 2150 2152 2151 2153 #define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05 2152 2154
+2 -6
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
··· 149 149 int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) 150 150 { 151 151 struct drm_device *dev; 152 - struct dentry *ent; 153 152 int ret; 154 153 155 154 if (!minor) ··· 165 166 return ret; 166 167 } 167 168 168 - ent = debugfs_create_file("reset", S_IWUGO, 169 - minor->debugfs_root, 170 - dev, &reset_fops); 171 - if (!ent) 172 - return -ENOMEM; 169 + debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev, 170 + &reset_fops); 173 171 174 172 return 0; 175 173 }
+39 -1
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 309 309 310 310 void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) 311 311 { 312 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 312 313 unsigned int i; 313 314 314 315 for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) 315 316 gpu_write(gpu, a5xx_hwcg[i].offset, 316 317 state ? a5xx_hwcg[i].value : 0); 318 + 319 + if (adreno_is_a540(adreno_gpu)) { 320 + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0); 321 + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0); 322 + } 317 323 318 324 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); 319 325 gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180); ··· 504 498 505 499 gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); 506 500 501 + if (adreno_is_a540(adreno_gpu)) 502 + gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); 503 + 507 504 /* Make all blocks contribute to the GPU BUSY perf counter */ 508 505 gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF); 509 506 ··· 567 558 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); 568 559 569 560 gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); 570 - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); 561 + if (adreno_is_a530(adreno_gpu)) 562 + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); 563 + if (adreno_is_a540(adreno_gpu)) 564 + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); 571 565 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); 572 566 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); 573 567 ··· 595 583 /* Set the highest bank bit */ 596 584 gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7); 597 585 gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1); 586 + if (adreno_is_a540(adreno_gpu)) 587 + gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2); 598 588 599 589 /* Protect registers from the CP */ 600 590 gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007); ··· 646 632 gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 647 633 
REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000); 648 634 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); 635 + 636 + /* Put the GPU into 64 bit by default */ 637 + gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1); 638 + gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1); 639 + gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1); 640 + gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1); 641 + gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1); 642 + gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1); 643 + gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1); 644 + gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1); 645 + gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1); 646 + gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1); 647 + gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1); 648 + gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); 649 + 650 + /* 651 + * VPC corner case with local memory load kill leads to corrupt 652 + * internal state. Normal Disable does not work for all a5x chips. 653 + * So do the following setting to disable it. 654 + */ 655 + if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) { 656 + gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23)); 657 + gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0); 658 + } 649 659 650 660 ret = adreno_hw_init(gpu); 651 661 if (ret)
+73 -3
drivers/gpu/drm/msm/adreno/a5xx_power.c
··· 23 23 #define AGC_POWER_CONFIG_PRODUCTION_ID 1 24 24 #define AGC_INIT_MSG_VALUE 0xBABEFACE 25 25 26 + /* AGC_LM_CONFIG (A540+) */ 27 + #define AGC_LM_CONFIG (136/4) 28 + #define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17 29 + #define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1 30 + #define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8) 31 + #define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4) 32 + #define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4) 33 + #define AGC_LM_CONFIG_LLM_ENABLED (1 << 16) 34 + #define AGC_LM_CONFIG_BCL_DISABLED (1 << 24) 35 + 36 + #define AGC_LEVEL_CONFIG (140/4) 37 + 26 38 static struct { 27 39 uint32_t reg; 28 40 uint32_t value; ··· 119 107 } 120 108 121 109 /* Setup thermal limit management */ 122 - static void a5xx_lm_setup(struct msm_gpu *gpu) 110 + static void a530_lm_setup(struct msm_gpu *gpu) 123 111 { 124 112 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 125 113 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); ··· 168 156 gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE); 169 157 } 170 158 159 + #define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32)) 160 + #define LM_DCVS_LIMIT 1 161 + #define LEVEL_CONFIG ~(0x303) 162 + 163 + static void a540_lm_setup(struct msm_gpu *gpu) 164 + { 165 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 166 + u32 config; 167 + 168 + /* The battery current limiter isn't enabled for A540 */ 169 + config = AGC_LM_CONFIG_BCL_DISABLED; 170 + config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT; 171 + 172 + /* For now disable GPMU side throttling */ 173 + config |= AGC_LM_CONFIG_THROTTLE_DISABLE; 174 + 175 + /* Until we get clock scaling 0 is always the active power level */ 176 + gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0); 177 + 178 + /* Fixed at 6000 for now */ 179 + gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000); 180 + 181 + gpu_write(gpu, AGC_MSG_STATE, 0x80000001); 182 + gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID); 183 + 184 + gpu_write(gpu, 
AGC_MSG_PAYLOAD(0), 5448); 185 + gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1); 186 + 187 + gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate)); 188 + gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000); 189 + 190 + gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config); 191 + gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG); 192 + gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 193 + PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1)); 194 + 195 + gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE); 196 + } 197 + 171 198 /* Enable SP/TP cpower collapse */ 172 199 static void a5xx_pc_init(struct msm_gpu *gpu) 173 200 { ··· 248 197 return -EINVAL; 249 198 } 250 199 251 - gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014); 200 + if (adreno_is_a530(adreno_gpu)) 201 + gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014); 252 202 253 203 /* Kick off the GPMU */ 254 204 gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0); ··· 263 211 DRM_ERROR("%s: GPMU firmware initialization timed out\n", 264 212 gpu->name); 265 213 214 + if (!adreno_is_a530(adreno_gpu)) { 215 + u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1); 216 + 217 + if (val) 218 + DRM_ERROR("%s: GPMU firmware initialization failed: %d\n", 219 + gpu->name, val); 220 + } 221 + 266 222 return 0; 267 223 } 268 224 269 225 /* Enable limits management */ 270 226 static void a5xx_lm_enable(struct msm_gpu *gpu) 271 227 { 228 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 229 + 230 + /* This init sequence only applies to A530 */ 231 + if (!adreno_is_a530(adreno_gpu)) 232 + return; 233 + 272 234 gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0); 273 235 gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A); 274 236 gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01); ··· 294 228 295 229 int a5xx_power_init(struct msm_gpu *gpu) 296 230 { 231 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 297 232 int ret; 298 233 299 234 /* Set up the limits management */ 300 - a5xx_lm_setup(gpu); 235 + if (adreno_is_a530(adreno_gpu)) 
236 + a530_lm_setup(gpu); 237 + else 238 + a540_lm_setup(gpu); 301 239 302 240 /* Set up SP/TP power collpase */ 303 241 a5xx_pc_init(gpu);
+46 -24
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 74 74 u32 val; 75 75 76 76 /* This can be called from gpu state code so make sure GMU is valid */ 77 - if (IS_ERR_OR_NULL(gmu->mmio)) 77 + if (!gmu->initialized) 78 78 return false; 79 79 80 80 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); ··· 90 90 u32 val; 91 91 92 92 /* This can be called from gpu state code so make sure GMU is valid */ 93 - if (IS_ERR_OR_NULL(gmu->mmio)) 93 + if (!gmu->initialized) 94 94 return false; 95 95 96 96 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); ··· 504 504 wmb(); 505 505 506 506 err: 507 - devm_iounmap(gmu->dev, pdcptr); 508 - devm_iounmap(gmu->dev, seqptr); 507 + if (!IS_ERR_OR_NULL(pdcptr)) 508 + iounmap(pdcptr); 509 + if (!IS_ERR_OR_NULL(seqptr)) 510 + iounmap(seqptr); 509 511 } 510 512 511 513 /* ··· 697 695 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 698 696 int status, ret; 699 697 700 - if (WARN(!gmu->mmio, "The GMU is not set up yet\n")) 698 + if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) 701 699 return 0; 702 700 703 701 gmu->hung = false; ··· 767 765 { 768 766 u32 reg; 769 767 770 - if (!gmu->mmio) 768 + if (!gmu->initialized) 771 769 return true; 772 770 773 771 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); ··· 1197 1195 return ERR_PTR(-EINVAL); 1198 1196 } 1199 1197 1200 - ret = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 1198 + ret = ioremap(res->start, resource_size(res)); 1201 1199 if (!ret) { 1202 1200 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); 1203 1201 return ERR_PTR(-EINVAL); ··· 1213 1211 1214 1212 irq = platform_get_irq_byname(pdev, name); 1215 1213 1216 - ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH, 1217 - name, gmu); 1214 + ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); 1218 1215 if (ret) { 1219 - DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name); 1216 + DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", 1217 + name, ret); 1220 1218 return ret; 1221 1219 } 
1222 1220 ··· 1229 1227 { 1230 1228 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 1231 1229 1232 - if (IS_ERR_OR_NULL(gmu->mmio)) 1230 + if (!gmu->initialized) 1233 1231 return; 1234 1232 1235 - a6xx_gmu_stop(a6xx_gpu); 1236 - 1237 - pm_runtime_disable(gmu->dev); 1233 + pm_runtime_force_suspend(gmu->dev); 1238 1234 1239 1235 if (!IS_ERR_OR_NULL(gmu->gxpd)) { 1240 1236 pm_runtime_disable(gmu->gxpd); 1241 1237 dev_pm_domain_detach(gmu->gxpd, false); 1242 1238 } 1243 1239 1244 - a6xx_gmu_irq_disable(gmu); 1240 + iounmap(gmu->mmio); 1241 + gmu->mmio = NULL; 1242 + 1245 1243 a6xx_gmu_memory_free(gmu, gmu->hfi); 1246 1244 1247 1245 iommu_detach_device(gmu->domain, gmu->dev); 1248 1246 1249 1247 iommu_domain_free(gmu->domain); 1248 + 1249 + free_irq(gmu->gmu_irq, gmu); 1250 + free_irq(gmu->hfi_irq, gmu); 1251 + 1252 + /* Drop reference taken in of_find_device_by_node */ 1253 + put_device(gmu->dev); 1254 + 1255 + gmu->initialized = false; 1250 1256 } 1251 1257 1252 - int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) 1258 + int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) 1253 1259 { 1254 1260 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 1255 1261 struct platform_device *pdev = of_find_device_by_node(node); ··· 1278 1268 /* Get the list of clocks */ 1279 1269 ret = a6xx_gmu_clocks_probe(gmu); 1280 1270 if (ret) 1281 - return ret; 1271 + goto err_put_device; 1282 1272 1283 1273 /* Set up the IOMMU context bank */ 1284 1274 ret = a6xx_gmu_memory_probe(gmu); 1285 1275 if (ret) 1286 - return ret; 1276 + goto err_put_device; 1287 1277 1288 1278 /* Allocate memory for for the HFI queues */ 1289 1279 gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K); 1290 1280 if (IS_ERR(gmu->hfi)) 1291 - goto err; 1281 + goto err_memory; 1292 1282 1293 1283 /* Allocate memory for the GMU debug region */ 1294 1284 gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K); 1295 1285 if (IS_ERR(gmu->debug)) 1296 - goto err; 1286 + goto err_memory; 1297 1287 1298 1288 /* Map the GMU 
registers */ 1299 1289 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); 1300 1290 if (IS_ERR(gmu->mmio)) 1301 - goto err; 1291 + goto err_memory; 1302 1292 1303 1293 /* Get the HFI and GMU interrupts */ 1304 1294 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); 1305 1295 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); 1306 1296 1307 1297 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) 1308 - goto err; 1298 + goto err_mmio; 1309 1299 1310 1300 /* 1311 1301 * Get a link to the GX power domain to reset the GPU in case of GMU ··· 1319 1309 /* Set up the HFI queues */ 1320 1310 a6xx_hfi_init(gmu); 1321 1311 1312 + gmu->initialized = true; 1313 + 1322 1314 return 0; 1323 - err: 1315 + 1316 + err_mmio: 1317 + iounmap(gmu->mmio); 1318 + free_irq(gmu->gmu_irq, gmu); 1319 + free_irq(gmu->hfi_irq, gmu); 1320 + err_memory: 1324 1321 a6xx_gmu_memory_free(gmu, gmu->hfi); 1325 1322 1326 1323 if (gmu->domain) { ··· 1335 1318 1336 1319 iommu_domain_free(gmu->domain); 1337 1320 } 1321 + ret = -ENODEV; 1338 1322 1339 - return -ENODEV; 1323 + err_put_device: 1324 + /* Drop reference taken in of_find_device_by_node */ 1325 + put_device(gmu->dev); 1326 + 1327 + return ret; 1340 1328 }
+1
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
··· 75 75 76 76 struct a6xx_hfi_queue queues[2]; 77 77 78 + bool initialized; 78 79 bool hung; 79 80 }; 80 81
+15 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 391 391 REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000); 392 392 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); 393 393 394 + /* Turn on 64 bit addressing for all blocks */ 395 + gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1); 396 + gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1); 397 + gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1); 398 + gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1); 399 + gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1); 400 + gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1); 401 + gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1); 402 + gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1); 403 + gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1); 404 + gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1); 405 + gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); 406 + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); 407 + 394 408 /* enable hardware clockgating */ 395 409 a6xx_set_hwcg(gpu, true); 396 410 ··· 868 854 /* FIXME: How do we gracefully handle this? */ 869 855 BUG_ON(!node); 870 856 871 - ret = a6xx_gmu_probe(a6xx_gpu, node); 857 + ret = a6xx_gmu_init(a6xx_gpu, node); 872 858 if (ret) { 873 859 a6xx_destroy(&(a6xx_gpu->base.base)); 874 860 return ERR_PTR(ret);
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
··· 53 53 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); 54 54 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); 55 55 56 - int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node); 56 + int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); 57 57 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu); 58 58 59 59 void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
+19 -1
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 145 145 .init = a5xx_gpu_init, 146 146 .zapfw = "a530_zap.mdt", 147 147 }, { 148 + .rev = ADRENO_REV(5, 4, 0, 2), 149 + .revn = 540, 150 + .name = "A540", 151 + .fw = { 152 + [ADRENO_FW_PM4] = "a530_pm4.fw", 153 + [ADRENO_FW_PFP] = "a530_pfp.fw", 154 + [ADRENO_FW_GPMU] = "a540_gpmu.fw2", 155 + }, 156 + .gmem = SZ_1M, 157 + /* 158 + * Increase inactive period to 250 to avoid bouncing 159 + * the GDSC which appears to make it grumpy 160 + */ 161 + .inactive_period = 250, 162 + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, 163 + .init = a5xx_gpu_init, 164 + .zapfw = "a540_zap.mdt", 165 + }, { 148 166 .rev = ADRENO_REV(6, 3, 0, ANY_ID), 149 167 .revn = 630, 150 168 .name = "A630", ··· 369 351 { 370 352 struct msm_gpu *gpu = dev_get_drvdata(dev); 371 353 372 - gpu->funcs->pm_suspend(gpu); 354 + pm_runtime_force_suspend(dev); 373 355 gpu->funcs->destroy(gpu); 374 356 375 357 set_gpu_pdev(dev_get_drvdata(master), NULL);
+7 -1
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 67 67 return ret; 68 68 69 69 mem_phys = r.start; 70 - mem_size = resource_size(&r); 71 70 72 71 /* Request the MDT file for the firmware */ 73 72 fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); ··· 79 80 mem_size = qcom_mdt_get_size(fw); 80 81 if (mem_size < 0) { 81 82 ret = mem_size; 83 + goto out; 84 + } 85 + 86 + if (mem_size > resource_size(&r)) { 87 + DRM_DEV_ERROR(dev, 88 + "memory region is too small to load the MDT\n"); 89 + ret = -E2BIG; 82 90 goto out; 83 91 } 84 92
+6
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 61 61 enum adreno_quirks { 62 62 ADRENO_QUIRK_TWO_PASS_USE_WFI = 1, 63 63 ADRENO_QUIRK_FAULT_DETECT_MASK = 2, 64 + ADRENO_QUIRK_LMLOADKILL_DISABLE = 3, 64 65 }; 65 66 66 67 struct adreno_rev { ··· 220 219 static inline int adreno_is_a530(struct adreno_gpu *gpu) 221 220 { 222 221 return gpu->revn == 530; 222 + } 223 + 224 + static inline int adreno_is_a540(struct adreno_gpu *gpu) 225 + { 226 + return gpu->revn == 540; 223 227 } 224 228 225 229 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+66 -106
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
··· 69 69 struct dpu_core_perf_params *perf) 70 70 { 71 71 struct dpu_crtc_state *dpu_cstate; 72 - int i; 73 72 74 73 if (!kms || !kms->catalog || !crtc || !state || !perf) { 75 74 DPU_ERROR("invalid parameters\n"); ··· 79 80 memset(perf, 0, sizeof(struct dpu_core_perf_params)); 80 81 81 82 if (!dpu_cstate->bw_control) { 82 - for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) { 83 - perf->bw_ctl[i] = kms->catalog->perf.max_bw_high * 83 + perf->bw_ctl = kms->catalog->perf.max_bw_high * 84 84 1000ULL; 85 - perf->max_per_pipe_ib[i] = perf->bw_ctl[i]; 86 - } 85 + perf->max_per_pipe_ib = perf->bw_ctl; 87 86 perf->core_clk_rate = kms->perf.max_core_clk_rate; 88 87 } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) { 89 - for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) { 90 - perf->bw_ctl[i] = 0; 91 - perf->max_per_pipe_ib[i] = 0; 92 - } 88 + perf->bw_ctl = 0; 89 + perf->max_per_pipe_ib = 0; 93 90 perf->core_clk_rate = 0; 94 91 } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) { 95 - for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) { 96 - perf->bw_ctl[i] = kms->perf.fix_core_ab_vote; 97 - perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote; 98 - } 92 + perf->bw_ctl = kms->perf.fix_core_ab_vote; 93 + perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote; 99 94 perf->core_clk_rate = kms->perf.fix_core_clk_rate; 100 95 } 101 96 102 97 DPU_DEBUG( 103 - "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n", 98 + "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n", 104 99 crtc->base.id, perf->core_clk_rate, 105 - perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC], 106 - perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC], 107 - perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC], 108 - perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC], 109 - perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI], 110 - perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]); 100 + perf->max_per_pipe_ib, perf->bw_ctl); 111 101 } 112 102 113 
103 int dpu_core_perf_crtc_check(struct drm_crtc *crtc, ··· 109 121 struct dpu_crtc_state *dpu_cstate; 110 122 struct drm_crtc *tmp_crtc; 111 123 struct dpu_kms *kms; 112 - int i; 113 124 114 125 if (!crtc || !state) { 115 126 DPU_ERROR("invalid crtc\n"); ··· 130 143 /* obtain new values */ 131 144 _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf); 132 145 133 - for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC; 134 - i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) { 135 - bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i]; 136 - curr_client_type = dpu_crtc_get_client_type(crtc); 146 + bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl; 147 + curr_client_type = dpu_crtc_get_client_type(crtc); 137 148 138 - drm_for_each_crtc(tmp_crtc, crtc->dev) { 139 - if (tmp_crtc->enabled && 140 - (dpu_crtc_get_client_type(tmp_crtc) == 141 - curr_client_type) && 142 - (tmp_crtc != crtc)) { 143 - struct dpu_crtc_state *tmp_cstate = 144 - to_dpu_crtc_state(tmp_crtc->state); 149 + drm_for_each_crtc(tmp_crtc, crtc->dev) { 150 + if (tmp_crtc->enabled && 151 + (dpu_crtc_get_client_type(tmp_crtc) == 152 + curr_client_type) && (tmp_crtc != crtc)) { 153 + struct dpu_crtc_state *tmp_cstate = 154 + to_dpu_crtc_state(tmp_crtc->state); 145 155 146 - DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n", 147 - tmp_crtc->base.id, 148 - tmp_cstate->new_perf.bw_ctl[i], 149 - tmp_cstate->bw_control); 150 - /* 151 - * For bw check only use the bw if the 152 - * atomic property has been already set 153 - */ 154 - if (tmp_cstate->bw_control) 155 - bw_sum_of_intfs += 156 - tmp_cstate->new_perf.bw_ctl[i]; 157 - } 156 + DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n", 157 + tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl, 158 + tmp_cstate->bw_control); 159 + /* 160 + * For bw check only use the bw if the 161 + * atomic property has been already set 162 + */ 163 + if (tmp_cstate->bw_control) 164 + bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl; 158 165 } 159 166 160 167 /* convert bandwidth to kb */ ··· 179 198 } 180 199 181 200 static int 
_dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, 182 - struct drm_crtc *crtc, u32 bus_id) 201 + struct drm_crtc *crtc) 183 202 { 184 - struct dpu_core_perf_params perf = { { 0 } }; 203 + struct dpu_core_perf_params perf = { 0 }; 185 204 enum dpu_crtc_client_type curr_client_type 186 205 = dpu_crtc_get_client_type(crtc); 187 206 struct drm_crtc *tmp_crtc; ··· 194 213 dpu_crtc_get_client_type(tmp_crtc)) { 195 214 dpu_cstate = to_dpu_crtc_state(tmp_crtc->state); 196 215 197 - perf.max_per_pipe_ib[bus_id] = 198 - max(perf.max_per_pipe_ib[bus_id], 199 - dpu_cstate->new_perf.max_per_pipe_ib[bus_id]); 216 + perf.max_per_pipe_ib = max(perf.max_per_pipe_ib, 217 + dpu_cstate->new_perf.max_per_pipe_ib); 200 218 201 - DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n", 202 - tmp_crtc->base.id, bus_id, 203 - dpu_cstate->new_perf.bw_ctl[bus_id]); 219 + DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id, 220 + dpu_cstate->new_perf.bw_ctl); 204 221 } 205 222 } 206 223 return ret; ··· 218 239 struct dpu_crtc *dpu_crtc; 219 240 struct dpu_crtc_state *dpu_cstate; 220 241 struct dpu_kms *kms; 221 - int i; 222 242 223 243 if (!crtc) { 224 244 DPU_ERROR("invalid crtc\n"); ··· 253 275 if (kms->perf.enable_bw_release) { 254 276 trace_dpu_cmd_release_bw(crtc->base.id); 255 277 DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id); 256 - for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) { 257 - dpu_crtc->cur_perf.bw_ctl[i] = 0; 258 - _dpu_core_perf_crtc_update_bus(kms, crtc, i); 259 - } 278 + dpu_crtc->cur_perf.bw_ctl = 0; 279 + _dpu_core_perf_crtc_update_bus(kms, crtc); 260 280 } 261 281 } 262 282 ··· 297 321 int params_changed, bool stop_req) 298 322 { 299 323 struct dpu_core_perf_params *new, *old; 300 - int update_bus = 0, update_clk = 0; 324 + bool update_bus = false, update_clk = false; 301 325 u64 clk_rate = 0; 302 326 struct dpu_crtc *dpu_crtc; 303 327 struct dpu_crtc_state *dpu_cstate; 304 - int i; 305 328 struct msm_drm_private *priv; 306 329 struct dpu_kms *kms; 307 330 int ret; ··· 327 352 
new = &dpu_cstate->new_perf; 328 353 329 354 if (crtc->enabled && !stop_req) { 330 - for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) { 331 - /* 332 - * cases for bus bandwidth update. 333 - * 1. new bandwidth vote - "ab or ib vote" is higher 334 - * than current vote for update request. 335 - * 2. new bandwidth vote - "ab or ib vote" is lower 336 - * than current vote at end of commit or stop. 337 - */ 338 - if ((params_changed && ((new->bw_ctl[i] > 339 - old->bw_ctl[i]) || 340 - (new->max_per_pipe_ib[i] > 341 - old->max_per_pipe_ib[i]))) || 342 - (!params_changed && ((new->bw_ctl[i] < 343 - old->bw_ctl[i]) || 344 - (new->max_per_pipe_ib[i] < 345 - old->max_per_pipe_ib[i])))) { 346 - DPU_DEBUG( 347 - "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n", 348 - crtc->base.id, params_changed, 349 - new->bw_ctl[i], old->bw_ctl[i]); 350 - old->bw_ctl[i] = new->bw_ctl[i]; 351 - old->max_per_pipe_ib[i] = 352 - new->max_per_pipe_ib[i]; 353 - update_bus |= BIT(i); 354 - } 355 + /* 356 + * cases for bus bandwidth update. 357 + * 1. new bandwidth vote - "ab or ib vote" is higher 358 + * than current vote for update request. 359 + * 2. new bandwidth vote - "ab or ib vote" is lower 360 + * than current vote at end of commit or stop. 
361 + */ 362 + if ((params_changed && ((new->bw_ctl > old->bw_ctl) || 363 + (new->max_per_pipe_ib > old->max_per_pipe_ib))) || 364 + (!params_changed && ((new->bw_ctl < old->bw_ctl) || 365 + (new->max_per_pipe_ib < old->max_per_pipe_ib)))) { 366 + DPU_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n", 367 + crtc->base.id, params_changed, 368 + new->bw_ctl, old->bw_ctl); 369 + old->bw_ctl = new->bw_ctl; 370 + old->max_per_pipe_ib = new->max_per_pipe_ib; 371 + update_bus = true; 355 372 } 356 373 357 374 if ((params_changed && 358 - (new->core_clk_rate > old->core_clk_rate)) || 359 - (!params_changed && 360 - (new->core_clk_rate < old->core_clk_rate))) { 375 + (new->core_clk_rate > old->core_clk_rate)) || 376 + (!params_changed && 377 + (new->core_clk_rate < old->core_clk_rate))) { 361 378 old->core_clk_rate = new->core_clk_rate; 362 - update_clk = 1; 379 + update_clk = true; 363 380 } 364 381 } else { 365 382 DPU_DEBUG("crtc=%d disable\n", crtc->base.id); 366 383 memset(old, 0, sizeof(*old)); 367 384 memset(new, 0, sizeof(*new)); 368 - update_bus = ~0; 369 - update_clk = 1; 385 + update_bus = true; 386 + update_clk = true; 370 387 } 371 - trace_dpu_perf_crtc_update(crtc->base.id, 372 - new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC], 373 - new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC], 374 - new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI], 375 - new->core_clk_rate, stop_req, 376 - update_bus, update_clk); 377 388 378 - for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) { 379 - if (update_bus & BIT(i)) { 380 - ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i); 381 - if (ret) { 382 - DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n", 383 - crtc->base.id, i); 384 - return ret; 385 - } 389 + trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl, 390 + new->core_clk_rate, stop_req, update_bus, update_clk); 391 + 392 + if (update_bus) { 393 + ret = _dpu_core_perf_crtc_update_bus(kms, crtc); 394 + if (ret) { 395 + DPU_ERROR("crtc-%d: failed to update bus bw vote\n", 396 + 
crtc->base.id); 397 + return ret; 386 398 } 387 399 } 388 400 ··· 460 498 struct dentry *entry; 461 499 462 500 entry = debugfs_create_dir("core_perf", parent); 463 - if (IS_ERR_OR_NULL(entry)) 464 - return -EINVAL; 465 501 466 502 debugfs_create_u64("max_core_clk_rate", 0600, entry, 467 503 &perf->max_core_clk_rate);
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
··· 34 34 * @core_clk_rate: core clock rate request 35 35 */ 36 36 struct dpu_core_perf_params { 37 - u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX]; 38 - u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX]; 37 + u64 max_per_pipe_ib; 38 + u64 bw_ctl; 39 39 u64 core_clk_rate; 40 40 }; 41 41
+3 -11
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 1233 1233 { 1234 1234 struct drm_crtc *crtc = (struct drm_crtc *) s->private; 1235 1235 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1236 - int i; 1237 1236 1238 1237 seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc)); 1239 1238 seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc)); 1240 1239 seq_printf(s, "core_clk_rate: %llu\n", 1241 1240 dpu_crtc->cur_perf.core_clk_rate); 1242 - for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC; 1243 - i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) { 1244 - seq_printf(s, "bw_ctl[%d]: %llu\n", i, 1245 - dpu_crtc->cur_perf.bw_ctl[i]); 1246 - seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i, 1247 - dpu_crtc->cur_perf.max_per_pipe_ib[i]); 1248 - } 1241 + seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl); 1242 + seq_printf(s, "max_per_pipe_ib: %llu\n", 1243 + dpu_crtc->cur_perf.max_per_pipe_ib); 1249 1244 1250 1245 return 0; 1251 1246 } ··· 1259 1264 1260 1265 dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name, 1261 1266 crtc->dev->primary->debugfs_root); 1262 - if (!dpu_crtc->debugfs_root) 1263 - return -ENOMEM; 1264 1267 1265 - /* don't error check these */ 1266 1268 debugfs_create_file("status", 0400, 1267 1269 dpu_crtc->debugfs_root, 1268 1270 dpu_crtc, &debugfs_status_fops);
-5
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 622 622 } 623 623 } 624 624 625 - if (!ret) 626 - drm_mode_set_crtcinfo(adj_mode, 0); 627 - 628 625 trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags, 629 626 adj_mode->private_flags); 630 627 ··· 1982 1985 /* create overall sub-directory for the encoder */ 1983 1986 dpu_enc->debugfs_root = debugfs_create_dir(name, 1984 1987 drm_enc->dev->primary->debugfs_root); 1985 - if (!dpu_enc->debugfs_root) 1986 - return -ENOMEM; 1987 1988 1988 1989 /* don't error check these */ 1989 1990 debugfs_create_file("status", 0600,
-110
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
··· 471 471 }; 472 472 473 473 /* 474 - * A5x tile formats tables: 475 - * These tables hold the A5x tile formats supported. 476 - */ 477 - static const struct dpu_format dpu_format_map_tile[] = { 478 - INTERLEAVED_RGB_FMT_TILED(BGR565, 479 - 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, 480 - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, 481 - false, 2, 0, 482 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 483 - 484 - INTERLEAVED_RGB_FMT_TILED(ARGB8888, 485 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 486 - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, 487 - true, 4, 0, 488 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 489 - 490 - INTERLEAVED_RGB_FMT_TILED(ABGR8888, 491 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 492 - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, 493 - true, 4, 0, 494 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 495 - 496 - INTERLEAVED_RGB_FMT_TILED(XBGR8888, 497 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 498 - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, 499 - false, 4, 0, 500 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 501 - 502 - INTERLEAVED_RGB_FMT_TILED(RGBA8888, 503 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 504 - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, 505 - true, 4, 0, 506 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 507 - 508 - INTERLEAVED_RGB_FMT_TILED(BGRA8888, 509 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 510 - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, 511 - true, 4, 0, 512 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 513 - 514 - INTERLEAVED_RGB_FMT_TILED(BGRX8888, 515 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 516 - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, 517 - false, 4, 0, 518 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 519 - 520 - INTERLEAVED_RGB_FMT_TILED(XRGB8888, 521 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 522 - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, 523 - false, 4, 0, 524 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 525 - 526 - INTERLEAVED_RGB_FMT_TILED(RGBX8888, 527 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 528 - 
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, 529 - false, 4, 0, 530 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 531 - 532 - INTERLEAVED_RGB_FMT_TILED(ABGR2101010, 533 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 534 - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, 535 - true, 4, DPU_FORMAT_FLAG_DX, 536 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 537 - 538 - INTERLEAVED_RGB_FMT_TILED(XBGR2101010, 539 - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 540 - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, 541 - true, 4, DPU_FORMAT_FLAG_DX, 542 - DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED), 543 - 544 - PSEUDO_YUV_FMT_TILED(NV12, 545 - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 546 - C1_B_Cb, C2_R_Cr, 547 - DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV, 548 - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12), 549 - 550 - PSEUDO_YUV_FMT_TILED(NV21, 551 - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 552 - C2_R_Cr, C1_B_Cb, 553 - DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV, 554 - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12), 555 - }; 556 - 557 - /* 558 474 * UBWC formats table: 559 475 * This table holds the UBWC formats supported. 
560 476 * If a compression ratio needs to be used for this or any other format, ··· 512 596 C1_B_Cb, C2_R_Cr, 513 597 DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV | 514 598 DPU_FORMAT_FLAG_COMPRESSED, 515 - DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12), 516 - }; 517 - 518 - static const struct dpu_format dpu_format_map_p010[] = { 519 - PSEUDO_YUV_FMT_LOOSE(NV12, 520 - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 521 - C1_B_Cb, C2_R_Cr, 522 - DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX), 523 - DPU_FETCH_LINEAR, 2), 524 - }; 525 - 526 - static const struct dpu_format dpu_format_map_p010_ubwc[] = { 527 - PSEUDO_YUV_FMT_LOOSE_TILED(NV12, 528 - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 529 - C1_B_Cb, C2_R_Cr, 530 - DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX | 531 - DPU_FORMAT_FLAG_COMPRESSED), 532 - DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12), 533 - }; 534 - 535 - static const struct dpu_format dpu_format_map_tp10_ubwc[] = { 536 - PSEUDO_YUV_FMT_TILED(NV12, 537 - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 538 - C1_B_Cb, C2_R_Cr, 539 - DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX | 540 - DPU_FORMAT_FLAG_COMPRESSED), 541 599 DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12), 542 600 }; 543 601
+3 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
··· 106 106 rc = -EPERM; 107 107 } 108 108 109 - if (rc) { 110 - msm_dss_enable_clk(&clk_arry[i], 111 - i, false); 109 + if (rc && i) { 110 + msm_dss_enable_clk(&clk_arry[i - 1], 111 + i - 1, false); 112 112 break; 113 113 } 114 114 }
+16 -30
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 56 56 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask" 57 57 58 58 static int dpu_kms_hw_init(struct msm_kms *kms); 59 - static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); 59 + static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); 60 60 61 61 static unsigned long dpu_iomap_size(struct platform_device *pdev, 62 62 const char *name) ··· 142 142 struct dentry *parent) 143 143 { 144 144 struct dentry *entry = debugfs_create_dir("danger", parent); 145 - if (IS_ERR_OR_NULL(entry)) 146 - return; 147 145 148 146 debugfs_create_file("danger_status", 0600, entry, 149 147 dpu_kms, &dpu_debugfs_danger_stats_fops); ··· 216 218 } 217 219 } 218 220 219 - void *dpu_debugfs_create_regset32(const char *name, umode_t mode, 221 + void dpu_debugfs_create_regset32(const char *name, umode_t mode, 220 222 void *parent, struct dpu_debugfs_regset32 *regset) 221 223 { 222 224 if (!name || !regset || !regset->dpu_kms || !regset->blk_len) 223 - return NULL; 225 + return; 224 226 225 227 /* make sure offset is a multiple of 4 */ 226 228 regset->offset = round_down(regset->offset, 4); 227 229 228 - return debugfs_create_file(name, mode, parent, 229 - regset, &dpu_fops_regset32); 230 + debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32); 230 231 } 231 232 232 - static int _dpu_debugfs_init(struct dpu_kms *dpu_kms) 233 + static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) 233 234 { 235 + struct dpu_kms *dpu_kms = to_dpu_kms(kms); 234 236 void *p = dpu_hw_util_get_log_mask_ptr(); 235 237 struct dentry *entry; 236 238 237 239 if (!p) 238 240 return -EINVAL; 239 241 240 - entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root); 241 - if (IS_ERR_OR_NULL(entry)) 242 - return -ENODEV; 242 + entry = debugfs_create_dir("debug", minor->debugfs_root); 243 243 244 - /* allow root to be NULL */ 245 244 debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p); 246 245 247 246 dpu_debugfs_danger_init(dpu_kms, entry); ··· 573 578 return 
ret; 574 579 } 575 580 576 - #ifdef CONFIG_DEBUG_FS 577 - static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) 578 - { 579 - return _dpu_debugfs_init(to_dpu_kms(kms)); 580 - } 581 - #endif 582 - 583 581 static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate, 584 582 struct drm_encoder *encoder) 585 583 { ··· 713 725 #endif 714 726 }; 715 727 716 - static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) 728 + static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) 717 729 { 718 730 struct msm_mmu *mmu; 731 + 732 + if (!dpu_kms->base.aspace) 733 + return; 719 734 720 735 mmu = dpu_kms->base.aspace->mmu; 721 736 ··· 726 735 ARRAY_SIZE(iommu_ports)); 727 736 msm_gem_address_space_put(dpu_kms->base.aspace); 728 737 729 - return 0; 738 + dpu_kms->base.aspace = NULL; 730 739 } 731 740 732 741 static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) ··· 745 754 aspace = msm_gem_address_space_create(dpu_kms->dev->dev, 746 755 domain, "dpu1"); 747 756 if (IS_ERR(aspace)) { 748 - ret = PTR_ERR(aspace); 749 - goto fail; 757 + iommu_domain_free(domain); 758 + return PTR_ERR(aspace); 750 759 } 751 - 752 - dpu_kms->base.aspace = aspace; 753 760 754 761 ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, 755 762 ARRAY_SIZE(iommu_ports)); 756 763 if (ret) { 757 764 DPU_ERROR("failed to attach iommu %d\n", ret); 758 765 msm_gem_address_space_put(aspace); 759 - goto fail; 766 + return ret; 760 767 } 761 768 769 + dpu_kms->base.aspace = aspace; 762 770 return 0; 763 - fail: 764 - _dpu_kms_mmu_destroy(dpu_kms); 765 - 766 - return ret; 767 771 } 768 772 769 773 static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+1 -5
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
··· 197 197 * @mode: File mode within debugfs 198 198 * @parent: Parent directory entry within debugfs, can be NULL 199 199 * @regset: Pointer to persistent register block definition 200 - * 201 - * Return: dentry pointer for newly created file, use either debugfs_remove() 202 - * or debugfs_remove_recursive() (on a parent directory) to remove the 203 - * file 204 200 */ 205 - void *dpu_debugfs_create_regset32(const char *name, umode_t mode, 201 + void dpu_debugfs_create_regset32(const char *name, umode_t mode, 206 202 void *parent, struct dpu_debugfs_regset32 *regset); 207 203 208 204 /**
+54 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
··· 4 4 */ 5 5 6 6 #include "dpu_kms.h" 7 + #include <linux/interconnect.h> 7 8 8 9 #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base) 9 10 10 11 #define HW_INTR_STATUS 0x0010 12 + 13 + /* Max BW defined in KBps */ 14 + #define MAX_BW 6800000 11 15 12 16 struct dpu_irq_controller { 13 17 unsigned long enabled_mask; ··· 25 21 u32 hwversion; 26 22 struct dss_module_power mp; 27 23 struct dpu_irq_controller irq_controller; 24 + struct icc_path *path[2]; 25 + u32 num_paths; 28 26 }; 27 + 28 + static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev, 29 + struct dpu_mdss *dpu_mdss) 30 + { 31 + struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem"); 32 + struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem"); 33 + 34 + if (IS_ERR_OR_NULL(path0)) 35 + return PTR_ERR_OR_ZERO(path0); 36 + 37 + dpu_mdss->path[0] = path0; 38 + dpu_mdss->num_paths = 1; 39 + 40 + if (!IS_ERR_OR_NULL(path1)) { 41 + dpu_mdss->path[1] = path1; 42 + dpu_mdss->num_paths++; 43 + } 44 + 45 + return 0; 46 + } 47 + 48 + static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss) 49 + { 50 + struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); 51 + int i; 52 + u64 avg_bw = dpu_mdss->num_paths ? 
MAX_BW / dpu_mdss->num_paths : 0; 53 + 54 + for (i = 0; i < dpu_mdss->num_paths; i++) 55 + icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW)); 56 + } 29 57 30 58 static void dpu_mdss_irq(struct irq_desc *desc) 31 59 { ··· 172 136 struct dss_module_power *mp = &dpu_mdss->mp; 173 137 int ret; 174 138 139 + dpu_mdss_icc_request_bw(mdss); 140 + 175 141 ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); 176 142 if (ret) 177 143 DPU_ERROR("clock enable failed, ret:%d\n", ret); ··· 185 147 { 186 148 struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); 187 149 struct dss_module_power *mp = &dpu_mdss->mp; 188 - int ret; 150 + int ret, i; 189 151 190 152 ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); 191 153 if (ret) 192 154 DPU_ERROR("clock disable failed, ret:%d\n", ret); 155 + 156 + for (i = 0; i < dpu_mdss->num_paths; i++) 157 + icc_set_bw(dpu_mdss->path[i], 0, 0); 193 158 194 159 return ret; 195 160 } ··· 204 163 struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss); 205 164 struct dss_module_power *mp = &dpu_mdss->mp; 206 165 int irq; 166 + int i; 207 167 208 168 pm_runtime_suspend(dev->dev); 209 169 pm_runtime_disable(dev->dev); ··· 213 171 irq_set_chained_handler_and_data(irq, NULL, NULL); 214 172 msm_dss_put_clk(mp->clk_config, mp->num_clk); 215 173 devm_kfree(&pdev->dev, mp->clk_config); 174 + 175 + for (i = 0; i < dpu_mdss->num_paths; i++) 176 + icc_put(dpu_mdss->path[i]); 216 177 217 178 if (dpu_mdss->mmio) 218 179 devm_iounmap(&pdev->dev, dpu_mdss->mmio); ··· 256 211 } 257 212 dpu_mdss->mmio_len = resource_size(res); 258 213 214 + ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss); 215 + if (ret) 216 + return ret; 217 + 259 218 mp = &dpu_mdss->mp; 260 219 ret = msm_dss_parse_clock(pdev, mp); 261 220 if (ret) { ··· 281 232 irq_set_chained_handler_and_data(irq, dpu_mdss_irq, 282 233 dpu_mdss); 283 234 235 + priv->mdss = &dpu_mdss->base; 236 + 284 237 pm_runtime_enable(dev->dev); 238 + 239 + dpu_mdss_icc_request_bw(priv->mdss); 285 240 
286 241 pm_runtime_get_sync(dev->dev); 287 242 dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio); 288 243 pm_runtime_put_sync(dev->dev); 289 - 290 - priv->mdss = &dpu_mdss->base; 291 244 292 245 return ret; 293 246
+3 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 21 21 #include <linux/debugfs.h> 22 22 #include <linux/dma-buf.h> 23 23 24 + #include <drm/drm_damage_helper.h> 24 25 #include <drm/drm_atomic_uapi.h> 25 26 26 27 #include "msm_drv.h" ··· 1325 1324 debugfs_create_dir(pdpu->pipe_name, 1326 1325 plane->dev->primary->debugfs_root); 1327 1326 1328 - if (!pdpu->debugfs_root) 1329 - return -ENOMEM; 1330 - 1331 1327 /* don't error check these */ 1332 1328 debugfs_create_x32("features", 0600, 1333 1329 pdpu->debugfs_root, &pdpu->features); ··· 1532 1534 ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max); 1533 1535 if (ret) 1534 1536 DPU_ERROR("failed to install zpos property, rc = %d\n", ret); 1537 + 1538 + drm_plane_enable_fb_damage_clips(plane); 1535 1539 1536 1540 /* success! finalize initialization */ 1537 1541 drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+7 -15
drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
··· 138 138 ) 139 139 140 140 TRACE_EVENT(dpu_perf_crtc_update, 141 - TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc, 142 - u64 bw_ctl_ebi, u32 core_clk_rate, 143 - bool stop_req, u32 update_bus, u32 update_clk), 144 - TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate, 145 - stop_req, update_bus, update_clk), 141 + TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate, 142 + bool stop_req, bool update_bus, bool update_clk), 143 + TP_ARGS(crtc, bw_ctl, core_clk_rate, stop_req, update_bus, update_clk), 146 144 TP_STRUCT__entry( 147 145 __field(u32, crtc) 148 - __field(u64, bw_ctl_mnoc) 149 - __field(u64, bw_ctl_llcc) 150 - __field(u64, bw_ctl_ebi) 146 + __field(u64, bw_ctl) 151 147 __field(u32, core_clk_rate) 152 148 __field(bool, stop_req) 153 149 __field(u32, update_bus) ··· 151 155 ), 152 156 TP_fast_assign( 153 157 __entry->crtc = crtc; 154 - __entry->bw_ctl_mnoc = bw_ctl_mnoc; 155 - __entry->bw_ctl_llcc = bw_ctl_llcc; 156 - __entry->bw_ctl_ebi = bw_ctl_ebi; 158 + __entry->bw_ctl = bw_ctl; 157 159 __entry->core_clk_rate = core_clk_rate; 158 160 __entry->stop_req = stop_req; 159 161 __entry->update_bus = update_bus; 160 162 __entry->update_clk = update_clk; 161 163 ), 162 164 TP_printk( 163 - "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d", 165 + "crtc=%d bw_ctl=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d", 164 166 __entry->crtc, 165 - __entry->bw_ctl_mnoc, 166 - __entry->bw_ctl_llcc, 167 - __entry->bw_ctl_ebi, 167 + __entry->bw_ctl, 168 168 __entry->core_clk_rate, 169 169 __entry->stop_req, 170 170 __entry->update_bus,
-4
drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
··· 310 310 int i, j; 311 311 312 312 entry = debugfs_create_dir("vbif", debugfs_root); 313 - if (IS_ERR_OR_NULL(entry)) 314 - return; 315 313 316 314 for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { 317 315 struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; ··· 317 319 snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id); 318 320 319 321 debugfs_vbif = debugfs_create_dir(vbif_name, entry); 320 - if (IS_ERR_OR_NULL(debugfs_vbif)) 321 - continue; 322 322 323 323 debugfs_create_u32("features", 0600, debugfs_vbif, 324 324 (u32 *)&vbif->features);
+3
drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
··· 15 15 * this program. If not, see <http://www.gnu.org/licenses/>. 16 16 */ 17 17 18 + #include <drm/drm_damage_helper.h> 18 19 #include "mdp4_kms.h" 19 20 20 21 #define DOWN_SCALE_MAX 8 ··· 391 390 drm_plane_helper_add(plane, &mdp4_plane_helper_funcs); 392 391 393 392 mdp4_plane_install_properties(plane, &plane->base); 393 + 394 + drm_plane_enable_fb_damage_clips(plane); 394 395 395 396 return plane; 396 397
+1 -1
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
··· 713 713 if (cfg_handler) 714 714 mdp5_cfg_destroy(cfg_handler); 715 715 716 - return NULL; 716 + return ERR_PTR(ret); 717 717 } 718 718 719 719 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
+38
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 16 16 * this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 18 19 + #include <linux/interconnect.h> 19 20 #include <linux/of_irq.h> 20 21 21 22 #include "msm_drv.h" ··· 1049 1048 .unbind = mdp5_unbind, 1050 1049 }; 1051 1050 1051 + static int mdp5_setup_interconnect(struct platform_device *pdev) 1052 + { 1053 + struct icc_path *path0 = of_icc_get(&pdev->dev, "mdp0-mem"); 1054 + struct icc_path *path1 = of_icc_get(&pdev->dev, "mdp1-mem"); 1055 + struct icc_path *path_rot = of_icc_get(&pdev->dev, "rotator-mem"); 1056 + 1057 + if (IS_ERR(path0)) 1058 + return PTR_ERR(path0); 1059 + 1060 + if (!path0) { 1061 + /* no interconnect support is not necessarily a fatal 1062 + * condition, the platform may simply not have an 1063 + * interconnect driver yet. But warn about it in case 1064 + * bootloader didn't setup bus clocks high enough for 1065 + * scanout. 1066 + */ 1067 + dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n"); 1068 + return 0; 1069 + } 1070 + 1071 + icc_set_bw(path0, 0, MBps_to_icc(6400)); 1072 + 1073 + if (!IS_ERR_OR_NULL(path1)) 1074 + icc_set_bw(path1, 0, MBps_to_icc(6400)); 1075 + if (!IS_ERR_OR_NULL(path_rot)) 1076 + icc_set_bw(path_rot, 0, MBps_to_icc(6400)); 1077 + 1078 + return 0; 1079 + } 1080 + 1052 1081 static int mdp5_dev_probe(struct platform_device *pdev) 1053 1082 { 1083 + int ret; 1084 + 1054 1085 DBG(""); 1086 + 1087 + ret = mdp5_setup_interconnect(pdev); 1088 + if (ret) 1089 + return ret; 1090 + 1055 1091 return component_add(&pdev->dev, &mdp5_ops); 1056 1092 } 1057 1093
+3
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
··· 16 16 * this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 18 19 + #include <drm/drm_damage_helper.h> 19 20 #include <drm/drm_print.h> 20 21 #include "mdp5_kms.h" 21 22 ··· 1099 1098 drm_plane_helper_add(plane, &mdp5_plane_helper_funcs); 1100 1099 1101 1100 mdp5_plane_install_properties(plane, &plane->base); 1101 + 1102 + drm_plane_enable_fb_damage_clips(plane); 1102 1103 1103 1104 return plane; 1104 1105
+2
drivers/gpu/drm/msm/dsi/dsi.c
··· 242 242 goto fail; 243 243 } 244 244 245 + msm_dsi_manager_setup_encoder(msm_dsi->id); 246 + 245 247 priv->bridges[priv->num_bridges++] = msm_dsi->bridge; 246 248 priv->connectors[priv->num_connectors++] = msm_dsi->connector; 247 249
+3 -4
drivers/gpu/drm/msm/dsi/dsi.h
··· 71 71 */ 72 72 struct drm_panel *panel; 73 73 struct drm_bridge *external_bridge; 74 - unsigned long device_flags; 75 74 76 75 struct device *phy_dev; 77 76 bool phy_enabled; ··· 88 89 struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); 89 90 int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); 90 91 bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); 91 - void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags); 92 + void msm_dsi_manager_setup_encoder(int id); 92 93 int msm_dsi_manager_register(struct msm_dsi *msm_dsi); 93 94 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); 94 95 bool msm_dsi_manager_validate_current_config(u8 id); ··· 160 161 int msm_dsi_host_power_off(struct mipi_dsi_host *host); 161 162 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 162 163 const struct drm_display_mode *mode); 163 - struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, 164 - unsigned long *panel_flags); 164 + struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host); 165 + unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host); 165 166 struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host); 166 167 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); 167 168 void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+21
drivers/gpu/drm/msm/dsi/dsi_cfg.c
··· 110 110 .num_dsi = 2, 111 111 }; 112 112 113 + static const char * const dsi_msm8998_bus_clk_names[] = { 114 + "iface", "bus", "core", 115 + }; 116 + 117 + static const struct msm_dsi_config msm8998_dsi_cfg = { 118 + .io_offset = DSI_6G_REG_SHIFT, 119 + .reg_cfg = { 120 + .num = 2, 121 + .regs = { 122 + {"vdd", 367000, 16 }, /* 0.9 V */ 123 + {"vdda", 62800, 2 }, /* 1.2 V */ 124 + }, 125 + }, 126 + .bus_clk_names = dsi_msm8998_bus_clk_names, 127 + .num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names), 128 + .io_start = { 0xc994000, 0xc996000 }, 129 + .num_dsi = 2, 130 + }; 131 + 113 132 static const char * const dsi_sdm845_bus_clk_names[] = { 114 133 "iface", "bus", 115 134 }; ··· 197 178 &msm8916_dsi_cfg, &msm_dsi_6g_host_ops}, 198 179 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, 199 180 &msm8996_dsi_cfg, &msm_dsi_6g_host_ops}, 181 + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0, 182 + &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 200 183 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, 201 184 &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 202 185 };
+1
drivers/gpu/drm/msm/dsi/dsi_cfg.h
··· 17 17 #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 18 18 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 19 19 #define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 20 + #define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 20 21 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 21 22 22 23 #define MSM_DSI_V2_VER_MINOR_8064 0x0
+7 -12
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 1041 1041 ret = wait_for_completion_timeout(&msm_host->video_comp, 1042 1042 msecs_to_jiffies(70)); 1043 1043 1044 - if (ret <= 0) 1044 + if (ret == 0) 1045 1045 DRM_DEV_ERROR(dev, "wait for video done timed out\n"); 1046 1046 1047 1047 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); ··· 1589 1589 msm_host->lanes = dsi->lanes; 1590 1590 msm_host->format = dsi->format; 1591 1591 msm_host->mode_flags = dsi->mode_flags; 1592 - 1593 - msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags); 1594 1592 1595 1593 /* Some gpios defined in panel DT need to be controlled by host */ 1596 1594 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); ··· 2432 2434 return 0; 2433 2435 } 2434 2436 2435 - struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, 2436 - unsigned long *panel_flags) 2437 + struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host) 2437 2438 { 2438 - struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2439 - struct drm_panel *panel; 2439 + return of_drm_find_panel(to_msm_dsi_host(host)->device_node); 2440 + } 2440 2441 2441 - panel = of_drm_find_panel(msm_host->device_node); 2442 - if (panel_flags) 2443 - *panel_flags = msm_host->mode_flags; 2444 - 2445 - return panel; 2442 + unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host) 2443 + { 2444 + return to_msm_dsi_host(host)->mode_flags; 2446 2445 } 2447 2446 2448 2447 struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
+78 -81
drivers/gpu/drm/msm/dsi/dsi_manager.c
··· 225 225 return dsi_bridge->id; 226 226 } 227 227 228 + static bool dsi_mgr_is_cmd_mode(struct msm_dsi *msm_dsi) 229 + { 230 + unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host); 231 + return !(host_flags & MIPI_DSI_MODE_VIDEO); 232 + } 233 + 234 + void msm_dsi_manager_setup_encoder(int id) 235 + { 236 + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 237 + struct msm_drm_private *priv = msm_dsi->dev->dev_private; 238 + struct msm_kms *kms = priv->kms; 239 + struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi); 240 + 241 + if (encoder && kms->funcs->set_encoder_mode) 242 + kms->funcs->set_encoder_mode(kms, encoder, 243 + dsi_mgr_is_cmd_mode(msm_dsi)); 244 + } 245 + 246 + static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id) 247 + { 248 + struct msm_drm_private *priv = conn->dev->dev_private; 249 + struct msm_kms *kms = priv->kms; 250 + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 251 + struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); 252 + struct msm_dsi *master_dsi, *slave_dsi; 253 + struct drm_panel *panel; 254 + 255 + if (IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id)) { 256 + master_dsi = other_dsi; 257 + slave_dsi = msm_dsi; 258 + } else { 259 + master_dsi = msm_dsi; 260 + slave_dsi = other_dsi; 261 + } 262 + 263 + /* 264 + * There is only 1 panel in the global panel list for dual DSI mode. 265 + * Therefore slave dsi should get the drm_panel instance from master 266 + * dsi. 267 + */ 268 + panel = msm_dsi_host_get_panel(master_dsi->host); 269 + if (IS_ERR(panel)) { 270 + DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id, 271 + PTR_ERR(panel)); 272 + return PTR_ERR(panel); 273 + } 274 + 275 + if (!panel || !IS_DUAL_DSI()) 276 + goto out; 277 + 278 + drm_object_attach_property(&conn->base, 279 + conn->dev->mode_config.tile_property, 0); 280 + 281 + /* 282 + * Set split display info to kms once dual DSI panel is connected to 283 + * both hosts. 
284 + */ 285 + if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) { 286 + kms->funcs->set_split_display(kms, master_dsi->encoder, 287 + slave_dsi->encoder, 288 + dsi_mgr_is_cmd_mode(msm_dsi)); 289 + } 290 + 291 + out: 292 + msm_dsi->panel = panel; 293 + return 0; 294 + } 295 + 228 296 static enum drm_connector_status dsi_mgr_connector_detect( 229 297 struct drm_connector *connector, bool force) 230 298 { 231 299 int id = dsi_mgr_connector_get_id(connector); 232 300 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 233 - struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); 234 - struct msm_drm_private *priv = connector->dev->dev_private; 235 - struct msm_kms *kms = priv->kms; 236 - 237 - DBG("id=%d", id); 238 - if (!msm_dsi->panel) { 239 - msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host, 240 - &msm_dsi->device_flags); 241 - 242 - /* There is only 1 panel in the global panel list 243 - * for dual DSI mode. Therefore slave dsi should get 244 - * the drm_panel instance from master dsi, and 245 - * keep using the panel flags got from the current DSI link. 246 - */ 247 - if (!msm_dsi->panel && IS_DUAL_DSI() && 248 - !IS_MASTER_DSI_LINK(id) && other_dsi) 249 - msm_dsi->panel = msm_dsi_host_get_panel( 250 - other_dsi->host, NULL); 251 - 252 - 253 - if (msm_dsi->panel && kms->funcs->set_encoder_mode) { 254 - bool cmd_mode = !(msm_dsi->device_flags & 255 - MIPI_DSI_MODE_VIDEO); 256 - struct drm_encoder *encoder = 257 - msm_dsi_get_encoder(msm_dsi); 258 - 259 - kms->funcs->set_encoder_mode(kms, encoder, cmd_mode); 260 - } 261 - 262 - if (msm_dsi->panel && IS_DUAL_DSI()) 263 - drm_object_attach_property(&connector->base, 264 - connector->dev->mode_config.tile_property, 0); 265 - 266 - /* Set split display info to kms once dual DSI panel is 267 - * connected to both hosts. 
268 - */ 269 - if (msm_dsi->panel && IS_DUAL_DSI() && 270 - other_dsi && other_dsi->panel) { 271 - bool cmd_mode = !(msm_dsi->device_flags & 272 - MIPI_DSI_MODE_VIDEO); 273 - struct drm_encoder *encoder = msm_dsi_get_encoder( 274 - dsi_mgr_get_dsi(DSI_ENCODER_MASTER)); 275 - struct drm_encoder *slave_enc = msm_dsi_get_encoder( 276 - dsi_mgr_get_dsi(DSI_ENCODER_SLAVE)); 277 - 278 - if (kms->funcs->set_split_display) 279 - kms->funcs->set_split_display(kms, encoder, 280 - slave_enc, cmd_mode); 281 - else 282 - pr_err("mdp does not support dual DSI\n"); 283 - } 284 - } 285 301 286 302 return msm_dsi->panel ? connector_status_connected : 287 303 connector_status_disconnected; ··· 611 595 612 596 drm_connector_attach_encoder(connector, msm_dsi->encoder); 613 597 598 + ret = msm_dsi_manager_panel_init(connector, id); 599 + if (ret) { 600 + DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret); 601 + goto fail; 602 + } 603 + 614 604 return connector; 605 + 606 + fail: 607 + connector->funcs->destroy(msm_dsi->connector); 608 + return ERR_PTR(ret); 615 609 } 616 610 617 611 bool msm_dsi_manager_validate_current_config(u8 id) ··· 775 749 msm_dsi_host_cmd_xfer_commit(host, dma_base, len); 776 750 777 751 return true; 778 - } 779 - 780 - void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags) 781 - { 782 - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 783 - struct drm_device *dev = msm_dsi->dev; 784 - struct msm_drm_private *priv; 785 - struct msm_kms *kms; 786 - struct drm_encoder *encoder; 787 - bool cmd_mode; 788 - 789 - /* 790 - * drm_device pointer is assigned to msm_dsi only in the modeset_init 791 - * path. If mipi_dsi_attach() happens in DSI driver's probe path 792 - * (generally the case when we're connected to a drm_panel of the type 793 - * mipi_dsi_device), this would be NULL. In such cases, try to set the 794 - * encoder mode in the DSI connector's detect() op. 
795 - */ 796 - if (!dev) 797 - return; 798 - 799 - priv = dev->dev_private; 800 - kms = priv->kms; 801 - encoder = msm_dsi_get_encoder(msm_dsi); 802 - cmd_mode = !(device_flags & 803 - MIPI_DSI_MODE_VIDEO); 804 - 805 - if (encoder && kms->funcs->set_encoder_mode) 806 - kms->funcs->set_encoder_mode(kms, encoder, cmd_mode); 807 752 } 808 753 809 754 int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
+5 -1
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
··· 499 499 #ifdef CONFIG_DRM_MSM_DSI_10NM_PHY 500 500 { .compatible = "qcom,dsi-phy-10nm", 501 501 .data = &dsi_phy_10nm_cfgs }, 502 + { .compatible = "qcom,dsi-phy-10nm-8998", 503 + .data = &dsi_phy_10nm_8998_cfgs }, 502 504 #endif 503 505 {} 504 506 }; ··· 610 608 goto fail; 611 609 612 610 phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id); 613 - if (IS_ERR_OR_NULL(phy->pll)) 611 + if (IS_ERR_OR_NULL(phy->pll)) { 614 612 DRM_DEV_INFO(dev, 615 613 "%s: pll init failed: %ld, need separate pll clk driver\n", 616 614 __func__, PTR_ERR(phy->pll)); 615 + phy->pll = NULL; 616 + } 617 617 618 618 dsi_phy_disable_resource(phy); 619 619
+5
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
··· 13 13 #define dsi_phy_read(offset) msm_readl((offset)) 14 14 #define dsi_phy_write(offset, data) msm_writel((data), (offset)) 15 15 16 + /* v3.0.0 10nm implementation that requires the old timings settings */ 17 + #define V3_0_0_10NM_OLD_TIMINGS_QUIRK BIT(0) 18 + 16 19 struct msm_dsi_phy_ops { 17 20 int (*init) (struct msm_dsi_phy *phy); 18 21 int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, ··· 36 33 bool src_pll_truthtable[DSI_MAX][DSI_MAX]; 37 34 const resource_size_t io_start[DSI_MAX]; 38 35 const int num_dsi_phy; 36 + const int quirks; 39 37 }; 40 38 41 39 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; ··· 45 41 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; 46 42 extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; 47 43 extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; 44 + extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; 48 45 49 46 struct msm_dsi_dphy_timing { 50 47 u32 clk_pre;
+27 -3
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
··· 42 42 u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 }; 43 43 void __iomem *lane_base = phy->lane_base; 44 44 45 + if (phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK) 46 + tx_dctrl[3] = 0x02; 47 + 45 48 /* Strength ctrl settings */ 46 49 for (i = 0; i < 5; i++) { 47 50 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i), ··· 77 74 tx_dctrl[i]); 78 75 } 79 76 80 - /* Toggle BIT 0 to release freeze I/0 */ 81 - dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05); 82 - dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); 77 + if (!(phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)) { 78 + /* Toggle BIT 0 to release freeze I/0 */ 79 + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05); 80 + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); 81 + } 83 82 } 84 83 85 84 static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, ··· 225 220 }, 226 221 .io_start = { 0xae94400, 0xae96400 }, 227 222 .num_dsi_phy = 2, 223 + }; 224 + 225 + const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = { 226 + .type = MSM_DSI_PHY_10NM, 227 + .src_pll_truthtable = { {false, false}, {true, false} }, 228 + .reg_cfg = { 229 + .num = 1, 230 + .regs = { 231 + {"vdds", 36000, 32}, 232 + }, 233 + }, 234 + .ops = { 235 + .enable = dsi_10nm_phy_enable, 236 + .disable = dsi_10nm_phy_disable, 237 + .init = dsi_10nm_phy_init, 238 + }, 239 + .io_start = { 0xc994400, 0xc996400 }, 240 + .num_dsi_phy = 2, 241 + .quirks = V3_0_0_10NM_OLD_TIMINGS_QUIRK, 228 242 };
+73 -33
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
··· 104 104 struct dsi_pll_regs reg_setup; 105 105 106 106 /* private clocks: */ 107 - struct clk_hw *hws[NUM_DSI_CLOCKS_MAX]; 108 - u32 num_hws; 107 + struct clk_hw *out_div_clk_hw; 108 + struct clk_hw *bit_clk_hw; 109 + struct clk_hw *byte_clk_hw; 110 + struct clk_hw *by_2_bit_clk_hw; 111 + struct clk_hw *post_out_div_clk_hw; 112 + struct clk_hw *pclk_mux_hw; 113 + struct clk_hw *out_dsiclk_hw; 109 114 110 115 /* clock-provider: */ 111 116 struct clk_hw_onecell_data *hw_data; ··· 622 617 static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll) 623 618 { 624 619 struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); 620 + struct device *dev = &pll_10nm->pdev->dev; 625 621 626 622 DBG("DSI PLL%d", pll_10nm->id); 623 + of_clk_del_provider(dev->of_node); 624 + 625 + clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw); 626 + clk_hw_unregister_mux(pll_10nm->pclk_mux_hw); 627 + clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw); 628 + clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw); 629 + clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw); 630 + clk_hw_unregister_divider(pll_10nm->bit_clk_hw); 631 + clk_hw_unregister_divider(pll_10nm->out_div_clk_hw); 632 + clk_hw_unregister(&pll_10nm->base.clk_hw); 627 633 } 628 634 629 635 /* ··· 655 639 .ops = &clk_ops_dsi_pll_10nm_vco, 656 640 }; 657 641 struct device *dev = &pll_10nm->pdev->dev; 658 - struct clk_hw **hws = pll_10nm->hws; 659 642 struct clk_hw_onecell_data *hw_data; 660 643 struct clk_hw *hw; 661 - int num = 0; 662 644 int ret; 663 645 664 646 DBG("DSI%d", pll_10nm->id); ··· 674 660 if (ret) 675 661 return ret; 676 662 677 - hws[num++] = &pll_10nm->base.clk_hw; 678 - 679 663 snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); 680 664 snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id); 681 665 ··· 682 670 pll_10nm->mmio + 683 671 REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, 684 672 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); 685 - if (IS_ERR(hw)) 686 - return PTR_ERR(hw); 673 + if (IS_ERR(hw)) { 674 
+ ret = PTR_ERR(hw); 675 + goto err_base_clk_hw; 676 + } 687 677 688 - hws[num++] = hw; 678 + pll_10nm->out_div_clk_hw = hw; 689 679 690 680 snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id); 691 681 snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); ··· 699 685 REG_DSI_10nm_PHY_CMN_CLK_CFG0, 700 686 0, 4, CLK_DIVIDER_ONE_BASED, 701 687 &pll_10nm->postdiv_lock); 702 - if (IS_ERR(hw)) 703 - return PTR_ERR(hw); 688 + if (IS_ERR(hw)) { 689 + ret = PTR_ERR(hw); 690 + goto err_out_div_clk_hw; 691 + } 704 692 705 - hws[num++] = hw; 693 + pll_10nm->bit_clk_hw = hw; 706 694 707 695 snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id); 708 696 snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); ··· 712 696 /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ 713 697 hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 714 698 CLK_SET_RATE_PARENT, 1, 8); 715 - if (IS_ERR(hw)) 716 - return PTR_ERR(hw); 699 + if (IS_ERR(hw)) { 700 + ret = PTR_ERR(hw); 701 + goto err_bit_clk_hw; 702 + } 717 703 718 - hws[num++] = hw; 704 + pll_10nm->byte_clk_hw = hw; 719 705 hw_data->hws[DSI_BYTE_PLL_CLK] = hw; 720 706 721 707 snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id); ··· 725 707 726 708 hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 727 709 0, 1, 2); 728 - if (IS_ERR(hw)) 729 - return PTR_ERR(hw); 710 + if (IS_ERR(hw)) { 711 + ret = PTR_ERR(hw); 712 + goto err_byte_clk_hw; 713 + } 730 714 731 - hws[num++] = hw; 715 + pll_10nm->by_2_bit_clk_hw = hw; 732 716 733 717 snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id); 734 718 snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); 735 719 736 720 hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 737 721 0, 1, 4); 738 - if (IS_ERR(hw)) 739 - return PTR_ERR(hw); 722 + if (IS_ERR(hw)) { 723 + ret = PTR_ERR(hw); 724 + goto err_by_2_bit_clk_hw; 725 + } 740 726 741 - hws[num++] = hw; 727 + pll_10nm->post_out_div_clk_hw = hw; 742 728 743 729 
snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id); 744 730 snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); ··· 756 734 }, 4, 0, pll_10nm->phy_cmn_mmio + 757 735 REG_DSI_10nm_PHY_CMN_CLK_CFG1, 758 736 0, 2, 0, NULL); 759 - if (IS_ERR(hw)) 760 - return PTR_ERR(hw); 737 + if (IS_ERR(hw)) { 738 + ret = PTR_ERR(hw); 739 + goto err_post_out_div_clk_hw; 740 + } 761 741 762 - hws[num++] = hw; 742 + pll_10nm->pclk_mux_hw = hw; 763 743 764 744 snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id); 765 745 snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id); ··· 772 748 REG_DSI_10nm_PHY_CMN_CLK_CFG0, 773 749 4, 4, CLK_DIVIDER_ONE_BASED, 774 750 &pll_10nm->postdiv_lock); 775 - if (IS_ERR(hw)) 776 - return PTR_ERR(hw); 751 + if (IS_ERR(hw)) { 752 + ret = PTR_ERR(hw); 753 + goto err_pclk_mux_hw; 754 + } 777 755 778 - hws[num++] = hw; 756 + pll_10nm->out_dsiclk_hw = hw; 779 757 hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; 780 - 781 - pll_10nm->num_hws = num; 782 758 783 759 hw_data->num = NUM_PROVIDED_CLKS; 784 760 pll_10nm->hw_data = hw_data; ··· 787 763 pll_10nm->hw_data); 788 764 if (ret) { 789 765 DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret); 790 - return ret; 766 + goto err_dsiclk_hw; 791 767 } 792 768 793 769 return 0; 770 + 771 + err_dsiclk_hw: 772 + clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw); 773 + err_pclk_mux_hw: 774 + clk_hw_unregister_mux(pll_10nm->pclk_mux_hw); 775 + err_post_out_div_clk_hw: 776 + clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw); 777 + err_by_2_bit_clk_hw: 778 + clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw); 779 + err_byte_clk_hw: 780 + clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw); 781 + err_bit_clk_hw: 782 + clk_hw_unregister_divider(pll_10nm->bit_clk_hw); 783 + err_out_div_clk_hw: 784 + clk_hw_unregister_divider(pll_10nm->out_div_clk_hw); 785 + err_base_clk_hw: 786 + clk_hw_unregister(&pll_10nm->base.clk_hw); 787 + 788 + return ret; 794 789 } 795 790 796 791 struct 
msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) ··· 817 774 struct dsi_pll_10nm *pll_10nm; 818 775 struct msm_dsi_pll *pll; 819 776 int ret; 820 - 821 - if (!pdev) 822 - return ERR_PTR(-ENODEV); 823 777 824 778 pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL); 825 779 if (!pll_10nm)
+27 -7
drivers/gpu/drm/msm/msm_drv.c
··· 259 259 struct msm_mdss *mdss = priv->mdss; 260 260 int i; 261 261 262 + /* 263 + * Shutdown the hw if we're far enough along where things might be on. 264 + * If we run this too early, we'll end up panicking in any variety of 265 + * places. Since we don't register the drm device until late in 266 + * msm_drm_init, drm_dev->registered is used as an indicator that the 267 + * shutdown will be successful. 268 + */ 269 + if (ddev->registered) { 270 + drm_dev_unregister(ddev); 271 + drm_atomic_helper_shutdown(ddev); 272 + } 273 + 262 274 /* We must cancel and cleanup any pending vblank enable/disable 263 275 * work before drm_irq_uninstall() to avoid work re-enabling an 264 276 * irq after uninstall has disabled it. 265 277 */ 266 278 267 279 flush_workqueue(priv->wq); 268 - destroy_workqueue(priv->wq); 269 280 270 281 /* clean up event worker threads */ 271 282 for (i = 0; i < priv->num_crtcs; i++) { ··· 290 279 291 280 drm_kms_helper_poll_fini(ddev); 292 281 293 - drm_dev_unregister(ddev); 294 - 295 282 msm_perf_debugfs_cleanup(priv); 296 283 msm_rd_debugfs_cleanup(priv); 297 284 ··· 297 288 if (fbdev && priv->fbdev) 298 289 msm_fbdev_free(ddev); 299 290 #endif 300 - drm_atomic_helper_shutdown(ddev); 291 + 301 292 drm_mode_config_cleanup(ddev); 302 293 303 294 pm_runtime_get_sync(dev); ··· 322 313 ddev->dev_private = NULL; 323 314 drm_dev_put(ddev); 324 315 316 + destroy_workqueue(priv->wq); 325 317 kfree(priv); 326 318 327 319 return 0; ··· 621 611 622 612 static int context_init(struct drm_device *dev, struct drm_file *file) 623 613 { 614 + struct msm_drm_private *priv = dev->dev_private; 624 615 struct msm_file_private *ctx; 625 616 626 617 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ··· 630 619 631 620 msm_submitqueue_init(dev, ctx); 632 621 622 + ctx->aspace = priv->gpu->aspace; 633 623 file->driver_priv = ctx; 634 624 635 625 return 0; ··· 1329 1317 1330 1318 ret = add_gpu_components(&pdev->dev, &match); 1331 1319 if (ret) 1332 - return ret; 1320 + goto fail; 
1333 1321 1334 1322 /* on all devices that I am aware of, iommu's which can map 1335 1323 * any address the cpu can see are used: 1336 1324 */ 1337 1325 ret = dma_set_mask_and_coherent(&pdev->dev, ~0); 1338 1326 if (ret) 1339 - return ret; 1327 + goto fail; 1340 1328 1341 - return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); 1329 + ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); 1330 + if (ret) 1331 + goto fail; 1332 + 1333 + return 0; 1334 + 1335 + fail: 1336 + of_platform_depopulate(&pdev->dev); 1337 + return ret; 1342 1338 } 1343 1339 1344 1340 static int msm_pdev_remove(struct platform_device *pdev)
+1
drivers/gpu/drm/msm/msm_drv.h
··· 68 68 rwlock_t queuelock; 69 69 struct list_head submitqueues; 70 70 int queueid; 71 + struct msm_gem_address_space *aspace; 71 72 }; 72 73 73 74 enum msm_mdp_plane_property {
+2
drivers/gpu/drm/msm/msm_fb.c
··· 16 16 */ 17 17 18 18 #include <drm/drm_crtc.h> 19 + #include <drm/drm_damage_helper.h> 19 20 #include <drm/drm_gem_framebuffer_helper.h> 20 21 #include <drm/drm_probe_helper.h> 21 22 ··· 36 35 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { 37 36 .create_handle = drm_gem_fb_create_handle, 38 37 .destroy = drm_gem_fb_destroy, 38 + .dirty = drm_atomic_helper_dirtyfb, 39 39 }; 40 40 41 41 #ifdef CONFIG_DEBUG_FS
+4 -2
drivers/gpu/drm/msm/msm_gem.c
··· 352 352 WARN_ON(!mutex_is_locked(&msm_obj->lock)); 353 353 354 354 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { 355 - msm_gem_purge_vma(vma->aspace, vma); 356 - msm_gem_close_vma(vma->aspace, vma); 355 + if (vma->aspace) { 356 + msm_gem_purge_vma(vma->aspace, vma); 357 + msm_gem_close_vma(vma->aspace, vma); 358 + } 357 359 del_vma(vma); 358 360 } 359 361 }
+1
drivers/gpu/drm/msm/msm_gem.h
··· 141 141 struct msm_gem_submit { 142 142 struct drm_device *dev; 143 143 struct msm_gpu *gpu; 144 + struct msm_gem_address_space *aspace; 144 145 struct list_head node; /* node in ring submit list */ 145 146 struct list_head bo_list; 146 147 struct ww_acquire_ctx ticket;
+8 -5
drivers/gpu/drm/msm/msm_gem_submit.c
··· 32 32 #define BO_PINNED 0x2000 33 33 34 34 static struct msm_gem_submit *submit_create(struct drm_device *dev, 35 - struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue, 36 - uint32_t nr_bos, uint32_t nr_cmds) 35 + struct msm_gpu *gpu, struct msm_gem_address_space *aspace, 36 + struct msm_gpu_submitqueue *queue, uint32_t nr_bos, 37 + uint32_t nr_cmds) 37 38 { 38 39 struct msm_gem_submit *submit; 39 40 uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) + ··· 48 47 return NULL; 49 48 50 49 submit->dev = dev; 50 + submit->aspace = aspace; 51 51 submit->gpu = gpu; 52 52 submit->fence = NULL; 53 53 submit->cmd = (void *)&submit->bos[nr_bos]; ··· 162 160 struct msm_gem_object *msm_obj = submit->bos[i].obj; 163 161 164 162 if (submit->bos[i].flags & BO_PINNED) 165 - msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace); 163 + msm_gem_unpin_iova(&msm_obj->base, submit->aspace); 166 164 167 165 if (submit->bos[i].flags & BO_LOCKED) 168 166 ww_mutex_unlock(&msm_obj->base.resv->lock); ··· 266 264 267 265 /* if locking succeeded, pin bo: */ 268 266 ret = msm_gem_get_and_pin_iova(&msm_obj->base, 269 - submit->gpu->aspace, &iova); 267 + submit->aspace, &iova); 270 268 271 269 if (ret) 272 270 break; ··· 479 477 } 480 478 } 481 479 482 - submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); 480 + submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos, 481 + args->nr_cmds); 483 482 if (!submit) { 484 483 ret = -ENOMEM; 485 484 goto out_unlock;
+2 -3
drivers/gpu/drm/msm/msm_gpu.c
··· 684 684 struct msm_gem_object *msm_obj = submit->bos[i].obj; 685 685 /* move to inactive: */ 686 686 msm_gem_move_to_inactive(&msm_obj->base); 687 - msm_gem_unpin_iova(&msm_obj->base, gpu->aspace); 687 + msm_gem_unpin_iova(&msm_obj->base, submit->aspace); 688 688 drm_gem_object_put(&msm_obj->base); 689 689 } 690 690 ··· 768 768 769 769 /* submit takes a reference to the bo and iova until retired: */ 770 770 drm_gem_object_get(&msm_obj->base); 771 - msm_gem_get_and_pin_iova(&msm_obj->base, 772 - submit->gpu->aspace, &iova); 771 + msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova); 773 772 774 773 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) 775 774 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+1 -1
drivers/gpu/drm/msm/msm_iommu.c
··· 30 30 struct msm_iommu *iommu = arg; 31 31 if (iommu->base.handler) 32 32 return iommu->base.handler(iommu->base.arg, iova, flags); 33 - pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); 33 + pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags); 34 34 return 0; 35 35 } 36 36
+2 -13
drivers/gpu/drm/msm/msm_perf.c
··· 205 205 { 206 206 struct msm_drm_private *priv = minor->dev->dev_private; 207 207 struct msm_perf_state *perf; 208 - struct dentry *ent; 209 208 210 209 /* only create on first minor: */ 211 210 if (priv->perf) ··· 219 220 mutex_init(&perf->read_lock); 220 221 priv->perf = perf; 221 222 222 - ent = debugfs_create_file("perf", S_IFREG | S_IRUGO, 223 - minor->debugfs_root, perf, &perf_debugfs_fops); 224 - if (!ent) { 225 - DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/perf\n", 226 - minor->debugfs_root); 227 - goto fail; 228 - } 229 - 223 + debugfs_create_file("perf", S_IFREG | S_IRUGO, minor->debugfs_root, 224 + perf, &perf_debugfs_fops); 230 225 return 0; 231 - 232 - fail: 233 - msm_perf_debugfs_cleanup(priv); 234 - return -1; 235 226 } 236 227 237 228 void msm_perf_debugfs_cleanup(struct msm_drm_private *priv)
+2 -14
drivers/gpu/drm/msm/msm_rd.c
··· 244 244 static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name) 245 245 { 246 246 struct msm_rd_state *rd; 247 - struct dentry *ent; 248 - int ret = 0; 249 247 250 248 rd = kzalloc(sizeof(*rd), GFP_KERNEL); 251 249 if (!rd) ··· 256 258 257 259 init_waitqueue_head(&rd->fifo_event); 258 260 259 - ent = debugfs_create_file(name, S_IFREG | S_IRUGO, 260 - minor->debugfs_root, rd, &rd_debugfs_fops); 261 - if (!ent) { 262 - DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n", 263 - minor->debugfs_root, name); 264 - ret = -ENOMEM; 265 - goto fail; 266 - } 261 + debugfs_create_file(name, S_IFREG | S_IRUGO, minor->debugfs_root, rd, 262 + &rd_debugfs_fops); 267 263 268 264 return rd; 269 - 270 - fail: 271 - rd_cleanup(rd); 272 - return ERR_PTR(ret); 273 265 } 274 266 275 267 int msm_rd_debugfs_init(struct drm_minor *minor)