Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-msm-next-2021-08-12' of https://gitlab.freedesktop.org/drm/msm into drm-next

This is the main pull for v5.15, after the early pull request with
drm/scheduler conversion:

* New a6xx GPU support: a680 and 7c3
* dsi: 7nm phy, sc7280 support, test pattern generator support
* mdp4 fixes for older hw like the nexus7
* displayport fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGs_tyanTeDGMH1X+Uf4wdyy7jYj-CinGXXVETiYOESahw@mail.gmail.com

+1193 -585
+17
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
··· 64 64 Indicates if the DSI controller is driving a panel which needs 65 65 2 DSI links. 66 66 67 + assigned-clocks: 68 + minItems: 2 69 + maxItems: 2 70 + description: | 71 + Parents of "byte" and "pixel" for the given platform. 72 + 73 + assigned-clock-parents: 74 + minItems: 2 75 + maxItems: 2 76 + description: | 77 + The Byte clock and Pixel clock PLL outputs provided by a DSI PHY block. 78 + 67 79 power-domains: 68 80 maxItems: 1 69 81 ··· 131 119 - clock-names 132 120 - phys 133 121 - phy-names 122 + - assigned-clocks 123 + - assigned-clock-parents 134 124 - power-domains 135 125 - operating-points-v2 136 126 - ports ··· 172 158 173 159 phys = <&dsi0_phy>; 174 160 phy-names = "dsi"; 161 + 162 + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>; 163 + assigned-clock-parents = <&dsi_phy 0>, <&dsi_phy 1>; 175 164 176 165 power-domains = <&rpmhpd SC7180_CX>; 177 166 operating-points-v2 = <&dsi_opp_table>;
+72
Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/msm/dsi-phy-7nm.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Qualcomm Display DSI 7nm PHY 8 + 9 + maintainers: 10 + - Jonathan Marek <jonathan@marek.ca> 11 + 12 + allOf: 13 + - $ref: dsi-phy-common.yaml# 14 + 15 + properties: 16 + compatible: 17 + oneOf: 18 + - const: qcom,dsi-phy-7nm 19 + - const: qcom,dsi-phy-7nm-8150 20 + - const: qcom,sc7280-dsi-phy-7nm 21 + 22 + reg: 23 + items: 24 + - description: dsi phy register set 25 + - description: dsi phy lane register set 26 + - description: dsi pll register set 27 + 28 + reg-names: 29 + items: 30 + - const: dsi_phy 31 + - const: dsi_phy_lane 32 + - const: dsi_pll 33 + 34 + vdds-supply: 35 + description: | 36 + Connected to VDD_A_DSI_PLL_0P9 pin (or VDDA_DSI{0,1}_PLL_0P9 for sm8150) 37 + 38 + phy-type: 39 + description: D-PHY (default) or C-PHY mode 40 + enum: [ 10, 11 ] 41 + default: 10 42 + 43 + required: 44 + - compatible 45 + - reg 46 + - reg-names 47 + - vdds-supply 48 + 49 + unevaluatedProperties: false 50 + 51 + examples: 52 + - | 53 + #include <dt-bindings/clock/qcom,dispcc-sm8250.h> 54 + #include <dt-bindings/clock/qcom,rpmh.h> 55 + 56 + dsi-phy@ae94400 { 57 + compatible = "qcom,dsi-phy-7nm"; 58 + reg = <0x0ae94400 0x200>, 59 + <0x0ae94600 0x280>, 60 + <0x0ae94900 0x260>; 61 + reg-names = "dsi_phy", 62 + "dsi_phy_lane", 63 + "dsi_pll"; 64 + 65 + #clock-cells = <1>; 66 + #phy-cells = <0>; 67 + 68 + vdds-supply = <&vreg_l5a_0p88>; 69 + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, 70 + <&rpmhcc RPMH_CXO_CLK>; 71 + clock-names = "iface", "ref"; 72 + };
+3 -3
drivers/gpu/drm/msm/Kconfig
··· 116 116 Choose this option if DSI PHY on SDM845 is used on the platform. 117 117 118 118 config DRM_MSM_DSI_7NM_PHY 119 - bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)" 119 + bool "Enable DSI 7nm PHY driver in MSM DRM" 120 120 depends on DRM_MSM_DSI 121 121 default y 122 122 help 123 - Choose this option if DSI PHY on SM8150/SM8250 is used on the 124 - platform. 123 + Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on 124 + the platform.
+24 -5
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 18 18 19 19 #define GPU_PAS_ID 13 20 20 21 + static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) 22 + { 23 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 24 + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); 25 + 26 + if (a5xx_gpu->has_whereami) { 27 + OUT_PKT7(ring, CP_WHERE_AM_I, 2); 28 + OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring))); 29 + OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring))); 30 + } 31 + } 32 + 21 33 void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, 22 34 bool sync) 23 35 { ··· 42 30 * Most flush operations need to issue a WHERE_AM_I opcode to sync up 43 31 * the rptr shadow 44 32 */ 45 - if (a5xx_gpu->has_whereami && sync) { 46 - OUT_PKT7(ring, CP_WHERE_AM_I, 2); 47 - OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring))); 48 - OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring))); 49 - } 33 + if (sync) 34 + update_shadow_rptr(gpu, ring); 50 35 51 36 spin_lock_irqsave(&ring->preempt_lock, flags); 52 37 ··· 177 168 ibs++; 178 169 break; 179 170 } 171 + 172 + /* 173 + * Periodically update shadow-wptr if needed, so that we 174 + * can see partial progress of submits with large # of 175 + * cmds.. otherwise we could needlessly stall waiting for 176 + * ringbuffer state, simply due to looking at a shadow 177 + * rptr value that has not been updated 178 + */ 179 + if ((ibs % 32) == 0) 180 + update_shadow_rptr(gpu, ring); 180 181 } 181 182 182 183 /*
+8 -4
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 519 519 if (!pdcptr) 520 520 goto err; 521 521 522 - if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) 522 + if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu)) 523 523 pdc_in_aop = true; 524 - else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu)) 524 + else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu)) 525 525 pdc_address_offset = 0x30090; 526 526 else 527 527 pdc_address_offset = 0x30080; ··· 933 933 934 934 /* Use a known rate to bring up the GMU */ 935 935 clk_set_rate(gmu->core_clk, 200000000); 936 + clk_set_rate(gmu->hub_clk, 150000000); 936 937 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); 937 938 if (ret) { 938 939 pm_runtime_put(gmu->gxpd); ··· 1394 1393 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, 1395 1394 gmu->nr_clocks, "gmu"); 1396 1395 1396 + gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, 1397 + gmu->nr_clocks, "hub"); 1398 + 1397 1399 return 0; 1398 1400 } 1399 1401 ··· 1508 1504 * are otherwise unused by a660. 1509 1505 */ 1510 1506 gmu->dummy.size = SZ_4K; 1511 - if (adreno_is_a660(adreno_gpu)) { 1507 + if (adreno_is_a660_family(adreno_gpu)) { 1512 1508 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000); 1513 1509 if (ret) 1514 1510 goto err_memory; ··· 1526 1522 SZ_16M - SZ_16K, 0x04000); 1527 1523 if (ret) 1528 1524 goto err_memory; 1529 - } else if (adreno_is_a640(adreno_gpu)) { 1525 + } else if (adreno_is_a640_family(adreno_gpu)) { 1530 1526 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, 1531 1527 SZ_256K - SZ_16K, 0x04000); 1532 1528 if (ret)
+1
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
··· 66 66 int nr_clocks; 67 67 struct clk_bulk_data *clocks; 68 68 struct clk *core_clk; 69 + struct clk *hub_clk; 69 70 70 71 /* current performance index set externally */ 71 72 int current_perf_index;
+84 -49
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 52 52 return true; 53 53 } 54 54 55 - static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) 55 + static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) 56 56 { 57 57 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 58 58 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 59 - uint32_t wptr; 60 - unsigned long flags; 61 59 62 60 /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */ 63 61 if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) { 64 - struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 65 - 66 62 OUT_PKT7(ring, CP_WHERE_AM_I, 2); 67 63 OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring))); 68 64 OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring))); 69 65 } 66 + } 67 + 68 + static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) 69 + { 70 + uint32_t wptr; 71 + unsigned long flags; 72 + 73 + update_shadow_rptr(gpu, ring); 70 74 71 75 spin_lock_irqsave(&ring->preempt_lock, flags); 72 76 ··· 149 145 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 150 146 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 151 147 struct msm_ringbuffer *ring = submit->ring; 152 - unsigned int i; 148 + unsigned int i, ibs = 0; 153 149 154 150 a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); 155 151 ··· 185 181 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); 186 182 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); 187 183 OUT_RING(ring, submit->cmd[i].size); 184 + ibs++; 188 185 break; 189 186 } 187 + 188 + /* 189 + * Periodically update shadow-wptr if needed, so that we 190 + * can see partial progress of submits with large # of 191 + * cmds.. 
otherwise we could needlessly stall waiting for 192 + * ringbuffer state, simply due to looking at a shadow 193 + * rptr value that has not been updated 194 + */ 195 + if ((ibs % 32) == 0) 196 + update_shadow_rptr(gpu, ring); 190 197 } 191 198 192 199 get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), ··· 667 652 regs = a650_protect; 668 653 count = ARRAY_SIZE(a650_protect); 669 654 count_max = 48; 670 - } else if (adreno_is_a660(adreno_gpu)) { 655 + } else if (adreno_is_a660_family(adreno_gpu)) { 671 656 regs = a660_protect; 672 657 count = ARRAY_SIZE(a660_protect); 673 658 count_max = 48; ··· 698 683 if (adreno_is_a618(adreno_gpu)) 699 684 return; 700 685 701 - if (adreno_is_a640(adreno_gpu)) 686 + if (adreno_is_a640_family(adreno_gpu)) 702 687 amsbc = 1; 703 688 704 689 if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) { 705 690 /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */ 706 691 lower_bit = 3; 692 + amsbc = 1; 693 + rgb565_predicator = 1; 694 + uavflagprd_inv = 2; 695 + } 696 + 697 + if (adreno_is_7c3(adreno_gpu)) { 698 + lower_bit = 1; 707 699 amsbc = 1; 708 700 rgb565_predicator = 1; 709 701 uavflagprd_inv = 2; ··· 762 740 { 763 741 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 764 742 struct msm_gpu *gpu = &adreno_gpu->base; 743 + const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE]; 765 744 u32 *buf = msm_gem_get_vaddr(obj); 766 745 bool ret = false; 767 746 ··· 779 756 * 780 757 * a660 targets have all the critical security fixes from the start 781 758 */ 782 - if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) || 783 - adreno_is_a640(adreno_gpu)) { 759 + if (!strcmp(sqe_name, "a630_sqe.fw")) { 784 760 /* 785 761 * If the lowest nibble is 0xa that is an indication that this 786 762 * microcode has been patched. The actual version is in dword ··· 800 778 DRM_DEV_ERROR(&gpu->pdev->dev, 801 779 "a630 SQE ucode is too old. 
Have version %x need at least %x\n", 802 780 buf[0] & 0xfff, 0x190); 803 - } else if (adreno_is_a650(adreno_gpu)) { 781 + } else if (!strcmp(sqe_name, "a650_sqe.fw")) { 804 782 if ((buf[0] & 0xfff) >= 0x095) { 805 783 ret = true; 806 784 goto out; ··· 809 787 DRM_DEV_ERROR(&gpu->pdev->dev, 810 788 "a650 SQE ucode is too old. Have version %x need at least %x\n", 811 789 buf[0] & 0xfff, 0x095); 812 - } else if (adreno_is_a660(adreno_gpu)) { 790 + } else if (!strcmp(sqe_name, "a660_sqe.fw")) { 813 791 ret = true; 814 792 } else { 815 793 DRM_DEV_ERROR(&gpu->pdev->dev, ··· 919 897 a6xx_set_hwcg(gpu, true); 920 898 921 899 /* VBIF/GBIF start*/ 922 - if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) { 900 + if (adreno_is_a640_family(adreno_gpu) || 901 + adreno_is_a650_family(adreno_gpu)) { 923 902 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); 924 903 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); 925 904 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); ··· 958 935 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804); 959 936 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4); 960 937 961 - if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) 938 + if (adreno_is_a640_family(adreno_gpu) || 939 + adreno_is_a650_family(adreno_gpu)) 962 940 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); 963 941 else 964 942 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); 965 943 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); 966 944 967 - if (adreno_is_a660(adreno_gpu)) 945 + if (adreno_is_a660_family(adreno_gpu)) 968 946 gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); 969 947 970 948 /* Setting the mem pool size */ ··· 976 952 */ 977 953 if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) 978 954 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); 979 - else if (adreno_is_a640(adreno_gpu)) 955 + else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu)) 980 956 gpu_write(gpu, 
REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200); 957 + else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) 958 + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); 981 959 else 982 960 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000); 983 961 ··· 1016 990 /* Protect registers from the CP */ 1017 991 a6xx_set_cp_protect(gpu); 1018 992 1019 - if (adreno_is_a660(adreno_gpu)) { 993 + if (adreno_is_a660_family(adreno_gpu)) { 1020 994 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1); 1021 995 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0); 1022 - /* Set dualQ + disable afull for A660 GPU but not for A635 */ 1023 - gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); 1024 996 } 997 + 998 + /* Set dualQ + disable afull for A660 GPU */ 999 + if (adreno_is_a660(adreno_gpu)) 1000 + gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); 1025 1001 1026 1002 /* Enable expanded apriv for targets that support it */ 1027 1003 if (gpu->hw_apriv) { ··· 1411 1383 { 1412 1384 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 1413 1385 struct msm_gpu *gpu = &adreno_gpu->base; 1414 - u32 cntl1_regval = 0; 1386 + u32 gpu_scid, cntl1_regval = 0; 1415 1387 1416 1388 if (IS_ERR(a6xx_gpu->llc_mmio)) 1417 1389 return; 1418 1390 1419 1391 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { 1420 - u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); 1392 + gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); 1421 1393 1422 1394 gpu_scid &= 0x1f; 1423 1395 cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | ··· 1437 1409 } 1438 1410 } 1439 1411 1440 - if (cntl1_regval) { 1441 - /* 1442 - * Program the slice IDs for the various GPU blocks and GPU MMU 1443 - * pagetables 1444 - */ 1445 - if (a6xx_gpu->have_mmu500) 1446 - gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), 1447 - cntl1_regval); 1448 - else { 1449 - a6xx_llc_write(a6xx_gpu, 1450 - REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval); 1412 + if (!cntl1_regval) 1413 + return; 1451 1414 1452 - /* 1453 - * 
Program cacheability overrides to not allocate cache 1454 - * lines on a write miss 1455 - */ 1456 - a6xx_llc_rmw(a6xx_gpu, 1457 - REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03); 1458 - } 1415 + /* 1416 + * Program the slice IDs for the various GPU blocks and GPU MMU 1417 + * pagetables 1418 + */ 1419 + if (!a6xx_gpu->have_mmu500) { 1420 + a6xx_llc_write(a6xx_gpu, 1421 + REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval); 1422 + 1423 + /* 1424 + * Program cacheability overrides to not allocate cache 1425 + * lines on a write miss 1426 + */ 1427 + a6xx_llc_rmw(a6xx_gpu, 1428 + REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03); 1429 + return; 1459 1430 } 1431 + 1432 + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); 1433 + 1434 + /* On A660, the SCID programming for UCHE traffic is done in 1435 + * A6XX_GBIF_SCACHE_CNTL0[14:10] 1436 + */ 1437 + if (adreno_is_a660_family(adreno_gpu)) 1438 + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | 1439 + (1 << 8), (gpu_scid << 10) | (1 << 8)); 1460 1440 } 1461 1441 1462 1442 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) ··· 1703 1667 return UINT_MAX; 1704 1668 } 1705 1669 1706 - static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse) 1670 + static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse) 1707 1671 { 1708 1672 u32 val = UINT_MAX; 1709 1673 1710 - if (revn == 618) 1674 + if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev)) 1711 1675 val = a618_get_speed_bin(fuse); 1712 1676 1713 1677 if (val == UINT_MAX) { ··· 1720 1684 return (1 << val); 1721 1685 } 1722 1686 1723 - static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu, 1724 - u32 revn) 1687 + static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev) 1725 1688 { 1726 1689 u32 supp_hw = UINT_MAX; 1727 - u16 speedbin; 1690 + u32 speedbin; 1728 1691 int ret; 1729 1692 1730 - ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin); 1693 + ret = 
nvmem_cell_read_variable_le_u32(dev, "speed_bin", &speedbin); 1731 1694 /* 1732 1695 * -ENOENT means that the platform doesn't support speedbin which is 1733 1696 * fine ··· 1739 1704 ret); 1740 1705 goto done; 1741 1706 } 1742 - speedbin = le16_to_cpu(speedbin); 1743 1707 1744 - supp_hw = fuse_to_supp_hw(dev, revn, speedbin); 1708 + supp_hw = fuse_to_supp_hw(dev, rev, speedbin); 1745 1709 1746 1710 done: 1747 1711 ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1); ··· 1806 1772 */ 1807 1773 info = adreno_info(config->rev); 1808 1774 1809 - if (info && (info->revn == 650 || info->revn == 660)) 1775 + if (info && (info->revn == 650 || info->revn == 660 || 1776 + adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev))) 1810 1777 adreno_gpu->base.hw_apriv = true; 1811 1778 1812 1779 a6xx_llc_slices_init(pdev, a6xx_gpu); 1813 1780 1814 - ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn); 1781 + ret = a6xx_set_supported_hw(&pdev->dev, config->rev); 1815 1782 if (ret) { 1816 1783 a6xx_destroy(&(a6xx_gpu->base.base)); 1817 1784 return ERR_PTR(ret);
+33 -1
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
··· 382 382 msg->cnoc_cmds_data[1][0] = 0x60000001; 383 383 } 384 384 385 + static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 386 + { 387 + /* 388 + * Send a single "off" entry just to get things running 389 + * TODO: bus scaling 390 + */ 391 + msg->bw_level_num = 1; 392 + 393 + msg->ddr_cmds_num = 3; 394 + msg->ddr_wait_bitmask = 0x07; 395 + 396 + msg->ddr_cmds_addrs[0] = 0x50004; 397 + msg->ddr_cmds_addrs[1] = 0x50000; 398 + msg->ddr_cmds_addrs[2] = 0x50088; 399 + 400 + msg->ddr_cmds_data[0][0] = 0x40000000; 401 + msg->ddr_cmds_data[0][1] = 0x40000000; 402 + msg->ddr_cmds_data[0][2] = 0x40000000; 403 + 404 + /* 405 + * These are the CX (CNOC) votes - these are used by the GMU but the 406 + * votes are known and fixed for the target 407 + */ 408 + msg->cnoc_cmds_num = 1; 409 + msg->cnoc_wait_bitmask = 0x01; 410 + 411 + msg->cnoc_cmds_addrs[0] = 0x5006c; 412 + msg->cnoc_cmds_data[0][0] = 0x40000000; 413 + msg->cnoc_cmds_data[1][0] = 0x60000001; 414 + } 385 415 static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 386 416 { 387 417 /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ ··· 458 428 459 429 if (adreno_is_a618(adreno_gpu)) 460 430 a618_build_bw_table(&msg); 461 - else if (adreno_is_a640(adreno_gpu)) 431 + else if (adreno_is_a640_family(adreno_gpu)) 462 432 a640_build_bw_table(&msg); 463 433 else if (adreno_is_a650(adreno_gpu)) 464 434 a650_build_bw_table(&msg); 435 + else if (adreno_is_7c3(adreno_gpu)) 436 + adreno_7c3_build_bw_table(&msg); 465 437 else if (adreno_is_a660(adreno_gpu)) 466 438 a660_build_bw_table(&msg); 467 439 else
+34 -6
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 8 8 9 9 #include "adreno_gpu.h" 10 10 11 - #define ANY_ID 0xff 12 - 13 11 bool hang_debug = false; 14 12 MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); 15 13 module_param_named(hang_debug, hang_debug, bool, 0600); ··· 298 300 .init = a6xx_gpu_init, 299 301 .zapfw = "a660_zap.mdt", 300 302 .hwcg = a660_hwcg, 303 + }, { 304 + .rev = ADRENO_REV(6, 3, 5, ANY_ID), 305 + .name = "Adreno 7c Gen 3", 306 + .fw = { 307 + [ADRENO_FW_SQE] = "a660_sqe.fw", 308 + [ADRENO_FW_GMU] = "a660_gmu.bin", 309 + }, 310 + .gmem = SZ_512K, 311 + .inactive_period = DRM_MSM_INACTIVE_PERIOD, 312 + .init = a6xx_gpu_init, 313 + .hwcg = a660_hwcg, 314 + }, { 315 + .rev = ADRENO_REV(6, 8, 0, ANY_ID), 316 + .revn = 680, 317 + .name = "A680", 318 + .fw = { 319 + [ADRENO_FW_SQE] = "a630_sqe.fw", 320 + [ADRENO_FW_GMU] = "a640_gmu.bin", 321 + }, 322 + .gmem = SZ_2M, 323 + .inactive_period = DRM_MSM_INACTIVE_PERIOD, 324 + .init = a6xx_gpu_init, 325 + .zapfw = "a640_zap.mdt", 326 + .hwcg = a640_hwcg, 301 327 }, 302 328 }; 303 329 ··· 347 325 return (entry == ANY_ID) || (entry == id); 348 326 } 349 327 328 + bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2) 329 + { 330 + 331 + return _rev_match(rev1.core, rev2.core) && 332 + _rev_match(rev1.major, rev2.major) && 333 + _rev_match(rev1.minor, rev2.minor) && 334 + _rev_match(rev1.patchid, rev2.patchid); 335 + } 336 + 350 337 const struct adreno_info *adreno_info(struct adreno_rev rev) 351 338 { 352 339 int i; ··· 363 332 /* identify gpu: */ 364 333 for (i = 0; i < ARRAY_SIZE(gpulist); i++) { 365 334 const struct adreno_info *info = &gpulist[i]; 366 - if (_rev_match(info->rev.core, rev.core) && 367 - _rev_match(info->rev.major, rev.major) && 368 - _rev_match(info->rev.minor, rev.minor) && 369 - _rev_match(info->rev.patchid, rev.patchid)) 335 + if (adreno_cmp_rev(info->rev, rev)) 370 336 return info; 371 337 } 372 338
+19 -3
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 42 42 uint8_t patchid; 43 43 }; 44 44 45 + #define ANY_ID 0xff 46 + 45 47 #define ADRENO_REV(core, major, minor, patchid) \ 46 48 ((struct adreno_rev){ core, major, minor, patchid }) 47 49 ··· 143 141 __ret; \ 144 142 }) 145 143 144 + bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2); 145 + 146 146 static inline bool adreno_is_a2xx(struct adreno_gpu *gpu) 147 147 { 148 148 return (gpu->revn < 300); ··· 241 237 return gpu->revn == 630; 242 238 } 243 239 244 - static inline int adreno_is_a640(struct adreno_gpu *gpu) 240 + static inline int adreno_is_a640_family(struct adreno_gpu *gpu) 245 241 { 246 - return gpu->revn == 640; 242 + return (gpu->revn == 640) || (gpu->revn == 680); 247 243 } 248 244 249 245 static inline int adreno_is_a650(struct adreno_gpu *gpu) ··· 251 247 return gpu->revn == 650; 252 248 } 253 249 250 + static inline int adreno_is_7c3(struct adreno_gpu *gpu) 251 + { 252 + /* The order of args is important here to handle ANY_ID correctly */ 253 + return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev); 254 + } 255 + 254 256 static inline int adreno_is_a660(struct adreno_gpu *gpu) 255 257 { 256 258 return gpu->revn == 660; 257 259 } 258 260 261 + static inline int adreno_is_a660_family(struct adreno_gpu *gpu) 262 + { 263 + return adreno_is_a660(gpu) || adreno_is_7c3(gpu); 264 + } 265 + 259 266 /* check for a650, a660, or any derivatives */ 260 267 static inline int adreno_is_a650_family(struct adreno_gpu *gpu) 261 268 { 262 - return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660; 269 + return gpu->revn == 650 || gpu->revn == 620 || 270 + adreno_is_a660_family(gpu); 263 271 } 264 272 265 273 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+30 -13
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 30 30 #include "dpu_core_perf.h" 31 31 #include "dpu_trace.h" 32 32 33 - #define DPU_DRM_BLEND_OP_NOT_DEFINED 0 34 - #define DPU_DRM_BLEND_OP_OPAQUE 1 35 - #define DPU_DRM_BLEND_OP_PREMULTIPLIED 2 36 - #define DPU_DRM_BLEND_OP_COVERAGE 3 37 - #define DPU_DRM_BLEND_OP_MAX 4 38 - 39 33 /* layer mixer index on dpu_crtc */ 40 34 #define LEFT_MIXER 0 41 35 #define RIGHT_MIXER 1 ··· 140 146 { 141 147 struct dpu_hw_mixer *lm = mixer->hw_lm; 142 148 uint32_t blend_op; 149 + uint32_t fg_alpha, bg_alpha; 150 + 151 + fg_alpha = pstate->base.alpha >> 8; 152 + bg_alpha = 0xff - fg_alpha; 143 153 144 154 /* default to opaque blending */ 145 - blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | 146 - DPU_BLEND_BG_ALPHA_BG_CONST; 147 - 148 - if (format->alpha_enable) { 155 + if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE || 156 + !format->alpha_enable) { 157 + blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | 158 + DPU_BLEND_BG_ALPHA_BG_CONST; 159 + } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { 160 + blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | 161 + DPU_BLEND_BG_ALPHA_FG_PIXEL; 162 + if (fg_alpha != 0xff) { 163 + bg_alpha = fg_alpha; 164 + blend_op |= DPU_BLEND_BG_MOD_ALPHA | 165 + DPU_BLEND_BG_INV_MOD_ALPHA; 166 + } else { 167 + blend_op |= DPU_BLEND_BG_INV_ALPHA; 168 + } 169 + } else { 149 170 /* coverage blending */ 150 171 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | 151 - DPU_BLEND_BG_ALPHA_FG_PIXEL | 152 - DPU_BLEND_BG_INV_ALPHA; 172 + DPU_BLEND_BG_ALPHA_FG_PIXEL; 173 + if (fg_alpha != 0xff) { 174 + bg_alpha = fg_alpha; 175 + blend_op |= DPU_BLEND_FG_MOD_ALPHA | 176 + DPU_BLEND_FG_INV_MOD_ALPHA | 177 + DPU_BLEND_BG_MOD_ALPHA | 178 + DPU_BLEND_BG_INV_MOD_ALPHA; 179 + } else { 180 + blend_op |= DPU_BLEND_BG_INV_ALPHA; 181 + } 153 182 } 154 183 155 184 lm->ops.setup_blend_config(lm, pstate->stage, 156 - 0xFF, 0, blend_op); 185 + fg_alpha, bg_alpha, blend_op); 157 186 158 187 DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n", 159 188 
&format->base.pixel_format, format->alpha_enable, blend_op);
+5 -7
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 274 274 275 275 /* return EWOULDBLOCK since we know the wait isn't necessary */ 276 276 if (phys_enc->enable_state == DPU_ENC_DISABLED) { 277 - DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d", 277 + DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n", 278 278 DRMID(phys_enc->parent), intr_idx, 279 279 irq->irq_idx); 280 280 return -EWOULDBLOCK; 281 281 } 282 282 283 283 if (irq->irq_idx < 0) { 284 - DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s", 284 + DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n", 285 285 DRMID(phys_enc->parent), intr_idx, 286 286 irq->name); 287 287 return 0; 288 288 } 289 289 290 - DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d", 290 + DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n", 291 291 DRMID(phys_enc->parent), intr_idx, 292 292 irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0, 293 293 atomic_read(wait_info->atomic_cnt)); ··· 303 303 if (irq_status) { 304 304 unsigned long flags; 305 305 306 - DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, " 307 - "irq=%d, pp=%d, atomic_cnt=%d", 306 + DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n", 308 307 DRMID(phys_enc->parent), intr_idx, 309 308 irq->irq_idx, 310 309 phys_enc->hw_pp->idx - PINGPONG_0, ··· 314 315 ret = 0; 315 316 } else { 316 317 ret = -ETIMEDOUT; 317 - DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, " 318 - "irq=%d, pp=%d, atomic_cnt=%d", 318 + DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n", 319 319 DRMID(phys_enc->parent), intr_idx, 320 320 irq->irq_idx, 321 321 phys_enc->hw_pp->idx - PINGPONG_0,
+5
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
··· 974 974 .amortizable_threshold = 25, 975 975 .min_prefill_lines = 24, 976 976 .danger_lut_tbl = {0xf, 0xffff, 0x0}, 977 + .safe_lut_tbl = {0xfff0, 0xf000, 0xffff}, 977 978 .qos_lut_tbl = { 978 979 {.nentry = ARRAY_SIZE(sdm845_qos_linear), 979 980 .entries = sdm845_qos_linear ··· 1002 1001 .min_dram_ib = 1600000, 1003 1002 .min_prefill_lines = 24, 1004 1003 .danger_lut_tbl = {0xff, 0xffff, 0x0}, 1004 + .safe_lut_tbl = {0xfff0, 0xff00, 0xffff}, 1005 1005 .qos_lut_tbl = { 1006 1006 {.nentry = ARRAY_SIZE(sc7180_qos_linear), 1007 1007 .entries = sc7180_qos_linear ··· 1030 1028 .min_dram_ib = 800000, 1031 1029 .min_prefill_lines = 24, 1032 1030 .danger_lut_tbl = {0xf, 0xffff, 0x0}, 1031 + .safe_lut_tbl = {0xfff8, 0xf000, 0xffff}, 1033 1032 .qos_lut_tbl = { 1034 1033 {.nentry = ARRAY_SIZE(sm8150_qos_linear), 1035 1034 .entries = sm8150_qos_linear ··· 1059 1056 .min_dram_ib = 800000, 1060 1057 .min_prefill_lines = 35, 1061 1058 .danger_lut_tbl = {0xf, 0xffff, 0x0}, 1059 + .safe_lut_tbl = {0xfff0, 0xff00, 0xffff}, 1062 1060 .qos_lut_tbl = { 1063 1061 {.nentry = ARRAY_SIZE(sc7180_qos_linear), 1064 1062 .entries = sc7180_qos_linear ··· 1088 1084 .min_dram_ib = 1600000, 1089 1085 .min_prefill_lines = 24, 1090 1086 .danger_lut_tbl = {0xffff, 0xffff, 0x0}, 1087 + .safe_lut_tbl = {0xff00, 0xff00, 0xffff}, 1091 1088 .qos_lut_tbl = { 1092 1089 {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), 1093 1090 .entries = sc7180_qos_macrotile
+6 -4
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
··· 345 345 int i; 346 346 347 347 for (i = 0; i < ctx->mixer_count; i++) { 348 - DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0); 349 - DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0); 350 - DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0); 351 - DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0); 348 + enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id; 349 + 350 + DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0); 351 + DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0); 352 + DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0); 353 + DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0); 352 354 } 353 355 354 356 DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
+57 -45
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 471 471 struct dpu_kms *dpu_kms) 472 472 { 473 473 struct drm_encoder *encoder = NULL; 474 + struct msm_display_info info; 474 475 int i, rc = 0; 475 476 476 477 if (!(priv->dsi[0] || priv->dsi[1])) 477 478 return rc; 478 479 479 - /*TODO: Support two independent DSI connectors */ 480 - encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); 481 - if (IS_ERR(encoder)) { 482 - DPU_ERROR("encoder init failed for dsi display\n"); 483 - return PTR_ERR(encoder); 484 - } 485 - 486 - priv->encoders[priv->num_encoders++] = encoder; 487 - 480 + /* 481 + * We support following confiurations: 482 + * - Single DSI host (dsi0 or dsi1) 483 + * - Two independent DSI hosts 484 + * - Bonded DSI0 and DSI1 hosts 485 + * 486 + * TODO: Support swapping DSI0 and DSI1 in the bonded setup. 487 + */ 488 488 for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { 489 + int other = (i + 1) % 2; 490 + 489 491 if (!priv->dsi[i]) 490 492 continue; 493 + 494 + if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && 495 + !msm_dsi_is_master_dsi(priv->dsi[i])) 496 + continue; 497 + 498 + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); 499 + if (IS_ERR(encoder)) { 500 + DPU_ERROR("encoder init failed for dsi display\n"); 501 + return PTR_ERR(encoder); 502 + } 503 + 504 + priv->encoders[priv->num_encoders++] = encoder; 505 + 506 + memset(&info, 0, sizeof(info)); 507 + info.intf_type = encoder->encoder_type; 491 508 492 509 rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder); 493 510 if (rc) { ··· 512 495 i, rc); 513 496 break; 514 497 } 498 + 499 + info.h_tile_instance[info.num_of_h_tiles++] = i; 500 + info.capabilities = msm_dsi_is_cmd_mode(priv->dsi[i]) ? 
501 + MSM_DISPLAY_CAP_CMD_MODE : 502 + MSM_DISPLAY_CAP_VID_MODE; 503 + 504 + if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) { 505 + rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder); 506 + if (rc) { 507 + DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n", 508 + other, rc); 509 + break; 510 + } 511 + 512 + info.h_tile_instance[info.num_of_h_tiles++] = other; 513 + } 514 + 515 + rc = dpu_encoder_setup(dev, encoder, &info); 516 + if (rc) 517 + DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", 518 + encoder->base.id, rc); 515 519 } 516 520 517 521 return rc; ··· 543 505 struct dpu_kms *dpu_kms) 544 506 { 545 507 struct drm_encoder *encoder = NULL; 508 + struct msm_display_info info; 546 509 int rc = 0; 547 510 548 511 if (!priv->dp) ··· 555 516 return PTR_ERR(encoder); 556 517 } 557 518 519 + memset(&info, 0, sizeof(info)); 558 520 rc = msm_dp_modeset_init(priv->dp, dev, encoder); 559 521 if (rc) { 560 522 DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); ··· 564 524 } 565 525 566 526 priv->encoders[priv->num_encoders++] = encoder; 527 + 528 + info.num_of_h_tiles = 1; 529 + info.capabilities = MSM_DISPLAY_CAP_VID_MODE; 530 + info.intf_type = encoder->encoder_type; 531 + rc = dpu_encoder_setup(dev, encoder, &info); 532 + if (rc) 533 + DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", 534 + encoder->base.id, rc); 567 535 return rc; 568 536 } 569 537 ··· 774 726 msm_kms_destroy(&dpu_kms->base); 775 727 } 776 728 777 - static void _dpu_kms_set_encoder_mode(struct msm_kms *kms, 778 - struct drm_encoder *encoder, 779 - bool cmd_mode) 780 - { 781 - struct msm_display_info info; 782 - struct msm_drm_private *priv = encoder->dev->dev_private; 783 - int i, rc = 0; 784 - 785 - memset(&info, 0, sizeof(info)); 786 - 787 - info.intf_type = encoder->encoder_type; 788 - info.capabilities = cmd_mode ? 
MSM_DISPLAY_CAP_CMD_MODE : 789 - MSM_DISPLAY_CAP_VID_MODE; 790 - 791 - switch (info.intf_type) { 792 - case DRM_MODE_ENCODER_DSI: 793 - /* TODO: No support for DSI swap */ 794 - for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { 795 - if (priv->dsi[i]) { 796 - info.h_tile_instance[info.num_of_h_tiles] = i; 797 - info.num_of_h_tiles++; 798 - } 799 - } 800 - break; 801 - case DRM_MODE_ENCODER_TMDS: 802 - info.num_of_h_tiles = 1; 803 - break; 804 - } 805 - 806 - rc = dpu_encoder_setup(encoder->dev, encoder, &info); 807 - if (rc) 808 - DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", 809 - encoder->base.id, rc); 810 - } 811 - 812 729 static irqreturn_t dpu_irq(struct msm_kms *kms) 813 730 { 814 731 struct dpu_kms *dpu_kms = to_dpu_kms(kms); ··· 876 863 .get_format = dpu_get_msm_format, 877 864 .round_pixclk = dpu_kms_round_pixclk, 878 865 .destroy = dpu_kms_destroy, 879 - .set_encoder_mode = _dpu_kms_set_encoder_mode, 880 866 .snapshot = dpu_kms_mdp_snapshot, 881 867 #ifdef CONFIG_DEBUG_FS 882 868 .debugfs_init = dpu_kms_debugfs_init,
+7 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 1339 1339 return; 1340 1340 } 1341 1341 1342 - pstate->base.plane = plane; 1343 - 1344 - plane->state = &pstate->base; 1342 + __drm_atomic_helper_plane_reset(plane, &pstate->base); 1345 1343 } 1346 1344 1347 1345 #ifdef CONFIG_DEBUG_FS ··· 1644 1646 ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max); 1645 1647 if (ret) 1646 1648 DPU_ERROR("failed to install zpos property, rc = %d\n", ret); 1649 + 1650 + drm_plane_create_alpha_property(plane); 1651 + drm_plane_create_blend_mode_property(plane, 1652 + BIT(DRM_MODE_BLEND_PIXEL_NONE) | 1653 + BIT(DRM_MODE_BLEND_PREMULTI) | 1654 + BIT(DRM_MODE_BLEND_COVERAGE)); 1647 1655 1648 1656 drm_plane_create_rotation_property(plane, 1649 1657 DRM_MODE_ROTATE_0,
+41 -44
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
··· 19 19 { 20 20 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 21 21 struct drm_device *dev = mdp4_kms->dev; 22 - uint32_t version, major, minor, dmap_cfg, vg_cfg; 22 + u32 dmap_cfg, vg_cfg; 23 23 unsigned long clk; 24 24 int ret = 0; 25 25 26 26 pm_runtime_get_sync(dev->dev); 27 - 28 - mdp4_enable(mdp4_kms); 29 - version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); 30 - mdp4_disable(mdp4_kms); 31 - 32 - major = FIELD(version, MDP4_VERSION_MAJOR); 33 - minor = FIELD(version, MDP4_VERSION_MINOR); 34 - 35 - DBG("found MDP4 version v%d.%d", major, minor); 36 - 37 - if (major != 4) { 38 - DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n", 39 - major, minor); 40 - ret = -ENXIO; 41 - goto out; 42 - } 43 - 44 - mdp4_kms->rev = minor; 45 27 46 28 if (mdp4_kms->rev > 1) { 47 29 mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff); ··· 70 88 if (mdp4_kms->rev > 1) 71 89 mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1); 72 90 73 - out: 74 91 pm_runtime_put_sync(dev->dev); 75 92 76 93 return ret; ··· 89 108 90 109 static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) 91 110 { 92 - int i; 93 - struct drm_crtc *crtc; 94 - struct drm_crtc_state *crtc_state; 95 - 96 - /* see 119ecb7fd */ 97 - for_each_new_crtc_in_state(state, crtc, crtc_state, i) 98 - drm_crtc_vblank_get(crtc); 99 111 } 100 112 101 113 static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask) ··· 107 133 108 134 static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask) 109 135 { 110 - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 111 - struct drm_crtc *crtc; 112 - 113 - /* see 119ecb7fd */ 114 - for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask) 115 - drm_crtc_vblank_put(crtc); 116 136 } 117 137 118 138 static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, ··· 379 411 return ret; 380 412 } 381 413 414 + static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms, 415 + u32 *major, u32 *minor) 416 + { 417 + 
struct drm_device *dev = mdp4_kms->dev; 418 + u32 version; 419 + 420 + mdp4_enable(mdp4_kms); 421 + version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); 422 + mdp4_disable(mdp4_kms); 423 + 424 + *major = FIELD(version, MDP4_VERSION_MAJOR); 425 + *minor = FIELD(version, MDP4_VERSION_MINOR); 426 + 427 + DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor); 428 + } 429 + 382 430 struct msm_kms *mdp4_kms_init(struct drm_device *dev) 383 431 { 384 432 struct platform_device *pdev = to_platform_device(dev->dev); 385 433 struct mdp4_platform_config *config = mdp4_get_config(pdev); 434 + struct msm_drm_private *priv = dev->dev_private; 386 435 struct mdp4_kms *mdp4_kms; 387 436 struct msm_kms *kms = NULL; 388 437 struct msm_gem_address_space *aspace; 389 438 int irq, ret; 439 + u32 major, minor; 390 440 391 441 mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); 392 442 if (!mdp4_kms) { ··· 419 433 goto fail; 420 434 } 421 435 422 - kms = &mdp4_kms->base.base; 436 + priv->kms = &mdp4_kms->base.base; 437 + kms = priv->kms; 423 438 424 439 mdp4_kms->dev = dev; 425 440 ··· 466 479 if (IS_ERR(mdp4_kms->pclk)) 467 480 mdp4_kms->pclk = NULL; 468 481 469 - if (mdp4_kms->rev >= 2) { 470 - mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk"); 471 - if (IS_ERR(mdp4_kms->lut_clk)) { 472 - DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n"); 473 - ret = PTR_ERR(mdp4_kms->lut_clk); 474 - goto fail; 475 - } 476 - } 477 - 478 482 mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk"); 479 483 if (IS_ERR(mdp4_kms->axi_clk)) { 480 484 DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n"); ··· 474 496 } 475 497 476 498 clk_set_rate(mdp4_kms->clk, config->max_clk); 477 - if (mdp4_kms->lut_clk) 499 + 500 + read_mdp_hw_revision(mdp4_kms, &major, &minor); 501 + 502 + if (major != 4) { 503 + DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n", 504 + major, minor); 505 + ret = -ENXIO; 506 + goto fail; 507 + } 508 + 509 + mdp4_kms->rev = minor; 510 + 511 + if (mdp4_kms->rev >= 2) { 512 
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk"); 513 + if (IS_ERR(mdp4_kms->lut_clk)) { 514 + DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n"); 515 + ret = PTR_ERR(mdp4_kms->lut_clk); 516 + goto fail; 517 + } 478 518 clk_set_rate(mdp4_kms->lut_clk, config->max_clk); 519 + } 479 520 480 521 pm_runtime_enable(dev->dev); 481 522 mdp4_kms->rpm_enabled = true;
+1 -1
drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
··· 737 737 } 738 738 739 739 /* 740 - * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI 740 + * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI 741 741 * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when 742 742 * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. 743 743 * Single FLUSH is supported from hw rev v3.0.
+3 -8
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 209 209 slave_encoder); 210 210 } 211 211 212 - static void mdp5_set_encoder_mode(struct msm_kms *kms, 213 - struct drm_encoder *encoder, 214 - bool cmd_mode) 215 - { 216 - mdp5_encoder_set_intf_mode(encoder, cmd_mode); 217 - } 218 - 219 212 static void mdp5_kms_destroy(struct msm_kms *kms) 220 213 { 221 214 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); ··· 280 287 .get_format = mdp_get_format, 281 288 .round_pixclk = mdp5_round_pixclk, 282 289 .set_split_display = mdp5_set_split_display, 283 - .set_encoder_mode = mdp5_set_encoder_mode, 284 290 .destroy = mdp5_kms_destroy, 285 291 #ifdef CONFIG_DEBUG_FS 286 292 .debugfs_init = mdp5_kms_debugfs_init, ··· 440 448 } 441 449 442 450 ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); 451 + if (!ret) 452 + mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id])); 453 + 443 454 break; 444 455 } 445 456 default:
-1
drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
··· 16 16 #include <linux/delay.h> 17 17 #include <linux/spinlock.h> 18 18 #include <linux/ktime.h> 19 - #include <linux/debugfs.h> 20 19 #include <linux/uaccess.h> 21 20 #include <linux/dma-buf.h> 22 21 #include <linux/slab.h>
+3
drivers/gpu/drm/msm/dp/dp_aux.c
··· 353 353 if (!(aux->retry_cnt % MAX_AUX_RETRIES)) 354 354 dp_catalog_aux_update_cfg(aux->catalog); 355 355 } 356 + /* reset aux if link is in connected state */ 357 + if (dp_catalog_link_is_connected(aux->catalog)) 358 + dp_catalog_aux_reset(aux->catalog); 356 359 } else { 357 360 aux->retry_cnt = 0; 358 361 switch (aux->aux_error_num) {
+6 -2
drivers/gpu/drm/msm/dp/dp_catalog.c
··· 372 372 struct dp_catalog_private *catalog = container_of(dp_catalog, 373 373 struct dp_catalog_private, dp_catalog); 374 374 375 + DRM_DEBUG_DP("enable=%d\n", enable); 375 376 if (enable) { 376 377 /* 377 378 * To make sure link reg writes happens before other operation, ··· 581 580 582 581 config = (en ? config | intr_mask : config & ~intr_mask); 583 582 583 + DRM_DEBUG_DP("intr_mask=%#x config=%#x\n", intr_mask, config); 584 584 dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, 585 585 config & DP_DP_HPD_INT_MASK); 586 586 } ··· 612 610 u32 status; 613 611 614 612 status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); 613 + DRM_DEBUG_DP("aux status: %#x\n", status); 615 614 status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT; 616 615 status &= DP_DP_HPD_STATE_STATUS_BITS_MASK; 617 616 ··· 688 685 /* Make sure to clear the current pattern before starting a new one */ 689 686 dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); 690 687 688 + DRM_DEBUG_DP("pattern: %#x\n", pattern); 691 689 switch (pattern) { 692 690 case DP_PHY_TEST_PATTERN_D10_2: 693 691 dp_write_link(catalog, REG_DP_STATE_CTRL, ··· 749 745 DP_STATE_CTRL_LINK_TRAINING_PATTERN4); 750 746 break; 751 747 default: 752 - DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern); 748 + DRM_DEBUG_DP("No valid test pattern requested: %#x\n", pattern); 753 749 break; 754 750 } 755 751 } ··· 932 928 select = dp_catalog->audio_data; 933 929 acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); 934 930 935 - DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl); 931 + DRM_DEBUG_DP("select: %#x, acr_ctrl: %#x\n", select, acr_ctrl); 936 932 937 933 dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); 938 934 }
+90 -51
drivers/gpu/drm/msm/dp/dp_ctrl.c
··· 81 81 struct completion video_comp; 82 82 }; 83 83 84 - struct dp_cr_status { 85 - u8 lane_0_1; 86 - u8 lane_2_3; 87 - }; 88 - 89 - #define DP_LANE0_1_CR_DONE 0x11 90 - 91 84 static int dp_aux_link_configure(struct drm_dp_aux *aux, 92 85 struct dp_link_info *link) 93 86 { ··· 113 120 IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES)) 114 121 pr_warn("PUSH_IDLE pattern timedout\n"); 115 122 116 - pr_debug("mainlink off done\n"); 123 + DRM_DEBUG_DP("mainlink off done\n"); 117 124 } 118 125 119 126 static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) ··· 1004 1011 u32 voltage_swing_level = link->phy_params.v_level; 1005 1012 u32 pre_emphasis_level = link->phy_params.p_level; 1006 1013 1014 + DRM_DEBUG_DP("voltage level: %d emphasis level: %d\n", voltage_swing_level, 1015 + pre_emphasis_level); 1007 1016 ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog, 1008 1017 voltage_swing_level, pre_emphasis_level); 1009 1018 ··· 1073 1078 } 1074 1079 1075 1080 static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, 1076 - struct dp_cr_status *cr, int *training_step) 1081 + int *training_step) 1077 1082 { 1078 1083 int tries, old_v_level, ret = 0; 1079 1084 u8 link_status[DP_LINK_STATUS_SIZE]; ··· 1101 1106 ret = dp_ctrl_read_link_status(ctrl, link_status); 1102 1107 if (ret) 1103 1108 return ret; 1104 - 1105 - cr->lane_0_1 = link_status[0]; 1106 - cr->lane_2_3 = link_status[1]; 1107 1109 1108 1110 if (drm_dp_clock_recovery_ok(link_status, 1109 1111 ctrl->link->link_params.num_lanes)) { ··· 1178 1186 } 1179 1187 1180 1188 static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, 1181 - struct dp_cr_status *cr, int *training_step) 1189 + int *training_step) 1182 1190 { 1183 1191 int tries = 0, ret = 0; 1184 1192 char pattern; ··· 1194 1202 else 1195 1203 pattern = DP_TRAINING_PATTERN_2; 1196 1204 1197 - ret = dp_ctrl_update_vx_px(ctrl); 1198 - if (ret) 1199 - return ret; 1200 - 1201 1205 ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern); 1202 1206 if (ret) 
1203 1207 return ret; ··· 1206 1218 ret = dp_ctrl_read_link_status(ctrl, link_status); 1207 1219 if (ret) 1208 1220 return ret; 1209 - cr->lane_0_1 = link_status[0]; 1210 - cr->lane_2_3 = link_status[1]; 1211 1221 1212 1222 if (drm_dp_channel_eq_ok(link_status, 1213 1223 ctrl->link->link_params.num_lanes)) { ··· 1225 1239 static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl); 1226 1240 1227 1241 static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, 1228 - struct dp_cr_status *cr, int *training_step) 1242 + int *training_step) 1229 1243 { 1230 1244 int ret = 0; 1231 1245 u8 encoding = DP_SET_ANSI_8B10B; ··· 1241 1255 drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, 1242 1256 &encoding, 1); 1243 1257 1244 - ret = dp_ctrl_link_train_1(ctrl, cr, training_step); 1258 + ret = dp_ctrl_link_train_1(ctrl, training_step); 1245 1259 if (ret) { 1246 1260 DRM_ERROR("link training #1 failed. ret=%d\n", ret); 1247 1261 goto end; ··· 1250 1264 /* print success info as this is a result of user initiated action */ 1251 1265 DRM_DEBUG_DP("link training #1 successful\n"); 1252 1266 1253 - ret = dp_ctrl_link_train_2(ctrl, cr, training_step); 1267 + ret = dp_ctrl_link_train_2(ctrl, training_step); 1254 1268 if (ret) { 1255 1269 DRM_ERROR("link training #2 failed. ret=%d\n", ret); 1256 1270 goto end; ··· 1266 1280 } 1267 1281 1268 1282 static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, 1269 - struct dp_cr_status *cr, int *training_step) 1283 + int *training_step) 1270 1284 { 1271 1285 int ret = 0; 1272 1286 ··· 1281 1295 * a link training pattern, we have to first do soft reset. 
1282 1296 */ 1283 1297 1284 - ret = dp_ctrl_link_train(ctrl, cr, training_step); 1298 + ret = dp_ctrl_link_train(ctrl, training_step); 1285 1299 1286 1300 return ret; 1287 1301 } ··· 1368 1382 if (reset) 1369 1383 dp_catalog_ctrl_reset(ctrl->catalog); 1370 1384 1385 + DRM_DEBUG_DP("flip=%d\n", flip); 1371 1386 dp_catalog_ctrl_phy_reset(ctrl->catalog); 1372 1387 phy_init(phy); 1373 1388 dp_catalog_ctrl_enable_irq(ctrl->catalog, true); ··· 1479 1492 static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) 1480 1493 { 1481 1494 int ret = 0; 1482 - struct dp_cr_status cr; 1483 1495 int training_step = DP_TRAINING_NONE; 1484 1496 1485 1497 dp_ctrl_push_idle(&ctrl->dp_ctrl); 1486 1498 1499 + ctrl->link->phy_params.p_level = 0; 1500 + ctrl->link->phy_params.v_level = 0; 1501 + 1487 1502 ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; 1488 1503 1489 - ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step); 1504 + ret = dp_ctrl_setup_main_link(ctrl, &training_step); 1490 1505 if (ret) 1491 1506 goto end; 1492 1507 ··· 1515 1526 * running. Add the global reset just before disabling the 1516 1527 * link clocks and core clocks. 
1517 1528 */ 1518 - ret = dp_ctrl_off(&ctrl->dp_ctrl); 1529 + ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl); 1519 1530 if (ret) { 1520 1531 DRM_ERROR("failed to disable DP controller\n"); 1521 1532 return ret; ··· 1619 1630 } 1620 1631 } 1621 1632 1633 + static bool dp_ctrl_clock_recovery_any_ok( 1634 + const u8 link_status[DP_LINK_STATUS_SIZE], 1635 + int lane_count) 1636 + { 1637 + int reduced_cnt; 1638 + 1639 + if (lane_count <= 1) 1640 + return false; 1641 + 1642 + /* 1643 + * only interested in the lane number after reduced 1644 + * lane_count = 4, then only interested in 2 lanes 1645 + * lane_count = 2, then only interested in 1 lane 1646 + */ 1647 + reduced_cnt = lane_count >> 1; 1648 + 1649 + return drm_dp_clock_recovery_ok(link_status, reduced_cnt); 1650 + } 1651 + 1652 + static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl) 1653 + { 1654 + u8 link_status[DP_LINK_STATUS_SIZE]; 1655 + int num_lanes = ctrl->link->link_params.num_lanes; 1656 + 1657 + dp_ctrl_read_link_status(ctrl, link_status); 1658 + 1659 + return drm_dp_channel_eq_ok(link_status, num_lanes); 1660 + } 1661 + 1622 1662 int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) 1623 1663 { 1624 1664 int rc = 0; ··· 1655 1637 u32 rate = 0; 1656 1638 int link_train_max_retries = 5; 1657 1639 u32 const phy_cts_pixel_clk_khz = 148500; 1658 - struct dp_cr_status cr; 1640 + u8 link_status[DP_LINK_STATUS_SIZE]; 1659 1641 unsigned int training_step; 1660 1642 1661 1643 if (!dp_ctrl) ··· 1682 1664 ctrl->link->link_params.rate, 1683 1665 ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate); 1684 1666 1667 + ctrl->link->phy_params.p_level = 0; 1668 + ctrl->link->phy_params.v_level = 0; 1669 + 1685 1670 rc = dp_ctrl_enable_mainlink_clocks(ctrl); 1686 1671 if (rc) 1687 1672 return rc; ··· 1698 1677 } 1699 1678 1700 1679 training_step = DP_TRAINING_NONE; 1701 - rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step); 1680 + rc = dp_ctrl_setup_main_link(ctrl, &training_step); 1702 1681 if (rc == 0) { 
1703 1682 /* training completed successfully */ 1704 1683 break; 1705 1684 } else if (training_step == DP_TRAINING_1) { 1706 1685 /* link train_1 failed */ 1707 - if (!dp_catalog_link_is_connected(ctrl->catalog)) { 1686 + if (!dp_catalog_link_is_connected(ctrl->catalog)) 1708 1687 break; 1709 - } 1688 + 1689 + dp_ctrl_read_link_status(ctrl, link_status); 1710 1690 1711 1691 rc = dp_ctrl_link_rate_down_shift(ctrl); 1712 1692 if (rc < 0) { /* already in RBR = 1.6G */ 1713 - if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) { 1693 + if (dp_ctrl_clock_recovery_any_ok(link_status, 1694 + ctrl->link->link_params.num_lanes)) { 1714 1695 /* 1715 1696 * some lanes are ready, 1716 1697 * reduce lane number ··· 1728 1705 } 1729 1706 } 1730 1707 } else if (training_step == DP_TRAINING_2) { 1731 - /* link train_2 failed, lower lane rate */ 1732 - if (!dp_catalog_link_is_connected(ctrl->catalog)) { 1708 + /* link train_2 failed */ 1709 + if (!dp_catalog_link_is_connected(ctrl->catalog)) 1733 1710 break; 1734 - } 1735 1711 1736 - rc = dp_ctrl_link_lane_down_shift(ctrl); 1712 + dp_ctrl_read_link_status(ctrl, link_status); 1713 + 1714 + if (!drm_dp_clock_recovery_ok(link_status, 1715 + ctrl->link->link_params.num_lanes)) 1716 + rc = dp_ctrl_link_rate_down_shift(ctrl); 1717 + else 1718 + rc = dp_ctrl_link_lane_down_shift(ctrl); 1719 + 1737 1720 if (rc < 0) { 1738 1721 /* end with failure */ 1739 1722 break; /* lane == 1 already */ ··· 1750 1721 if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) 1751 1722 return rc; 1752 1723 1753 - /* stop txing train pattern */ 1754 - dp_ctrl_clear_training_pattern(ctrl); 1724 + if (rc == 0) { /* link train successfully */ 1725 + /* 1726 + * do not stop train pattern here 1727 + * stop link training at on_stream 1728 + * to pass compliance test 1729 + */ 1730 + } else { 1731 + /* 1732 + * link training failed 1733 + * end txing train pattern here 1734 + */ 1735 + dp_ctrl_clear_training_pattern(ctrl); 1755 1736 1756 - /* 1757 - * keep transmitting 
idle pattern until video ready 1758 - * to avoid main link from loss of sync 1759 - */ 1760 - if (rc == 0) /* link train successfully */ 1761 - dp_ctrl_push_idle(dp_ctrl); 1762 - else { 1763 - /* link training failed */ 1764 1737 dp_ctrl_deinitialize_mainlink(ctrl); 1765 1738 rc = -ECONNRESET; 1766 1739 } ··· 1770 1739 return rc; 1771 1740 } 1772 1741 1742 + static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) 1743 + { 1744 + int training_step = DP_TRAINING_NONE; 1745 + 1746 + return dp_ctrl_setup_main_link(ctrl, &training_step); 1747 + } 1748 + 1773 1749 int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) 1774 1750 { 1775 - u32 rate = 0; 1776 1751 int ret = 0; 1777 1752 bool mainlink_ready = false; 1778 1753 struct dp_ctrl_private *ctrl; ··· 1788 1751 1789 1752 ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); 1790 1753 1791 - rate = ctrl->panel->link_info.rate; 1792 - 1793 - ctrl->link->link_params.rate = rate; 1794 - ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes; 1795 1754 ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; 1796 1755 1797 1756 DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n", ··· 1801 1768 goto end; 1802 1769 } 1803 1770 } 1771 + 1772 + if (!dp_ctrl_channel_eq_ok(ctrl)) 1773 + dp_ctrl_link_retrain(ctrl); 1774 + 1775 + /* stop txing train pattern to end link training */ 1776 + dp_ctrl_clear_training_pattern(ctrl); 1804 1777 1805 1778 ret = dp_ctrl_enable_stream_clocks(ctrl); 1806 1779 if (ret) {
+45 -32
drivers/gpu/drm/msm/dp/dp_display.c
··· 55 55 EV_HPD_INIT_SETUP, 56 56 EV_HPD_PLUG_INT, 57 57 EV_IRQ_HPD_INT, 58 - EV_HPD_REPLUG_INT, 59 58 EV_HPD_UNPLUG_INT, 60 59 EV_USER_NOTIFICATION, 61 60 EV_CONNECT_PENDING_TIMEOUT, ··· 100 101 struct dp_usbpd_cb usbpd_cb; 101 102 struct dp_display_mode dp_mode; 102 103 struct msm_dp dp_display; 103 - 104 - bool encoder_mode_set; 105 104 106 105 /* wait for audio signaling */ 107 106 struct completion audio_comp; ··· 264 267 265 268 static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) 266 269 { 270 + DRM_DEBUG_DP("present=%#x sink_count=%d\n", dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT], 271 + dp->link->sink_count); 267 272 return dp_display_is_ds_bridge(dp->panel) && 268 273 (dp->link->sink_count == 0); 269 274 } ··· 282 283 } 283 284 284 285 285 - static void dp_display_set_encoder_mode(struct dp_display_private *dp) 286 - { 287 - struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private; 288 - struct msm_kms *kms = priv->kms; 289 - 290 - if (!dp->encoder_mode_set && dp->dp_display.encoder && 291 - kms->funcs->set_encoder_mode) { 292 - kms->funcs->set_encoder_mode(kms, 293 - dp->dp_display.encoder, false); 294 - 295 - dp->encoder_mode_set = true; 296 - } 297 - } 298 - 299 286 static int dp_display_send_hpd_notification(struct dp_display_private *dp, 300 287 bool hpd) 301 288 { ··· 297 312 298 313 dp->dp_display.is_connected = hpd; 299 314 315 + DRM_DEBUG_DP("hpd=%d\n", hpd); 300 316 dp_display_send_hpd_event(&dp->dp_display); 301 317 302 318 return 0; ··· 347 361 { 348 362 bool flip = false; 349 363 364 + DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized); 350 365 if (dp->core_initialized) { 351 366 DRM_DEBUG_DP("DP core already initialized\n"); 352 367 return; ··· 355 368 356 369 if (dp->usbpd->orientation == ORIENTATION_CC2) 357 370 flip = true; 358 - 359 - dp_display_set_encoder_mode(dp); 360 371 361 372 dp_power_init(dp->power, flip); 362 373 dp_ctrl_host_init(dp->ctrl, flip, reset); ··· 450 465 { 451 466 u32 
sink_request = dp->link->sink_request; 452 467 468 + DRM_DEBUG_DP("%d\n", sink_request); 453 469 if (dp->hpd_state == ST_DISCONNECTED) { 454 470 if (sink_request & DP_LINK_STATUS_UPDATED) { 471 + DRM_DEBUG_DP("Disconnected sink_request: %d\n", sink_request); 455 472 DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n"); 456 473 return -EINVAL; 457 474 } ··· 485 498 rc = dp_link_process_request(dp->link); 486 499 if (!rc) { 487 500 sink_request = dp->link->sink_request; 501 + DRM_DEBUG_DP("hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request); 488 502 if (sink_request & DS_PORT_STATUS_CHANGED) 489 503 rc = dp_display_handle_port_ststus_changed(dp); 490 504 else ··· 508 520 mutex_lock(&dp->event_mutex); 509 521 510 522 state = dp->hpd_state; 523 + DRM_DEBUG_DP("hpd_state=%d\n", state); 511 524 if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { 512 525 mutex_unlock(&dp->event_mutex); 513 526 return 0; ··· 644 655 /* start sentinel checking in case of missing uevent */ 645 656 dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); 646 657 658 + DRM_DEBUG_DP("hpd_state=%d\n", state); 647 659 /* signal the disconnect event early to ensure proper teardown */ 648 660 dp_display_handle_plugged_change(g_dp_display, false); 649 661 ··· 703 713 if (ret == -ECONNRESET) { /* cable unplugged */ 704 714 dp->core_initialized = false; 705 715 } 716 + DRM_DEBUG_DP("hpd_state=%d\n", state); 706 717 707 718 mutex_unlock(&dp->event_mutex); 708 719 ··· 845 854 846 855 dp_display = g_dp_display; 847 856 857 + DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count); 848 858 if (dp_display->power_on) { 849 859 DRM_DEBUG_DP("Link already setup, return\n"); 850 860 return 0; ··· 907 915 908 916 dp_display->power_on = false; 909 917 918 + DRM_DEBUG_DP("sink count: %d\n", dp->link->sink_count); 910 919 return 0; 911 920 } 912 921 ··· 1007 1014 void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) 1008 1015 { 1009 1016 struct dp_display_private 
*dp_display; 1010 - struct drm_device *drm; 1011 1017 1012 1018 dp_display = container_of(dp, struct dp_display_private, dp_display); 1013 - drm = dp->drm_dev; 1014 1019 1015 1020 /* 1016 1021 * if we are reading registers we need the link clocks to be on ··· 1109 1118 case EV_IRQ_HPD_INT: 1110 1119 dp_irq_hpd_handle(dp_priv, todo->data); 1111 1120 break; 1112 - case EV_HPD_REPLUG_INT: 1113 - /* do nothing */ 1114 - break; 1115 1121 case EV_USER_NOTIFICATION: 1116 1122 dp_display_send_hpd_notification(dp_priv, 1117 1123 todo->data); ··· 1150 1162 1151 1163 hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog); 1152 1164 1165 + DRM_DEBUG_DP("hpd isr status=%#x\n", hpd_isr_status); 1153 1166 if (hpd_isr_status & 0x0F) { 1154 1167 /* hpd related interrupts */ 1155 - if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK || 1156 - hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) { 1168 + if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK) 1157 1169 dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); 1158 - } 1159 1170 1160 1171 if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { 1161 1172 /* stop sentinel connect pending checking */ ··· 1162 1175 dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); 1163 1176 } 1164 1177 1165 - if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) 1166 - dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0); 1178 + if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) { 1179 + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); 1180 + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3); 1181 + } 1167 1182 1168 1183 if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK) 1169 1184 dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); ··· 1274 1285 struct platform_device *pdev = to_platform_device(dev); 1275 1286 struct msm_dp *dp_display = platform_get_drvdata(pdev); 1276 1287 struct dp_display_private *dp; 1277 - u32 status; 1288 + int sink_count = 0; 1278 1289 1279 1290 dp = container_of(dp_display, struct dp_display_private, dp_display); 1280 1291 1281 1292 mutex_lock(&dp->event_mutex); 1293 + 1294 + DRM_DEBUG_DP("Before, core_inited=%d 
power_on=%d\n", 1295 + dp->core_initialized, dp_display->power_on); 1282 1296 1283 1297 /* start from disconnected state */ 1284 1298 dp->hpd_state = ST_DISCONNECTED; ··· 1291 1299 1292 1300 dp_catalog_ctrl_hpd_config(dp->catalog); 1293 1301 1294 - status = dp_catalog_link_is_connected(dp->catalog); 1302 + /* 1303 + * set sink to normal operation mode -- D0 1304 + * before dpcd read 1305 + */ 1306 + dp_link_psm_config(dp->link, &dp->panel->link_info, false); 1295 1307 1308 + if (dp_catalog_link_is_connected(dp->catalog)) { 1309 + sink_count = drm_dp_read_sink_count(dp->aux); 1310 + if (sink_count < 0) 1311 + sink_count = 0; 1312 + } 1313 + 1314 + dp->link->sink_count = sink_count; 1296 1315 /* 1297 1316 * can not declared display is connected unless 1298 1317 * HDMI cable is plugged in and sink_count of 1299 1318 * dongle become 1 1300 1319 */ 1301 - if (status && dp->link->sink_count) 1320 + if (dp->link->sink_count) 1302 1321 dp->dp_display.is_connected = true; 1303 1322 else 1304 1323 dp->dp_display.is_connected = false; 1324 + 1325 + DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n", 1326 + dp->link->sink_count, dp->dp_display.is_connected, 1327 + dp->core_initialized, dp_display->power_on); 1305 1328 1306 1329 mutex_unlock(&dp->event_mutex); 1307 1330 ··· 1333 1326 1334 1327 mutex_lock(&dp->event_mutex); 1335 1328 1329 + DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n", 1330 + dp->core_initialized, dp_display->power_on); 1331 + 1336 1332 if (dp->core_initialized == true) { 1337 1333 /* mainlink enabled */ 1338 1334 if (dp_power_clk_status(dp->power, DP_CTRL_PM)) ··· 1348 1338 1349 1339 /* host_init will be called at pm_resume */ 1350 1340 dp->core_initialized = false; 1341 + 1342 + DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n", 1343 + dp->core_initialized, dp_display->power_on); 1351 1344 1352 1345 mutex_unlock(&dp->event_mutex); 1353 1346
+18 -32
drivers/gpu/drm/msm/dp/dp_link.c
··· 1027 1027 1028 1028 if (link->request.test_requested == DP_TEST_LINK_EDID_READ) { 1029 1029 dp_link->sink_request |= DP_TEST_LINK_EDID_READ; 1030 - return ret; 1031 - } 1032 - 1033 - ret = dp_link_process_ds_port_status_change(link); 1034 - if (!ret) { 1030 + } else if (!dp_link_process_ds_port_status_change(link)) { 1035 1031 dp_link->sink_request |= DS_PORT_STATUS_CHANGED; 1036 - return ret; 1037 - } 1038 - 1039 - ret = dp_link_process_link_training_request(link); 1040 - if (!ret) { 1032 + } else if (!dp_link_process_link_training_request(link)) { 1041 1033 dp_link->sink_request |= DP_TEST_LINK_TRAINING; 1042 - return ret; 1043 - } 1044 - 1045 - ret = dp_link_process_phy_test_pattern_request(link); 1046 - if (!ret) { 1034 + } else if (!dp_link_process_phy_test_pattern_request(link)) { 1047 1035 dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; 1048 - return ret; 1036 + } else { 1037 + ret = dp_link_process_link_status_update(link); 1038 + if (!ret) { 1039 + dp_link->sink_request |= DP_LINK_STATUS_UPDATED; 1040 + } else { 1041 + if (dp_link_is_video_pattern_requested(link)) { 1042 + ret = 0; 1043 + dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; 1044 + } 1045 + if (dp_link_is_audio_pattern_requested(link)) { 1046 + dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; 1047 + ret = -EINVAL; 1048 + } 1049 + } 1049 1050 } 1050 1051 1051 - ret = dp_link_process_link_status_update(link); 1052 - if (!ret) { 1053 - dp_link->sink_request |= DP_LINK_STATUS_UPDATED; 1054 - return ret; 1055 - } 1056 - 1057 - if (dp_link_is_video_pattern_requested(link)) { 1058 - ret = 0; 1059 - dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; 1060 - } 1061 - 1062 - if (dp_link_is_audio_pattern_requested(link)) { 1063 - dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; 1064 - return -EINVAL; 1065 - } 1066 - 1052 + DRM_DEBUG_DP("sink request=%#x", dp_link->sink_request); 1067 1053 return ret; 1068 1054 } 1069 1055
+7 -2
drivers/gpu/drm/msm/dp/dp_panel.c
··· 271 271 { 272 272 struct edid *last_block; 273 273 u8 *raw_edid; 274 - bool is_edid_corrupt; 274 + bool is_edid_corrupt = false; 275 275 276 276 if (!edid) { 277 277 DRM_ERROR("invalid edid input\n"); ··· 303 303 panel = container_of(dp_panel, struct dp_panel_private, dp_panel); 304 304 305 305 if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) { 306 - u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid); 306 + u8 checksum; 307 + 308 + if (dp_panel->edid) 309 + checksum = dp_panel_get_edid_checksum(dp_panel->edid); 310 + else 311 + checksum = dp_panel->connector->real_edid_checksum; 307 312 308 313 dp_link_send_edid_checksum(panel->link, checksum); 309 314 dp_link_send_test_response(panel->link);
+3
drivers/gpu/drm/msm/dp/dp_power.c
··· 208 208 209 209 int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type) 210 210 { 211 + DRM_DEBUG_DP("core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n", 212 + dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on); 213 + 211 214 if (pm_type == DP_CORE_PM) 212 215 return dp_power->core_clks_on; 213 216
+10 -5
drivers/gpu/drm/msm/dsi/dsi.c
··· 13 13 return msm_dsi->encoder; 14 14 } 15 15 16 + bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi) 17 + { 18 + unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host); 19 + 20 + return !(host_flags & MIPI_DSI_MODE_VIDEO); 21 + } 22 + 16 23 static int dsi_get_phy(struct msm_dsi *msm_dsi) 17 24 { 18 25 struct platform_device *pdev = msm_dsi->pdev; ··· 33 26 } 34 27 35 28 phy_pdev = of_find_device_by_node(phy_node); 36 - if (phy_pdev) 29 + if (phy_pdev) { 37 30 msm_dsi->phy = platform_get_drvdata(phy_pdev); 31 + msm_dsi->phy_dev = &phy_pdev->dev; 32 + } 38 33 39 34 of_node_put(phy_node); 40 35 ··· 44 35 DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); 45 36 return -EPROBE_DEFER; 46 37 } 47 - 48 - msm_dsi->phy_dev = get_device(&phy_pdev->dev); 49 38 50 39 return 0; 51 40 } ··· 250 243 msm_dsi->connector = NULL; 251 244 goto fail; 252 245 } 253 - 254 - msm_dsi_manager_setup_encoder(msm_dsi->id); 255 246 256 247 priv->bridges[priv->num_bridges++] = msm_dsi->bridge; 257 248 priv->connectors[priv->num_connectors++] = msm_dsi->connector;
+10 -8
drivers/gpu/drm/msm/dsi/dsi.h
··· 80 80 struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); 81 81 int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); 82 82 bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); 83 - void msm_dsi_manager_setup_encoder(int id); 84 83 int msm_dsi_manager_register(struct msm_dsi *msm_dsi); 85 84 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); 86 85 bool msm_dsi_manager_validate_current_config(u8 id); 86 + void msm_dsi_manager_tpg_enable(void); 87 87 88 88 /* msm dsi */ 89 89 static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi) ··· 109 109 int msm_dsi_host_disable(struct mipi_dsi_host *host); 110 110 int msm_dsi_host_power_on(struct mipi_dsi_host *host, 111 111 struct msm_dsi_phy_shared_timings *phy_shared_timings, 112 - bool is_dual_dsi); 112 + bool is_bonded_dsi, struct msm_dsi_phy *phy); 113 113 int msm_dsi_host_power_off(struct mipi_dsi_host *host); 114 114 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 115 115 const struct drm_display_mode *mode); ··· 123 123 void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); 124 124 void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, 125 125 struct msm_dsi_phy_clk_request *clk_req, 126 - bool is_dual_dsi); 126 + bool is_bonded_dsi); 127 127 void msm_dsi_host_destroy(struct mipi_dsi_host *host); 128 128 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, 129 129 struct drm_device *dev); ··· 145 145 int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova); 146 146 int dsi_clk_init_v2(struct msm_dsi_host *msm_host); 147 147 int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host); 148 - int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi); 149 - int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi); 148 + int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi); 149 + int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi); 150 150 void 
msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host); 151 + void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host); 152 + 151 153 /* dsi phy */ 152 154 struct msm_dsi_phy; 153 155 struct msm_dsi_phy_shared_timings { ··· 166 164 void msm_dsi_phy_driver_register(void); 167 165 void msm_dsi_phy_driver_unregister(void); 168 166 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, 169 - struct msm_dsi_phy_clk_request *clk_req); 167 + struct msm_dsi_phy_clk_request *clk_req, 168 + struct msm_dsi_phy_shared_timings *shared_timings); 170 169 void msm_dsi_phy_disable(struct msm_dsi_phy *phy); 171 - void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, 172 - struct msm_dsi_phy_shared_timings *shared_timing); 173 170 void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, 174 171 enum msm_dsi_phy_usecase uc); 175 172 int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, ··· 176 175 void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy); 177 176 int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy); 178 177 void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy); 178 + bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable); 179 179 180 180 #endif /* __DSI_CONNECTOR_H__ */ 181 181
+74
drivers/gpu/drm/msm/dsi/dsi.xml.h
··· 105 105 LANE_SWAP_3210 = 7, 106 106 }; 107 107 108 + enum video_config_bpp { 109 + VIDEO_CONFIG_18BPP = 0, 110 + VIDEO_CONFIG_24BPP = 1, 111 + }; 112 + 113 + enum video_pattern_sel { 114 + VID_PRBS = 0, 115 + VID_INCREMENTAL = 1, 116 + VID_FIXED = 2, 117 + VID_MDSS_GENERAL_PATTERN = 3, 118 + }; 119 + 120 + enum cmd_mdp_stream0_pattern_sel { 121 + CMD_MDP_PRBS = 0, 122 + CMD_MDP_INCREMENTAL = 1, 123 + CMD_MDP_FIXED = 2, 124 + CMD_MDP_MDSS_GENERAL_PATTERN = 3, 125 + }; 126 + 127 + enum cmd_dma_pattern_sel { 128 + CMD_DMA_PRBS = 0, 129 + CMD_DMA_INCREMENTAL = 1, 130 + CMD_DMA_FIXED = 2, 131 + CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3, 132 + }; 133 + 108 134 #define DSI_IRQ_CMD_DMA_DONE 0x00000001 109 135 #define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002 110 136 #define DSI_IRQ_CMD_MDP_DONE 0x00000100 ··· 544 518 #define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000 545 519 546 520 #define REG_DSI_LANE_CTRL 0x000000a8 521 + #define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000 547 522 #define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000 548 523 549 524 #define REG_DSI_LANE_SWAP_CTRL 0x000000ac ··· 590 563 591 564 #define REG_DSI_PHY_RESET 0x00000128 592 565 #define DSI_PHY_RESET_RESET 0x00000001 566 + 567 + #define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160 568 + 569 + #define REG_DSI_TPG_MAIN_CONTROL 0x00000198 570 + #define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100 571 + 572 + #define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0 573 + #define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003 574 + #define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0 575 + static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum video_config_bpp val) 576 + { 577 + return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK; 578 + } 579 + #define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004 580 + 581 + #define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158 582 + #define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000 583 + #define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 
16 584 + static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val) 585 + { 586 + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK; 587 + } 588 + #define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300 589 + #define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8 590 + static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val) 591 + { 592 + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK; 593 + } 594 + #define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030 595 + #define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4 596 + static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val) 597 + { 598 + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK; 599 + } 600 + #define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004 601 + #define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002 602 + #define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001 603 + 604 + #define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168 605 + 606 + #define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180 607 + #define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001 608 + 609 + #define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c 610 + #define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080 611 + #define DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000 612 + #define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000 593 613 594 614 #define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c 595 615 #define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
+24 -9
drivers/gpu/drm/msm/dsi/dsi_cfg.c
··· 32 32 static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { 33 33 .io_offset = DSI_6G_REG_SHIFT, 34 34 .reg_cfg = { 35 - .num = 4, 35 + .num = 3, 36 36 .regs = { 37 - {"gdsc", -1, -1}, 38 37 {"vdd", 150000, 100}, /* 3.0 V */ 39 38 {"vdda", 100000, 100}, /* 1.2 V */ 40 39 {"vddio", 100000, 100}, /* 1.8 V */ ··· 52 53 static const struct msm_dsi_config msm8916_dsi_cfg = { 53 54 .io_offset = DSI_6G_REG_SHIFT, 54 55 .reg_cfg = { 55 - .num = 3, 56 + .num = 2, 56 57 .regs = { 57 - {"gdsc", -1, -1}, 58 58 {"vdda", 100000, 100}, /* 1.2 V */ 59 59 {"vddio", 100000, 100}, /* 1.8 V */ 60 60 }, ··· 71 73 static const struct msm_dsi_config msm8976_dsi_cfg = { 72 74 .io_offset = DSI_6G_REG_SHIFT, 73 75 .reg_cfg = { 74 - .num = 3, 76 + .num = 2, 75 77 .regs = { 76 - {"gdsc", -1, -1}, 77 78 {"vdda", 100000, 100}, /* 1.2 V */ 78 79 {"vddio", 100000, 100}, /* 1.8 V */ 79 80 }, ··· 86 89 static const struct msm_dsi_config msm8994_dsi_cfg = { 87 90 .io_offset = DSI_6G_REG_SHIFT, 88 91 .reg_cfg = { 89 - .num = 7, 92 + .num = 6, 90 93 .regs = { 91 - {"gdsc", -1, -1}, 92 94 {"vdda", 100000, 100}, /* 1.25 V */ 93 95 {"vddio", 100000, 100}, /* 1.8 V */ 94 96 {"vcca", 10000, 100}, /* 1.0 V */ ··· 150 154 .reg_cfg = { 151 155 .num = 2, 152 156 .regs = { 153 - {"vdd", 73400, 32 }, /* 0.9 V */ 154 157 {"vdda", 12560, 4 }, /* 1.2 V */ 155 158 }, 156 159 }, ··· 191 196 }, 192 197 .bus_clk_names = dsi_sc7180_bus_clk_names, 193 198 .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names), 199 + .io_start = { 0xae94000 }, 200 + .num_dsi = 1, 201 + }; 202 + 203 + static const char * const dsi_sc7280_bus_clk_names[] = { 204 + "iface", "bus", 205 + }; 206 + 207 + static const struct msm_dsi_config sc7280_dsi_cfg = { 208 + .io_offset = DSI_6G_REG_SHIFT, 209 + .reg_cfg = { 210 + .num = 1, 211 + .regs = { 212 + {"vdda", 8350, 0 }, /* 1.2 V */ 213 + }, 214 + }, 215 + .bus_clk_names = dsi_sc7280_bus_clk_names, 216 + .num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names), 194 217 .io_start = { 
0xae94000 }, 195 218 .num_dsi = 1, 196 219 }; ··· 280 267 &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 281 268 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, 282 269 &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 270 + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0, 271 + &sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 283 272 }; 284 273 285 274 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+2 -1
drivers/gpu/drm/msm/dsi/dsi_cfg.h
··· 24 24 #define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000 25 25 #define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000 26 26 #define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 27 + #define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000 27 28 28 29 #define MSM_DSI_V2_VER_MINOR_8064 0x0 29 30 ··· 48 47 void* (*tx_buf_get)(struct msm_dsi_host *msm_host); 49 48 void (*tx_buf_put)(struct msm_dsi_host *msm_host); 50 49 int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova); 51 - int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi); 50 + int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_bonded_dsi); 52 51 }; 53 52 54 53 struct msm_dsi_cfg_handler {
+118 -43
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 27 27 #include "dsi_cfg.h" 28 28 #include "msm_kms.h" 29 29 #include "msm_gem.h" 30 + #include "phy/dsi_phy.h" 30 31 31 32 #define DSI_RESET_TOGGLE_DELAY_MS 20 32 33 ··· 168 167 int dlane_swap; 169 168 int num_data_lanes; 170 169 170 + /* from phy DT */ 171 + bool cphy_mode; 172 + 171 173 u32 dma_cmd_ctrl_restore; 172 174 173 175 bool registered; ··· 207 203 { 208 204 const struct msm_dsi_cfg_handler *cfg_hnd = NULL; 209 205 struct device *dev = &msm_host->pdev->dev; 210 - struct regulator *gdsc_reg; 211 206 struct clk *ahb_clk; 212 207 int ret; 213 208 u32 major = 0, minor = 0; 214 209 215 - gdsc_reg = regulator_get(dev, "gdsc"); 216 - if (IS_ERR(gdsc_reg)) { 217 - pr_err("%s: cannot get gdsc\n", __func__); 218 - goto exit; 219 - } 220 - 221 210 ahb_clk = msm_clk_get(msm_host->pdev, "iface"); 222 211 if (IS_ERR(ahb_clk)) { 223 212 pr_err("%s: cannot get interface clock\n", __func__); 224 - goto put_gdsc; 213 + goto exit; 225 214 } 226 215 227 216 pm_runtime_get_sync(dev); 228 217 229 - ret = regulator_enable(gdsc_reg); 230 - if (ret) { 231 - pr_err("%s: unable to enable gdsc\n", __func__); 232 - goto put_gdsc; 233 - } 234 - 235 218 ret = clk_prepare_enable(ahb_clk); 236 219 if (ret) { 237 220 pr_err("%s: unable to enable ahb_clk\n", __func__); 238 - goto disable_gdsc; 221 + goto runtime_put; 239 222 } 240 223 241 224 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); ··· 237 246 238 247 disable_clks: 239 248 clk_disable_unprepare(ahb_clk); 240 - disable_gdsc: 241 - regulator_disable(gdsc_reg); 249 + runtime_put: 242 250 pm_runtime_put_sync(dev); 243 - put_gdsc: 244 - regulator_put(gdsc_reg); 245 251 exit: 246 252 return cfg_hnd; 247 253 } ··· 498 510 499 511 int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) 500 512 { 513 + u32 byte_intf_rate; 501 514 int ret; 502 515 503 516 DBG("Set clk rates: pclk=%d, byteclk=%d", ··· 518 529 } 519 530 520 531 if (msm_host->byte_intf_clk) { 521 - ret = clk_set_rate(msm_host->byte_intf_clk, 522 - 
msm_host->byte_clk_rate / 2); 532 + /* For CPHY, byte_intf_clk is same as byte_clk */ 533 + if (msm_host->cphy_mode) 534 + byte_intf_rate = msm_host->byte_clk_rate; 535 + else 536 + byte_intf_rate = msm_host->byte_clk_rate / 2; 537 + 538 + ret = clk_set_rate(msm_host->byte_intf_clk, byte_intf_rate); 523 539 if (ret) { 524 540 pr_err("%s: Failed to set rate byte intf clk, %d\n", 525 541 __func__, ret); ··· 673 679 clk_disable_unprepare(msm_host->byte_clk); 674 680 } 675 681 676 - static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi) 682 + static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 677 683 { 678 684 struct drm_display_mode *mode = msm_host->mode; 679 685 u32 pclk_rate; ··· 681 687 pclk_rate = mode->clock * 1000; 682 688 683 689 /* 684 - * For dual DSI mode, the current DRM mode has the complete width of the 690 + * For bonded DSI mode, the current DRM mode has the complete width of the 685 691 * panel. Since, the complete panel is driven by two DSI controllers, 686 692 * the clock rates have to be split between the two dsi controllers. 687 693 * Adjust the byte and pixel clock rates for each dsi host accordingly. 
688 694 */ 689 - if (is_dual_dsi) 695 + if (is_bonded_dsi) 690 696 pclk_rate /= 2; 691 697 692 698 return pclk_rate; 693 699 } 694 700 695 - static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi) 701 + static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 696 702 { 697 703 u8 lanes = msm_host->lanes; 698 704 u32 bpp = dsi_get_bpp(msm_host->format); 699 - u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi); 705 + u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi); 700 706 u64 pclk_bpp = (u64)pclk_rate * bpp; 701 707 702 708 if (lanes == 0) { ··· 704 710 lanes = 1; 705 711 } 706 712 707 - do_div(pclk_bpp, (8 * lanes)); 713 + /* CPHY "byte_clk" is in units of 16 bits */ 714 + if (msm_host->cphy_mode) 715 + do_div(pclk_bpp, (16 * lanes)); 716 + else 717 + do_div(pclk_bpp, (8 * lanes)); 708 718 709 719 msm_host->pixel_clk_rate = pclk_rate; 710 720 msm_host->byte_clk_rate = pclk_bpp; ··· 718 720 719 721 } 720 722 721 - int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi) 723 + int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 722 724 { 723 725 if (!msm_host->mode) { 724 726 pr_err("%s: mode not set\n", __func__); 725 727 return -EINVAL; 726 728 } 727 729 728 - dsi_calc_pclk(msm_host, is_dual_dsi); 730 + dsi_calc_pclk(msm_host, is_bonded_dsi); 729 731 msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk); 730 732 return 0; 731 733 } 732 734 733 - int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi) 735 + int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 734 736 { 735 737 u32 bpp = dsi_get_bpp(msm_host->format); 736 738 u64 pclk_bpp; 737 739 unsigned int esc_mhz, esc_div; 738 740 unsigned long byte_mhz; 739 741 740 - dsi_calc_pclk(msm_host, is_dual_dsi); 742 + dsi_calc_pclk(msm_host, is_bonded_dsi); 741 743 742 - pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp; 744 + pclk_bpp = 
(u64)dsi_get_pclk_rate(msm_host, is_bonded_dsi) * bpp; 743 745 do_div(pclk_bpp, 8); 744 746 msm_host->src_clk_rate = pclk_bpp; 745 747 ··· 832 834 } 833 835 834 836 static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, 835 - struct msm_dsi_phy_shared_timings *phy_shared_timings) 837 + struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy) 836 838 { 837 839 u32 flags = msm_host->mode_flags; 838 840 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; ··· 927 929 928 930 if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) { 929 931 lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL); 932 + 933 + if (msm_dsi_phy_set_continuous_clock(phy, enable)) 934 + lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY; 935 + 930 936 dsi_write(msm_host, REG_DSI_LANE_CTRL, 931 937 lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); 932 938 } ··· 938 936 data |= DSI_CTRL_ENABLE; 939 937 940 938 dsi_write(msm_host, REG_DSI_CTRL, data); 939 + 940 + if (msm_host->cphy_mode) 941 + dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0)); 941 942 } 942 943 943 - static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi) 944 + static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 944 945 { 945 946 struct drm_display_mode *mode = msm_host->mode; 946 947 u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */ ··· 961 956 DBG(""); 962 957 963 958 /* 964 - * For dual DSI mode, the current DRM mode has 959 + * For bonded DSI mode, the current DRM mode has 965 960 * the complete width of the panel. Since, the complete 966 961 * panel is driven by two DSI controllers, the horizontal 967 962 * timings have to be split between the two dsi controllers. 968 963 * Adjust the DSI host timing values accordingly. 
969 964 */ 970 - if (is_dual_dsi) { 965 + if (is_bonded_dsi) { 971 966 h_total /= 2; 972 967 hs_end /= 2; 973 968 ha_start /= 2; ··· 2231 2226 struct clk *byte_clk_provider, *pixel_clk_provider; 2232 2227 int ret; 2233 2228 2229 + msm_host->cphy_mode = src_phy->cphy_mode; 2230 + 2234 2231 ret = msm_dsi_phy_get_clk_provider(src_phy, 2235 2232 &byte_clk_provider, &pixel_clk_provider); 2236 2233 if (ret) { ··· 2292 2285 2293 2286 void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, 2294 2287 struct msm_dsi_phy_clk_request *clk_req, 2295 - bool is_dual_dsi) 2288 + bool is_bonded_dsi) 2296 2289 { 2297 2290 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2298 2291 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2299 2292 int ret; 2300 2293 2301 - ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi); 2294 + ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi); 2302 2295 if (ret) { 2303 2296 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); 2304 2297 return; 2305 2298 } 2306 2299 2307 - clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; 2300 + /* CPHY transmits 16 bits over 7 clock cycles 2301 + * "byte_clk" is in units of 16-bits (see dsi_calc_pclk), 2302 + * so multiply by 7 to get the "bitclk rate" 2303 + */ 2304 + if (msm_host->cphy_mode) 2305 + clk_req->bitclk_rate = msm_host->byte_clk_rate * 7; 2306 + else 2307 + clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; 2308 2308 clk_req->escclk_rate = msm_host->esc_clk_rate; 2309 2309 } 2310 2310 ··· 2368 2354 2369 2355 int msm_dsi_host_power_on(struct mipi_dsi_host *host, 2370 2356 struct msm_dsi_phy_shared_timings *phy_shared_timings, 2371 - bool is_dual_dsi) 2357 + bool is_bonded_dsi, struct msm_dsi_phy *phy) 2372 2358 { 2373 2359 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2374 2360 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; ··· 2406 2392 goto fail_disable_clk; 2407 2393 } 2408 2394 2409 - dsi_timing_setup(msm_host, is_dual_dsi); 2395 + 
dsi_timing_setup(msm_host, is_bonded_dsi); 2410 2396 dsi_sw_reset(msm_host); 2411 - dsi_ctrl_config(msm_host, true, phy_shared_timings); 2397 + dsi_ctrl_config(msm_host, true, phy_shared_timings, phy); 2412 2398 2413 2399 if (msm_host->disp_en_gpio) 2414 2400 gpiod_set_value(msm_host->disp_en_gpio, 1); ··· 2439 2425 goto unlock_ret; 2440 2426 } 2441 2427 2442 - dsi_ctrl_config(msm_host, false, NULL); 2428 + dsi_ctrl_config(msm_host, false, NULL, NULL); 2443 2429 2444 2430 if (msm_host->disp_en_gpio) 2445 2431 gpiod_set_value(msm_host->disp_en_gpio, 0); ··· 2508 2494 msm_host->ctrl_base, "dsi%d_ctrl", msm_host->id); 2509 2495 2510 2496 pm_runtime_put_sync(&msm_host->pdev->dev); 2497 + } 2498 + 2499 + static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host) 2500 + { 2501 + u32 reg; 2502 + 2503 + reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); 2504 + 2505 + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff); 2506 + /* draw checkered rectangle pattern */ 2507 + dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL, 2508 + DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN); 2509 + /* use 24-bit RGB test pttern */ 2510 + dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG, 2511 + DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) | 2512 + DSI_TPG_VIDEO_CONFIG_RGB); 2513 + 2514 + reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN); 2515 + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg); 2516 + 2517 + DBG("Video test pattern setup done\n"); 2518 + } 2519 + 2520 + static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host) 2521 + { 2522 + u32 reg; 2523 + 2524 + reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); 2525 + 2526 + /* initial value for test pattern */ 2527 + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff); 2528 + 2529 + reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN); 2530 + 2531 + dsi_write(msm_host, 
REG_DSI_TEST_PATTERN_GEN_CTRL, reg); 2532 + /* draw checkered rectangle pattern */ 2533 + dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2, 2534 + DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN); 2535 + 2536 + DBG("Cmd test pattern setup done\n"); 2537 + } 2538 + 2539 + void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host) 2540 + { 2541 + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2542 + bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO); 2543 + u32 reg; 2544 + 2545 + if (is_video_mode) 2546 + msm_dsi_host_video_test_pattern_setup(msm_host); 2547 + else 2548 + msm_dsi_host_cmd_test_pattern_setup(msm_host); 2549 + 2550 + reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); 2551 + /* enable the test pattern generator */ 2552 + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN)); 2553 + 2554 + /* for command mode need to trigger one frame from tpg */ 2555 + if (!is_video_mode) 2556 + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 2557 + DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER); 2511 2558 }
+74 -72
drivers/gpu/drm/msm/dsi/dsi_manager.c
··· 21 21 struct msm_dsi_manager { 22 22 struct msm_dsi *dsi[DSI_MAX]; 23 23 24 - bool is_dual_dsi; 24 + bool is_bonded_dsi; 25 25 bool is_sync_needed; 26 26 int master_dsi_link_id; 27 27 }; 28 28 29 29 static struct msm_dsi_manager msm_dsim_glb; 30 30 31 - #define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi) 31 + #define IS_BONDED_DSI() (msm_dsim_glb.is_bonded_dsi) 32 32 #define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed) 33 33 #define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id) 34 34 ··· 42 42 return msm_dsim_glb.dsi[(id + 1) % DSI_MAX]; 43 43 } 44 44 45 - static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id) 45 + static int dsi_mgr_parse_of(struct device_node *np, int id) 46 46 { 47 47 struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; 48 48 49 - /* We assume 2 dsi nodes have the same information of dual-dsi and 50 - * sync-mode, and only one node specifies master in case of dual mode. 49 + /* We assume 2 dsi nodes have the same information of bonded dsi and 50 + * sync-mode, and only one node specifies master in case of bonded mode. 51 51 */ 52 - if (!msm_dsim->is_dual_dsi) 53 - msm_dsim->is_dual_dsi = of_property_read_bool( 54 - np, "qcom,dual-dsi-mode"); 52 + if (!msm_dsim->is_bonded_dsi) 53 + msm_dsim->is_bonded_dsi = of_property_read_bool(np, "qcom,dual-dsi-mode"); 55 54 56 - if (msm_dsim->is_dual_dsi) { 55 + if (msm_dsim->is_bonded_dsi) { 57 56 if (of_property_read_bool(np, "qcom,master-dsi")) 58 57 msm_dsim->master_dsi_link_id = id; 59 58 if (!msm_dsim->is_sync_needed) ··· 71 72 struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); 72 73 int ret; 73 74 74 - if (!IS_DUAL_DSI()) { 75 + if (!IS_BONDED_DSI()) { 75 76 ret = msm_dsi_host_register(msm_dsi->host, true); 76 77 if (ret) 77 78 return ret; ··· 99 100 if (ret) 100 101 return ret; 101 102 102 - /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */ 103 + /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. 
*/ 103 104 msm_dsi_phy_set_usecase(clk_master_dsi->phy, 104 105 MSM_DSI_PHY_MASTER); 105 106 msm_dsi_phy_set_usecase(clk_slave_dsi->phy, ··· 118 119 { 119 120 struct msm_dsi_phy_clk_request clk_req; 120 121 int ret; 121 - bool is_dual_dsi = IS_DUAL_DSI(); 122 + bool is_bonded_dsi = IS_BONDED_DSI(); 122 123 123 - msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi); 124 + msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi); 124 125 125 - ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req); 126 - msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings); 126 + ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings); 127 127 128 128 return ret; 129 129 } ··· 136 138 struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); 137 139 int ret; 138 140 139 - /* In case of dual DSI, some registers in PHY1 have been programmed 141 + /* In case of bonded DSI, some registers in PHY1 have been programmed 140 142 * during PLL0 clock's set_rate. The PHY1 reset called by host1 here 141 143 * will silently reset those PHY1 registers. Therefore we need to reset 142 144 * and enable both PHYs before any PLL clock operation. 143 145 */ 144 - if (IS_DUAL_DSI() && mdsi && sdsi) { 146 + if (IS_BONDED_DSI() && mdsi && sdsi) { 145 147 if (!mdsi->phy_enabled && !sdsi->phy_enabled) { 146 148 msm_dsi_host_reset_phy(mdsi->host); 147 149 msm_dsi_host_reset_phy(sdsi->host); ··· 176 178 struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); 177 179 178 180 /* disable DSI phy 179 - * In dual-dsi configuration, the phy should be disabled for the 181 + * In bonded dsi configuration, the phy should be disabled for the 180 182 * first controller only when the second controller is disabled. 
181 183 */ 182 184 msm_dsi->phy_enabled = false; 183 - if (IS_DUAL_DSI() && mdsi && sdsi) { 185 + if (IS_BONDED_DSI() && mdsi && sdsi) { 184 186 if (!mdsi->phy_enabled && !sdsi->phy_enabled) { 185 187 msm_dsi_phy_disable(sdsi->phy); 186 188 msm_dsi_phy_disable(mdsi->phy); ··· 215 217 return dsi_bridge->id; 216 218 } 217 219 218 - static bool dsi_mgr_is_cmd_mode(struct msm_dsi *msm_dsi) 219 - { 220 - unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host); 221 - return !(host_flags & MIPI_DSI_MODE_VIDEO); 222 - } 223 - 224 - void msm_dsi_manager_setup_encoder(int id) 225 - { 226 - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 227 - struct msm_drm_private *priv = msm_dsi->dev->dev_private; 228 - struct msm_kms *kms = priv->kms; 229 - struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi); 230 - 231 - if (encoder && kms->funcs->set_encoder_mode) 232 - kms->funcs->set_encoder_mode(kms, encoder, 233 - dsi_mgr_is_cmd_mode(msm_dsi)); 234 - } 235 - 236 220 static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id) 237 221 { 238 222 struct msm_drm_private *priv = conn->dev->dev_private; ··· 224 244 struct msm_dsi *master_dsi, *slave_dsi; 225 245 struct drm_panel *panel; 226 246 227 - if (IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id)) { 247 + if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) { 228 248 master_dsi = other_dsi; 229 249 slave_dsi = msm_dsi; 230 250 } else { ··· 233 253 } 234 254 235 255 /* 236 - * There is only 1 panel in the global panel list for dual DSI mode. 256 + * There is only 1 panel in the global panel list for bonded DSI mode. 237 257 * Therefore slave dsi should get the drm_panel instance from master 238 258 * dsi. 
239 259 */ ··· 244 264 return PTR_ERR(panel); 245 265 } 246 266 247 - if (!panel || !IS_DUAL_DSI()) 267 + if (!panel || !IS_BONDED_DSI()) 248 268 goto out; 249 269 250 270 drm_object_attach_property(&conn->base, 251 271 conn->dev->mode_config.tile_property, 0); 252 272 253 273 /* 254 - * Set split display info to kms once dual DSI panel is connected to 274 + * Set split display info to kms once bonded DSI panel is connected to 255 275 * both hosts. 256 276 */ 257 277 if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) { 258 278 kms->funcs->set_split_display(kms, master_dsi->encoder, 259 279 slave_dsi->encoder, 260 - dsi_mgr_is_cmd_mode(msm_dsi)); 280 + msm_dsi_is_cmd_mode(msm_dsi)); 261 281 } 262 282 263 283 out: ··· 297 317 return 0; 298 318 299 319 /* 300 - * In dual DSI mode, we have one connector that can be 320 + * In bonded DSI mode, we have one connector that can be 301 321 * attached to the drm_panel. 302 322 */ 303 323 num = drm_panel_get_modes(panel, connector); ··· 346 366 struct mipi_dsi_host *host = msm_dsi->host; 347 367 struct drm_panel *panel = msm_dsi->panel; 348 368 struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX]; 349 - bool is_dual_dsi = IS_DUAL_DSI(); 369 + bool is_bonded_dsi = IS_BONDED_DSI(); 350 370 int ret; 351 371 352 372 DBG("id=%d", id); 353 373 if (!msm_dsi_device_connected(msm_dsi)) 354 374 return; 355 375 356 - /* Do nothing with the host if it is slave-DSI in case of dual DSI */ 357 - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) 376 + /* Do nothing with the host if it is slave-DSI in case of bonded DSI */ 377 + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) 358 378 return; 359 379 360 380 ret = dsi_mgr_phy_enable(id, phy_shared_timings); 361 381 if (ret) 362 382 goto phy_en_fail; 363 383 364 - ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi); 384 + ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_bonded_dsi, msm_dsi->phy); 365 385 if (ret) { 366 386 pr_err("%s: power on 
host %d failed, %d\n", __func__, id, ret); 367 387 goto host_on_fail; 368 388 } 369 389 370 - if (is_dual_dsi && msm_dsi1) { 390 + if (is_bonded_dsi && msm_dsi1) { 371 391 ret = msm_dsi_host_power_on(msm_dsi1->host, 372 - &phy_shared_timings[DSI_1], is_dual_dsi); 392 + &phy_shared_timings[DSI_1], is_bonded_dsi, msm_dsi1->phy); 373 393 if (ret) { 374 394 pr_err("%s: power on host1 failed, %d\n", 375 395 __func__, ret); ··· 395 415 goto host_en_fail; 396 416 } 397 417 398 - if (is_dual_dsi && msm_dsi1) { 418 + if (is_bonded_dsi && msm_dsi1) { 399 419 ret = msm_dsi_host_enable(msm_dsi1->host); 400 420 if (ret) { 401 421 pr_err("%s: enable host1 failed, %d\n", __func__, ret); ··· 411 431 if (panel) 412 432 drm_panel_unprepare(panel); 413 433 panel_prep_fail: 414 - if (is_dual_dsi && msm_dsi1) 434 + if (is_bonded_dsi && msm_dsi1) 415 435 msm_dsi_host_power_off(msm_dsi1->host); 416 436 host1_on_fail: 417 437 msm_dsi_host_power_off(host); ··· 421 441 return; 422 442 } 423 443 444 + void msm_dsi_manager_tpg_enable(void) 445 + { 446 + struct msm_dsi *m_dsi = dsi_mgr_get_dsi(DSI_0); 447 + struct msm_dsi *s_dsi = dsi_mgr_get_dsi(DSI_1); 448 + 449 + /* if dual dsi, trigger tpg on master first then slave */ 450 + if (m_dsi) { 451 + msm_dsi_host_test_pattern_en(m_dsi->host); 452 + if (IS_BONDED_DSI() && s_dsi) 453 + msm_dsi_host_test_pattern_en(s_dsi->host); 454 + } 455 + } 456 + 424 457 static void dsi_mgr_bridge_enable(struct drm_bridge *bridge) 425 458 { 426 459 int id = dsi_mgr_bridge_get_id(bridge); 427 460 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 428 461 struct drm_panel *panel = msm_dsi->panel; 429 - bool is_dual_dsi = IS_DUAL_DSI(); 462 + bool is_bonded_dsi = IS_BONDED_DSI(); 430 463 int ret; 431 464 432 465 DBG("id=%d", id); 433 466 if (!msm_dsi_device_connected(msm_dsi)) 434 467 return; 435 468 436 - /* Do nothing with the host if it is slave-DSI in case of dual DSI */ 437 - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) 469 + /* Do nothing with the host if it is 
slave-DSI in case of bonded DSI */ 470 + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) 438 471 return; 439 472 440 473 if (panel) { ··· 464 471 int id = dsi_mgr_bridge_get_id(bridge); 465 472 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 466 473 struct drm_panel *panel = msm_dsi->panel; 467 - bool is_dual_dsi = IS_DUAL_DSI(); 474 + bool is_bonded_dsi = IS_BONDED_DSI(); 468 475 int ret; 469 476 470 477 DBG("id=%d", id); 471 478 if (!msm_dsi_device_connected(msm_dsi)) 472 479 return; 473 480 474 - /* Do nothing with the host if it is slave-DSI in case of dual DSI */ 475 - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) 481 + /* Do nothing with the host if it is slave-DSI in case of bonded DSI */ 482 + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) 476 483 return; 477 484 478 485 if (panel) { ··· 490 497 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); 491 498 struct mipi_dsi_host *host = msm_dsi->host; 492 499 struct drm_panel *panel = msm_dsi->panel; 493 - bool is_dual_dsi = IS_DUAL_DSI(); 500 + bool is_bonded_dsi = IS_BONDED_DSI(); 494 501 int ret; 495 502 496 503 DBG("id=%d", id); ··· 499 506 return; 500 507 501 508 /* 502 - * Do nothing with the host if it is slave-DSI in case of dual DSI. 509 + * Do nothing with the host if it is slave-DSI in case of bonded DSI. 503 510 * It is safe to call dsi_mgr_phy_disable() here because a single PHY 504 511 * won't be diabled until both PHYs request disable. 
505 512 */ 506 - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) 513 + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) 507 514 goto disable_phy; 508 515 509 516 ret = msm_dsi_host_disable(host); 510 517 if (ret) 511 518 pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); 512 519 513 - if (is_dual_dsi && msm_dsi1) { 520 + if (is_bonded_dsi && msm_dsi1) { 514 521 ret = msm_dsi_host_disable(msm_dsi1->host); 515 522 if (ret) 516 523 pr_err("%s: host1 disable failed, %d\n", __func__, ret); ··· 530 537 if (ret) 531 538 pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); 532 539 533 - if (is_dual_dsi && msm_dsi1) { 540 + if (is_bonded_dsi && msm_dsi1) { 534 541 ret = msm_dsi_host_power_off(msm_dsi1->host); 535 542 if (ret) 536 543 pr_err("%s: host1 power off failed, %d\n", ··· 549 556 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 550 557 struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); 551 558 struct mipi_dsi_host *host = msm_dsi->host; 552 - bool is_dual_dsi = IS_DUAL_DSI(); 559 + bool is_bonded_dsi = IS_BONDED_DSI(); 553 560 554 561 DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); 555 562 556 - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) 563 + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) 557 564 return; 558 565 559 566 msm_dsi_host_set_display_mode(host, adjusted_mode); 560 - if (is_dual_dsi && other_dsi) 567 + if (is_bonded_dsi && other_dsi) 561 568 msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); 562 569 } 563 570 ··· 633 640 634 641 bool msm_dsi_manager_validate_current_config(u8 id) 635 642 { 636 - bool is_dual_dsi = IS_DUAL_DSI(); 643 + bool is_bonded_dsi = IS_BONDED_DSI(); 637 644 638 645 /* 639 - * For dual DSI, we only have one drm panel. For this 646 + * For bonded DSI, we only have one drm panel. For this 640 647 * use case, we register only one bridge/connector. 641 648 * Skip bridge/connector initialisation if it is 642 - * slave-DSI for dual DSI configuration. 649 + * slave-DSI for bonded DSI configuration. 
643 650 */ 644 - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) { 651 + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) { 645 652 DBG("Skip bridge registration for slave DSI->id: %d\n", id); 646 653 return false; 647 654 } ··· 733 740 if (!msg->tx_buf || !msg->tx_len) 734 741 return 0; 735 742 736 - /* In dual master case, panel requires the same commands sent to 743 + /* In bonded master case, panel requires the same commands sent to 737 744 * both DSI links. Host issues the command trigger to both links 738 745 * when DSI_1 calls the cmd transfer function, no matter it happens 739 746 * before or after DSI_0 cmd transfer. ··· 802 809 803 810 msm_dsim->dsi[id] = msm_dsi; 804 811 805 - ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id); 812 + ret = dsi_mgr_parse_of(msm_dsi->pdev->dev.of_node, id); 806 813 if (ret) { 807 - pr_err("%s: failed to parse dual DSI info\n", __func__); 814 + pr_err("%s: failed to parse OF DSI info\n", __func__); 808 815 goto fail; 809 816 } 810 817 ··· 833 840 msm_dsim->dsi[msm_dsi->id] = NULL; 834 841 } 835 842 843 + bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi) 844 + { 845 + return IS_BONDED_DSI(); 846 + } 847 + 848 + bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi) 849 + { 850 + return IS_MASTER_DSI_LINK(msm_dsi->id); 851 + }
+69 -14
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
··· 5 5 6 6 #include <linux/clk-provider.h> 7 7 #include <linux/platform_device.h> 8 + #include <dt-bindings/phy/phy.h> 8 9 9 10 #include "dsi_phy.h" 10 11 ··· 462 461 return 0; 463 462 } 464 463 464 + int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, 465 + struct msm_dsi_phy_clk_request *clk_req) 466 + { 467 + const unsigned long bit_rate = clk_req->bitclk_rate; 468 + const unsigned long esc_rate = clk_req->escclk_rate; 469 + s32 ui, ui_x7; 470 + s32 tmax, tmin; 471 + s32 coeff = 1000; /* Precision, should avoid overflow */ 472 + s32 temp; 473 + 474 + if (!bit_rate || !esc_rate) 475 + return -EINVAL; 476 + 477 + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); 478 + ui_x7 = ui * 7; 479 + 480 + temp = S_DIV_ROUND_UP(38 * coeff, ui_x7); 481 + tmin = max_t(s32, temp, 0); 482 + temp = (95 * coeff) / ui_x7; 483 + tmax = max_t(s32, temp, 0); 484 + timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false); 485 + 486 + tmin = DIV_ROUND_UP(50 * coeff, ui_x7); 487 + tmax = 255; 488 + timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false); 489 + 490 + tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1; 491 + tmax = 255; 492 + timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false); 493 + 494 + tmin = 1; 495 + tmax = 32; 496 + timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false); 497 + 498 + tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1); 499 + tmax = 64; 500 + timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false); 501 + 502 + DBG("%d, %d, %d, %d, %d", 503 + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, 504 + timing->clk_prepare, timing->hs_exit, timing->hs_rqst); 505 + 506 + return 0; 507 + } 508 + 465 509 static int dsi_phy_regulator_init(struct msm_dsi_phy *phy) 466 510 { 467 511 struct regulator_bulk_data *s = phy->supplies; ··· 639 593 .data = &dsi_phy_7nm_cfgs }, 640 594 { .compatible = "qcom,dsi-phy-7nm-8150", 641 595 .data = &dsi_phy_7nm_8150_cfgs }, 596 + { .compatible = 
"qcom,sc7280-dsi-phy-7nm", 597 + .data = &dsi_phy_7nm_7280_cfgs }, 642 598 #endif 643 599 {} 644 600 }; ··· 673 625 { 674 626 struct msm_dsi_phy *phy; 675 627 struct device *dev = &pdev->dev; 676 - const struct of_device_id *match; 628 + u32 phy_type; 677 629 int ret; 678 630 679 631 phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); 680 632 if (!phy) 681 633 return -ENOMEM; 682 - 683 - match = of_match_node(dsi_phy_dt_match, dev->of_node); 684 - if (!match) 685 - return -ENODEV; 686 634 687 635 phy->provided_clocks = devm_kzalloc(dev, 688 636 struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS), ··· 688 644 689 645 phy->provided_clocks->num = NUM_PROVIDED_CLKS; 690 646 691 - phy->cfg = match->data; 647 + phy->cfg = of_device_get_match_data(&pdev->dev); 648 + if (!phy->cfg) 649 + return -ENODEV; 650 + 692 651 phy->pdev = pdev; 693 652 694 653 phy->id = dsi_phy_get_id(phy); ··· 704 657 705 658 phy->regulator_ldo_mode = of_property_read_bool(dev->of_node, 706 659 "qcom,dsi-phy-regulator-ldo-mode"); 660 + if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type)) 661 + phy->cphy_mode = (phy_type == PHY_TYPE_CPHY); 707 662 708 663 phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size); 709 664 if (IS_ERR(phy->base)) { ··· 803 754 } 804 755 805 756 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, 806 - struct msm_dsi_phy_clk_request *clk_req) 757 + struct msm_dsi_phy_clk_request *clk_req, 758 + struct msm_dsi_phy_shared_timings *shared_timings) 807 759 { 808 760 struct device *dev = &phy->pdev->dev; 809 761 int ret; ··· 831 781 DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret); 832 782 goto phy_en_fail; 833 783 } 784 + 785 + memcpy(shared_timings, &phy->timing.shared_timings, 786 + sizeof(*shared_timings)); 834 787 835 788 /* 836 789 * Resetting DSI PHY silently changes its PLL registers to reset status, ··· 874 821 dsi_phy_disable_resource(phy); 875 822 } 876 823 877 - void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy 
*phy, 878 - struct msm_dsi_phy_shared_timings *shared_timings) 879 - { 880 - memcpy(shared_timings, &phy->timing.shared_timings, 881 - sizeof(*shared_timings)); 882 - } 883 - 884 824 void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, 885 825 enum msm_dsi_phy_usecase uc) 886 826 { 887 827 if (phy) 888 828 phy->usecase = uc; 829 + } 830 + 831 + /* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */ 832 + bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable) 833 + { 834 + if (!phy || !phy->cfg->ops.set_continuous_clock) 835 + return false; 836 + 837 + return phy->cfg->ops.set_continuous_clock(phy, enable); 889 838 } 890 839 891 840 int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
+5
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
··· 24 24 void (*disable)(struct msm_dsi_phy *phy); 25 25 void (*save_pll_state)(struct msm_dsi_phy *phy); 26 26 int (*restore_pll_state)(struct msm_dsi_phy *phy); 27 + bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable); 27 28 }; 28 29 29 30 struct msm_dsi_phy_cfg { ··· 52 51 extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; 53 52 extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs; 54 53 extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs; 54 + extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs; 55 55 56 56 struct msm_dsi_dphy_timing { 57 57 u32 clk_zero; ··· 101 99 102 100 enum msm_dsi_phy_usecase usecase; 103 101 bool regulator_ldo_mode; 102 + bool cphy_mode; 104 103 105 104 struct clk_hw *vco_hw; 106 105 bool pll_on; ··· 121 118 int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, 122 119 struct msm_dsi_phy_clk_request *clk_req); 123 120 int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, 121 + struct msm_dsi_phy_clk_request *clk_req); 122 + int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, 124 123 struct msm_dsi_phy_clk_request *clk_req); 125 124 126 125 #endif /* __DSI_PHY_H__ */
+1 -1
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
··· 84 84 #define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw) 85 85 86 86 /* 87 - * Global list of private DSI PLL struct pointers. We need this for Dual DSI 87 + * Global list of private DSI PLL struct pointers. We need this for bonded DSI 88 88 * mode, where the master PLL's clk_ops needs access the slave's private data 89 89 */ 90 90 static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+4 -4
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
··· 86 86 /* 87 87 * Private struct for N1/N2 post-divider clocks. These clocks are similar to 88 88 * the generic clk_divider class of clocks. The only difference is that it 89 - * also sets the slave DSI PLL's post-dividers if in Dual DSI mode 89 + * also sets the slave DSI PLL's post-dividers if in bonded DSI mode 90 90 */ 91 91 struct dsi_pll_14nm_postdiv { 92 92 struct clk_hw hw; ··· 102 102 #define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw) 103 103 104 104 /* 105 - * Global list of private DSI PLL struct pointers. We need this for Dual DSI 105 + * Global list of private DSI PLL struct pointers. We need this for bonded DSI 106 106 * mode, where the master PLL's clk_ops needs access the slave's private data 107 107 */ 108 108 static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; ··· 658 658 val |= value << shift; 659 659 dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); 660 660 661 - /* If we're master in dual DSI mode, then the slave PLL's post-dividers 661 + /* If we're master in bonded DSI mode, then the slave PLL's post-dividers 662 662 * follow the master's post dividers 663 663 */ 664 664 if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) { ··· 1050 1050 .reg_cfg = { 1051 1051 .num = 1, 1052 1052 .regs = { 1053 - {"vcca", 17000, 32}, 1053 + {"vcca", 73400, 32}, 1054 1054 }, 1055 1055 }, 1056 1056 .ops = {
+145 -43
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
··· 83 83 #define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, clk_hw) 84 84 85 85 /* 86 - * Global list of private DSI PLL struct pointers. We need this for Dual DSI 86 + * Global list of private DSI PLL struct pointers. We need this for bonded DSI 87 87 * mode, where the master PLL's clk_ops needs access the slave's private data 88 88 */ 89 89 static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX]; ··· 256 256 (config->frac_div_start & 0x30000) >> 16); 257 257 dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); 258 258 dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); 259 - dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */ 259 + dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, pll->phy->cphy_mode ? 0x00 : 0x10); 260 260 dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters); 261 261 } 262 262 ··· 642 642 643 643 /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ 644 644 hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 645 - CLK_SET_RATE_PARENT, 1, 8); 645 + CLK_SET_RATE_PARENT, 1, 646 + pll_7nm->phy->cphy_mode ? 
7 : 8); 646 647 if (IS_ERR(hw)) { 647 648 ret = PTR_ERR(hw); 648 649 goto fail; ··· 664 663 snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); 665 664 snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id); 666 665 667 - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 668 - 0, 1, 4); 666 + if (pll_7nm->phy->cphy_mode) 667 + hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7); 668 + else 669 + hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4); 669 670 if (IS_ERR(hw)) { 670 671 ret = PTR_ERR(hw); 671 672 goto fail; 672 673 } 673 674 674 - snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); 675 - snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id); 676 - snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id); 677 - snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id); 678 - snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); 675 + /* in CPHY mode, pclk_mux will always have post_out_div as parent 676 + * don't register a pclk_mux clock and just use post_out_div instead 677 + */ 678 + if (pll_7nm->phy->cphy_mode) { 679 + u32 data; 679 680 680 - hw = devm_clk_hw_register_mux(dev, clk_name, 681 - ((const char *[]){ 682 - parent, parent2, parent3, parent4 683 - }), 4, 0, pll_7nm->phy->base + 684 - REG_DSI_7nm_PHY_CMN_CLK_CFG1, 685 - 0, 2, 0, NULL); 686 - if (IS_ERR(hw)) { 687 - ret = PTR_ERR(hw); 688 - goto fail; 681 + data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); 682 + dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3); 683 + 684 + snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); 685 + } else { 686 + snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); 687 + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id); 688 + snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id); 689 + snprintf(parent3, 32, "dsi%d_pll_out_div_clk", 
pll_7nm->phy->id); 690 + snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); 691 + 692 + hw = devm_clk_hw_register_mux(dev, clk_name, 693 + ((const char *[]){ 694 + parent, parent2, parent3, parent4 695 + }), 4, 0, pll_7nm->phy->base + 696 + REG_DSI_7nm_PHY_CMN_CLK_CFG1, 697 + 0, 2, 0, NULL); 698 + if (IS_ERR(hw)) { 699 + ret = PTR_ERR(hw); 700 + goto fail; 701 + } 702 + 703 + snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); 689 704 } 690 705 691 706 snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id); 692 - snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); 693 707 694 708 /* PIX CLK DIV : DIV_CTRL_7_4*/ 695 709 hw = devm_clk_hw_register_divider(dev, clk_name, parent, ··· 829 813 struct msm_dsi_dphy_timing *timing = &phy->timing; 830 814 void __iomem *base = phy->base; 831 815 bool less_than_1500_mhz; 832 - u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; 816 + u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0; 817 + u32 glbl_pemph_ctrl_0; 818 + u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; 833 819 u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl; 834 820 u32 data; 835 821 836 822 DBG(""); 837 823 838 - if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) { 824 + if (phy->cphy_mode) 825 + ret = msm_dsi_cphy_timing_calc_v4(timing, clk_req); 826 + else 827 + ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req); 828 + if (ret) { 839 829 DRM_DEV_ERROR(&phy->pdev->dev, 840 - "%s: D-PHY timing calculation failed\n", __func__); 830 + "%s: PHY timing calculation failed\n", __func__); 841 831 return -EINVAL; 842 832 } 843 833 ··· 864 842 /* Alter PHY configurations if data rate less than 1.5GHZ*/ 865 843 less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000); 866 844 845 + /* For C-PHY, no low power settings for lower clk rate */ 846 + if (phy->cphy_mode) 847 + less_than_1500_mhz = false; 848 + 867 849 if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { 868 850 vreg_ctrl_0 = less_than_1500_mhz ? 
0x53 : 0x52; 869 851 glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00; ··· 880 854 glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88; 881 855 glbl_rescode_top_ctrl = 0x03; 882 856 glbl_rescode_bot_ctrl = 0x3c; 857 + } 858 + 859 + if (phy->cphy_mode) { 860 + vreg_ctrl_0 = 0x51; 861 + vreg_ctrl_1 = 0x55; 862 + glbl_pemph_ctrl_0 = 0x11; 863 + lane_ctrl0 = 0x17; 864 + } else { 865 + vreg_ctrl_1 = 0x5c; 866 + glbl_pemph_ctrl_0 = 0x00; 867 + lane_ctrl0 = 0x1f; 883 868 } 884 869 885 870 /* de-assert digital and pll power down */ ··· 913 876 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21); 914 877 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84); 915 878 879 + if (phy->cphy_mode) 880 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL, BIT(6)); 881 + 916 882 /* Enable LDO */ 917 883 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0); 918 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c); 884 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, vreg_ctrl_1); 885 + 919 886 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00); 920 887 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL, 921 888 glbl_str_swi_cal_sel_ctrl); 922 889 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0, 923 890 glbl_hstx_str_ctrl_0); 924 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00); 891 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 892 + glbl_pemph_ctrl_0); 893 + if (phy->cphy_mode) 894 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1, 0x01); 925 895 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL, 926 896 glbl_rescode_top_ctrl); 927 897 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL, ··· 938 894 /* Remove power down from all blocks */ 939 895 dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f); 940 896 941 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f); 897 + dsi_phy_write(base + 
REG_DSI_7nm_PHY_CMN_LANE_CTRL0, lane_ctrl0); 942 898 943 899 /* Select full-rate mode */ 944 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40); 900 + if (!phy->cphy_mode) 901 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40); 945 902 946 903 ret = dsi_7nm_set_usecase(phy); 947 904 if (ret) { ··· 952 907 } 953 908 954 909 /* DSI PHY timings */ 955 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00); 956 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero); 957 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare); 958 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail); 959 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit); 960 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero); 961 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare); 962 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail); 963 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst); 964 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02); 965 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04); 966 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00); 967 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12, 968 - timing->shared_timings.clk_pre); 969 - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13, 970 - timing->shared_timings.clk_post); 910 + if (phy->cphy_mode) { 911 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00); 912 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit); 913 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, 914 + timing->shared_timings.clk_pre); 915 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare); 916 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, 917 + timing->shared_timings.clk_post); 918 + dsi_phy_write(base + 
REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst); 919 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02); 920 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04); 921 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00); 922 + } else { 923 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00); 924 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero); 925 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare); 926 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail); 927 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit); 928 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero); 929 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare); 930 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail); 931 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst); 932 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02); 933 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04); 934 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00); 935 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12, 936 + timing->shared_timings.clk_pre); 937 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13, 938 + timing->shared_timings.clk_post); 939 + } 971 940 972 941 /* DSI lane settings */ 973 942 dsi_phy_hw_v4_0_lane_settings(phy); ··· 989 930 DBG("DSI%d PHY enabled", phy->id); 990 931 991 932 return 0; 933 + } 934 + 935 + static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable) 936 + { 937 + void __iomem *base = phy->base; 938 + u32 data; 939 + 940 + data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1); 941 + if (enable) 942 + data |= BIT(5) | BIT(6); 943 + else 944 + data &= ~(BIT(5) | BIT(6)); 945 + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1, data); 946 + 947 + return enable; 992 
948 } 993 949 994 950 static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy) ··· 1046 972 .pll_init = dsi_pll_7nm_init, 1047 973 .save_pll_state = dsi_7nm_pll_save_state, 1048 974 .restore_pll_state = dsi_7nm_pll_restore_state, 975 + .set_continuous_clock = dsi_7nm_set_continuous_clock, 1049 976 }, 1050 977 .min_pll_rate = 600000000UL, 1051 978 #ifdef CONFIG_64BIT ··· 1073 998 .pll_init = dsi_pll_7nm_init, 1074 999 .save_pll_state = dsi_7nm_pll_save_state, 1075 1000 .restore_pll_state = dsi_7nm_pll_restore_state, 1001 + .set_continuous_clock = dsi_7nm_set_continuous_clock, 1076 1002 }, 1077 1003 .min_pll_rate = 1000000000UL, 1078 1004 .max_pll_rate = 3500000000UL, 1079 1005 .io_start = { 0xae94400, 0xae96400 }, 1080 1006 .num_dsi_phy = 2, 1007 + }; 1008 + 1009 + const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = { 1010 + .has_phy_lane = true, 1011 + .reg_cfg = { 1012 + .num = 1, 1013 + .regs = { 1014 + {"vdds", 37550, 0}, 1015 + }, 1016 + }, 1017 + .ops = { 1018 + .enable = dsi_7nm_phy_enable, 1019 + .disable = dsi_7nm_phy_disable, 1020 + .pll_init = dsi_pll_7nm_init, 1021 + .save_pll_state = dsi_7nm_pll_save_state, 1022 + .restore_pll_state = dsi_7nm_pll_restore_state, 1023 + }, 1024 + .min_pll_rate = 600000000UL, 1025 + #ifdef CONFIG_64BIT 1026 + .max_pll_rate = 5000000000ULL, 1027 + #else 1028 + .max_pll_rate = ULONG_MAX, 1029 + #endif 1030 + .io_start = { 0xae94400 }, 1031 + .num_dsi_phy = 1, 1032 + .quirks = DSI_PHY_7NM_QUIRK_V4_1, 1081 1033 };
+3 -12
drivers/gpu/drm/msm/msm_drv.c
··· 603 603 if (IS_ERR(priv->event_thread[i].worker)) { 604 604 ret = PTR_ERR(priv->event_thread[i].worker); 605 605 DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n"); 606 + ret = PTR_ERR(priv->event_thread[i].worker); 606 607 goto err_msm_uninit; 607 608 } 608 609 ··· 1058 1057 DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW), 1059 1058 }; 1060 1059 1061 - static const struct file_operations fops = { 1062 - .owner = THIS_MODULE, 1063 - .open = drm_open, 1064 - .release = drm_release, 1065 - .unlocked_ioctl = drm_ioctl, 1066 - .compat_ioctl = drm_compat_ioctl, 1067 - .poll = drm_poll, 1068 - .read = drm_read, 1069 - .llseek = no_llseek, 1070 - .mmap = msm_gem_mmap, 1071 - }; 1060 + DEFINE_DRM_GEM_FOPS(fops); 1072 1061 1073 1062 static const struct drm_driver msm_driver = { 1074 1063 .driver_features = DRIVER_GEM | ··· 1074 1083 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 1075 1084 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 1076 1085 .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, 1077 - .gem_prime_mmap = msm_gem_prime_mmap, 1086 + .gem_prime_mmap = drm_gem_prime_mmap, 1078 1087 #ifdef CONFIG_DEBUG_FS 1079 1088 .debugfs_init = msm_debugfs_init, 1080 1089 #endif
+15 -3
drivers/gpu/drm/msm/msm_drv.h
··· 309 309 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); 310 310 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); 311 311 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); 312 - int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 313 312 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 314 313 struct dma_buf_attachment *attach, struct sg_table *sg); 315 314 int msm_gem_prime_pin(struct drm_gem_object *obj); ··· 349 350 int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, 350 351 struct drm_encoder *encoder); 351 352 void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi); 352 - 353 + bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi); 354 + bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi); 355 + bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi); 353 356 #else 354 357 static inline void __init msm_dsi_register(void) 355 358 { ··· 368 367 static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi) 369 368 { 370 369 } 371 - 370 + static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi) 371 + { 372 + return false; 373 + } 374 + static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi) 375 + { 376 + return false; 377 + } 378 + static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi) 379 + { 380 + return false; 381 + } 372 382 #endif 373 383 374 384 #ifdef CONFIG_DRM_MSM_DP
+2 -10
drivers/gpu/drm/msm/msm_fbdev.c
··· 8 8 #include <drm/drm_crtc.h> 9 9 #include <drm/drm_fb_helper.h> 10 10 #include <drm/drm_fourcc.h> 11 + #include <drm/drm_prime.h> 11 12 12 13 #include "msm_drv.h" 13 14 #include "msm_gem.h" 14 15 #include "msm_kms.h" 15 16 16 - extern int msm_gem_mmap_obj(struct drm_gem_object *obj, 17 - struct vm_area_struct *vma); 18 17 static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma); 19 18 20 19 /* ··· 47 48 struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par; 48 49 struct msm_fbdev *fbdev = to_msm_fbdev(helper); 49 50 struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0); 50 - int ret = 0; 51 51 52 - ret = drm_gem_mmap_obj(bo, bo->size, vma); 53 - if (ret) { 54 - pr_err("%s:drm_gem_mmap_obj fail\n", __func__); 55 - return ret; 56 - } 57 - 58 - return msm_gem_mmap_obj(bo, vma); 52 + return drm_gem_prime_mmap(bo, vma); 59 53 } 60 54 61 55 static int msm_fbdev_create(struct drm_fb_helper *helper,
+13 -27
drivers/gpu/drm/msm/msm_gem.c
··· 217 217 return prot; 218 218 } 219 219 220 - int msm_gem_mmap_obj(struct drm_gem_object *obj, 221 - struct vm_area_struct *vma) 222 - { 223 - struct msm_gem_object *msm_obj = to_msm_bo(obj); 224 - 225 - vma->vm_flags &= ~VM_PFNMAP; 226 - vma->vm_flags |= VM_MIXEDMAP; 227 - vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); 228 - 229 - return 0; 230 - } 231 - 232 - int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 233 - { 234 - int ret; 235 - 236 - ret = drm_gem_mmap(filp, vma); 237 - if (ret) { 238 - DBG("mmap failed: %d", ret); 239 - return ret; 240 - } 241 - 242 - return msm_gem_mmap_obj(vma->vm_private_data, vma); 243 - } 244 - 245 220 static vm_fault_t msm_gem_fault(struct vm_fault *vmf) 246 221 { 247 222 struct vm_area_struct *vma = vmf->vma; ··· 792 817 mutex_lock(&priv->mm_lock); 793 818 if (msm_obj->evictable) 794 819 mark_unevictable(msm_obj); 795 - list_del(&msm_obj->mm_list); 796 - list_add_tail(&msm_obj->mm_list, &gpu->active_list); 820 + list_move_tail(&msm_obj->mm_list, &gpu->active_list); 797 821 mutex_unlock(&priv->mm_lock); 798 822 } 799 823 } ··· 1051 1077 kfree(msm_obj); 1052 1078 } 1053 1079 1080 + static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 1081 + { 1082 + struct msm_gem_object *msm_obj = to_msm_bo(obj); 1083 + 1084 + vma->vm_flags &= ~VM_PFNMAP; 1085 + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; 1086 + vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); 1087 + 1088 + return 0; 1089 + } 1090 + 1054 1091 /* convenience method to construct a GEM buffer object, and userspace handle */ 1055 1092 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, 1056 1093 uint32_t size, uint32_t flags, uint32_t *handle, ··· 1099 1114 .get_sg_table = msm_gem_prime_get_sg_table, 1100 1115 .vmap = msm_gem_prime_vmap, 1101 1116 .vunmap = msm_gem_prime_vunmap, 1117 + .mmap = msm_gem_object_mmap, 1102 1118 .vm_ops = &vm_ops, 1103 1119 }; 
1104 1120
-3
drivers/gpu/drm/msm/msm_gem.h
··· 106 106 }; 107 107 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) 108 108 109 - int msm_gem_mmap_obj(struct drm_gem_object *obj, 110 - struct vm_area_struct *vma); 111 - int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 112 109 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); 113 110 int msm_gem_get_iova(struct drm_gem_object *obj, 114 111 struct msm_gem_address_space *aspace, uint64_t *iova);
-11
drivers/gpu/drm/msm/msm_gem_prime.c
··· 39 39 msm_gem_put_vaddr(obj); 40 40 } 41 41 42 - int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 43 - { 44 - int ret; 45 - 46 - ret = drm_gem_mmap_obj(obj, obj->size, vma); 47 - if (ret < 0) 48 - return ret; 49 - 50 - return msm_gem_mmap_obj(vma->vm_private_data, vma); 51 - } 52 - 53 42 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 54 43 struct dma_buf_attachment *attach, struct sg_table *sg) 55 44 {
-3
drivers/gpu/drm/msm/msm_kms.h
··· 117 117 struct drm_encoder *encoder, 118 118 struct drm_encoder *slave_encoder, 119 119 bool is_cmd_mode); 120 - void (*set_encoder_mode)(struct msm_kms *kms, 121 - struct drm_encoder *encoder, 122 - bool cmd_mode); 123 120 /* cleanup: */ 124 121 void (*destroy)(struct msm_kms *kms); 125 122
+2
include/dt-bindings/phy/phy.h
··· 20 20 #define PHY_TYPE_XPCS 7 21 21 #define PHY_TYPE_SGMII 8 22 22 #define PHY_TYPE_QSGMII 9 23 + #define PHY_TYPE_DPHY 10 24 + #define PHY_TYPE_CPHY 11 23 25 24 26 #endif /* _DT_BINDINGS_PHY */