Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-msm-next-2020-03-22' of https://gitlab.freedesktop.org/drm/msm into drm-next

A bit smaller this time around.. there are still a couple uabi
additions for vulkan waiting in the wings, but I punted on them this
cycle due to running low on time. (They should be easy enough to
rebase, and if it is a problem for anyone I can push a next+uabi
branch so that tu work can proceed.)

The bigger change is refactoring dpu resource manager and moving dpu
to use atomic global state. Other than that, it is mostly cleanups
and fixes.

From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGuf1R4Xz-t9Z7_cwx9jD=b4wUvvwfqA5cHR8fCSXSd5XQ@mail.gmail.com
Signed-off-by: Dave Airlie <airlied@redhat.com>

+726 -753
-116
Documentation/devicetree/bindings/display/msm/gmu.txt
··· 1 - Qualcomm adreno/snapdragon GMU (Graphics management unit) 2 - 3 - The GMU is a programmable power controller for the GPU. the CPU controls the 4 - GMU which in turn handles power controls for the GPU. 5 - 6 - Required properties: 7 - - compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu" 8 - for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu" 9 - Note that you need to list the less specific "qcom,adreno-gmu" 10 - for generic matches and the more specific identifier to identify 11 - the specific device. 12 - - reg: Physical base address and length of the GMU registers. 13 - - reg-names: Matching names for the register regions 14 - * "gmu" 15 - * "gmu_pdc" 16 - * "gmu_pdc_seg" 17 - - interrupts: The interrupt signals from the GMU. 18 - - interrupt-names: Matching names for the interrupts 19 - * "hfi" 20 - * "gmu" 21 - - clocks: phandles to the device clocks 22 - - clock-names: Matching names for the clocks 23 - * "gmu" 24 - * "cxo" 25 - * "axi" 26 - * "mnoc" 27 - - power-domains: should be: 28 - <&clock_gpucc GPU_CX_GDSC> 29 - <&clock_gpucc GPU_GX_GDSC> 30 - - power-domain-names: Matching names for the power domains 31 - - iommus: phandle to the adreno iommu 32 - - operating-points-v2: phandle to the OPP operating points 33 - 34 - Optional properties: 35 - - sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon 36 - SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml. 37 - 38 - Example: 39 - 40 - / { 41 - ... 
42 - 43 - gmu: gmu@506a000 { 44 - compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu"; 45 - 46 - reg = <0x506a000 0x30000>, 47 - <0xb280000 0x10000>, 48 - <0xb480000 0x10000>; 49 - reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; 50 - 51 - interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>, 52 - <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>; 53 - interrupt-names = "hfi", "gmu"; 54 - 55 - clocks = <&gpucc GPU_CC_CX_GMU_CLK>, 56 - <&gpucc GPU_CC_CXO_CLK>, 57 - <&gcc GCC_DDRSS_GPU_AXI_CLK>, 58 - <&gcc GCC_GPU_MEMNOC_GFX_CLK>; 59 - clock-names = "gmu", "cxo", "axi", "memnoc"; 60 - 61 - power-domains = <&gpucc GPU_CX_GDSC>, 62 - <&gpucc GPU_GX_GDSC>; 63 - power-domain-names = "cx", "gx"; 64 - 65 - iommus = <&adreno_smmu 5>; 66 - 67 - operating-points-v2 = <&gmu_opp_table>; 68 - }; 69 - }; 70 - 71 - a3xx example with OCMEM support: 72 - 73 - / { 74 - ... 75 - 76 - gpu: adreno@fdb00000 { 77 - compatible = "qcom,adreno-330.2", 78 - "qcom,adreno"; 79 - reg = <0xfdb00000 0x10000>; 80 - reg-names = "kgsl_3d0_reg_memory"; 81 - interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; 82 - interrupt-names = "kgsl_3d0_irq"; 83 - clock-names = "core", 84 - "iface", 85 - "mem_iface"; 86 - clocks = <&mmcc OXILI_GFX3D_CLK>, 87 - <&mmcc OXILICX_AHB_CLK>, 88 - <&mmcc OXILICX_AXI_CLK>; 89 - sram = <&gmu_sram>; 90 - power-domains = <&mmcc OXILICX_GDSC>; 91 - operating-points-v2 = <&gpu_opp_table>; 92 - iommus = <&gpu_iommu 0>; 93 - }; 94 - 95 - ocmem@fdd00000 { 96 - compatible = "qcom,msm8974-ocmem"; 97 - 98 - reg = <0xfdd00000 0x2000>, 99 - <0xfec00000 0x180000>; 100 - reg-names = "ctrl", 101 - "mem"; 102 - 103 - clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, 104 - <&mmcc OCMEMCX_OCMEMNOC_CLK>; 105 - clock-names = "core", 106 - "iface"; 107 - 108 - #address-cells = <1>; 109 - #size-cells = <1>; 110 - 111 - gmu_sram: gmu-sram@0 { 112 - reg = <0x0 0x100000>; 113 - ranges = <0 0 0xfec00000 0x100000>; 114 - }; 115 - }; 116 - };
+123
Documentation/devicetree/bindings/display/msm/gmu.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + # Copyright 2019-2020, The Linux Foundation, All Rights Reserved 3 + %YAML 1.2 4 + --- 5 + 6 + $id: "http://devicetree.org/schemas/display/msm/gmu.yaml#" 7 + $schema: "http://devicetree.org/meta-schemas/core.yaml#" 8 + 9 + title: Devicetree bindings for the GMU attached to certain Adreno GPUs 10 + 11 + maintainers: 12 + - Rob Clark <robdclark@gmail.com> 13 + 14 + description: | 15 + These bindings describe the Graphics Management Unit (GMU) that is attached 16 + to members of the Adreno A6xx GPU family. The GMU provides on-device power 17 + management and support to improve power efficiency and reduce the load on 18 + the CPU. 19 + 20 + properties: 21 + compatible: 22 + items: 23 + - enum: 24 + - qcom,adreno-gmu-630.2 25 + - const: qcom,adreno-gmu 26 + 27 + reg: 28 + items: 29 + - description: Core GMU registers 30 + - description: GMU PDC registers 31 + - description: GMU PDC sequence registers 32 + 33 + reg-names: 34 + items: 35 + - const: gmu 36 + - const: gmu_pdc 37 + - const: gmu_pdc_seq 38 + 39 + clocks: 40 + items: 41 + - description: GMU clock 42 + - description: GPU CX clock 43 + - description: GPU AXI clock 44 + - description: GPU MEMNOC clock 45 + 46 + clock-names: 47 + items: 48 + - const: gmu 49 + - const: cxo 50 + - const: axi 51 + - const: memnoc 52 + 53 + interrupts: 54 + items: 55 + - description: GMU HFI interrupt 56 + - description: GMU interrupt 57 + 58 + 59 + interrupt-names: 60 + items: 61 + - const: hfi 62 + - const: gmu 63 + 64 + power-domains: 65 + items: 66 + - description: CX power domain 67 + - description: GX power domain 68 + 69 + power-domain-names: 70 + items: 71 + - const: cx 72 + - const: gx 73 + 74 + iommus: 75 + maxItems: 1 76 + 77 + operating-points-v2: true 78 + 79 + required: 80 + - compatible 81 + - reg 82 + - reg-names 83 + - clocks 84 + - clock-names 85 + - interrupts 86 + - interrupt-names 87 + - power-domains 88 + - power-domain-names 89 + - iommus 90 + - 
operating-points-v2 91 + 92 + examples: 93 + - | 94 + #include <dt-bindings/clock/qcom,gpucc-sdm845.h> 95 + #include <dt-bindings/clock/qcom,gcc-sdm845.h> 96 + #include <dt-bindings/interrupt-controller/irq.h> 97 + #include <dt-bindings/interrupt-controller/arm-gic.h> 98 + 99 + gmu: gmu@506a000 { 100 + compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu"; 101 + 102 + reg = <0x506a000 0x30000>, 103 + <0xb280000 0x10000>, 104 + <0xb480000 0x10000>; 105 + reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; 106 + 107 + clocks = <&gpucc GPU_CC_CX_GMU_CLK>, 108 + <&gpucc GPU_CC_CXO_CLK>, 109 + <&gcc GCC_DDRSS_GPU_AXI_CLK>, 110 + <&gcc GCC_GPU_MEMNOC_GFX_CLK>; 111 + clock-names = "gmu", "cxo", "axi", "memnoc"; 112 + 113 + interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>, 114 + <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>; 115 + interrupt-names = "hfi", "gmu"; 116 + 117 + power-domains = <&gpucc GPU_CX_GDSC>, 118 + <&gpucc GPU_GX_GDSC>; 119 + power-domain-names = "cx", "gx"; 120 + 121 + iommus = <&adreno_smmu 5>; 122 + operating-points-v2 = <&gmu_opp_table>; 123 + };
+42 -13
Documentation/devicetree/bindings/display/msm/gpu.txt
··· 35 35 bring the GPU out of secure mode. 36 36 - firmware-name: optional property of the 'zap-shader' node, listing the 37 37 relative path of the device specific zap firmware. 38 + - sram: phandle to the On Chip Memory (OCMEM) that's present on some a3xx and 39 + a4xx Snapdragon SoCs. See 40 + Documentation/devicetree/bindings/sram/qcom,ocmem.yaml. 38 41 39 - Example 3xx/4xx/a5xx: 42 + Example 3xx/4xx: 40 43 41 44 / { 42 45 ... 43 46 44 - gpu: qcom,kgsl-3d0@4300000 { 45 - compatible = "qcom,adreno-320.2", "qcom,adreno"; 46 - reg = <0x04300000 0x20000>; 47 + gpu: adreno@fdb00000 { 48 + compatible = "qcom,adreno-330.2", 49 + "qcom,adreno"; 50 + reg = <0xfdb00000 0x10000>; 47 51 reg-names = "kgsl_3d0_reg_memory"; 48 - interrupts = <GIC_SPI 80 0>; 49 - clock-names = 50 - "core", 51 - "iface", 52 - "mem_iface"; 53 - clocks = 54 - <&mmcc GFX3D_CLK>, 55 - <&mmcc GFX3D_AHB_CLK>, 56 - <&mmcc MMSS_IMEM_AHB_CLK>; 52 + interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; 53 + interrupt-names = "kgsl_3d0_irq"; 54 + clock-names = "core", 55 + "iface", 56 + "mem_iface"; 57 + clocks = <&mmcc OXILI_GFX3D_CLK>, 58 + <&mmcc OXILICX_AHB_CLK>, 59 + <&mmcc OXILICX_AXI_CLK>; 60 + sram = <&gpu_sram>; 61 + power-domains = <&mmcc OXILICX_GDSC>; 62 + operating-points-v2 = <&gpu_opp_table>; 63 + iommus = <&gpu_iommu 0>; 64 + }; 65 + 66 + gpu_sram: ocmem@fdd00000 { 67 + compatible = "qcom,msm8974-ocmem"; 68 + 69 + reg = <0xfdd00000 0x2000>, 70 + <0xfec00000 0x180000>; 71 + reg-names = "ctrl", 72 + "mem"; 73 + 74 + clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, 75 + <&mmcc OCMEMCX_OCMEMNOC_CLK>; 76 + clock-names = "core", 77 + "iface"; 78 + 79 + #address-cells = <1>; 80 + #size-cells = <1>; 81 + 82 + gpu_sram: gpu-sram@0 { 83 + reg = <0x0 0x100000>; 84 + ranges = <0 0 0xfec00000 0x100000>; 85 + }; 57 86 }; 58 87 }; 59 88
+20 -7
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 1446 1446 static void check_speed_bin(struct device *dev) 1447 1447 { 1448 1448 struct nvmem_cell *cell; 1449 - u32 bin, val; 1449 + u32 val; 1450 + 1451 + /* 1452 + * If the OPP table specifies a opp-supported-hw property then we have 1453 + * to set something with dev_pm_opp_set_supported_hw() or the table 1454 + * doesn't get populated so pick an arbitrary value that should 1455 + * ensure the default frequencies are selected but not conflict with any 1456 + * actual bins 1457 + */ 1458 + val = 0x80; 1450 1459 1451 1460 cell = nvmem_cell_get(dev, "speed_bin"); 1452 1461 1453 - /* If a nvmem cell isn't defined, nothing to do */ 1454 - if (IS_ERR(cell)) 1455 - return; 1462 + if (!IS_ERR(cell)) { 1463 + void *buf = nvmem_cell_read(cell, NULL); 1456 1464 1457 - bin = *((u32 *) nvmem_cell_read(cell, NULL)); 1458 - nvmem_cell_put(cell); 1465 + if (!IS_ERR(buf)) { 1466 + u8 bin = *((u8 *) buf); 1459 1467 1460 - val = (1 << bin); 1468 + val = (1 << bin); 1469 + kfree(buf); 1470 + } 1471 + 1472 + nvmem_cell_put(cell); 1473 + } 1461 1474 1462 1475 dev_pm_opp_set_supported_hw(dev, &val, 1); 1463 1476 }
+13 -102
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 2 2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ 3 3 4 4 #include <linux/clk.h> 5 + #include <linux/dma-mapping.h> 5 6 #include <linux/interconnect.h> 6 7 #include <linux/pm_domain.h> 7 8 #include <linux/pm_opp.h> ··· 921 920 922 921 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo) 923 922 { 924 - int count, i; 925 - u64 iova; 926 - 927 923 if (IS_ERR_OR_NULL(bo)) 928 924 return; 929 925 930 - count = bo->size >> PAGE_SHIFT; 931 - iova = bo->iova; 932 - 933 - for (i = 0; i < count; i++, iova += PAGE_SIZE) { 934 - iommu_unmap(gmu->domain, iova, PAGE_SIZE); 935 - __free_pages(bo->pages[i], 0); 936 - } 937 - 938 - kfree(bo->pages); 926 + dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova); 939 927 kfree(bo); 940 928 } 941 929 ··· 932 942 size_t size) 933 943 { 934 944 struct a6xx_gmu_bo *bo; 935 - int ret, count, i; 936 945 937 946 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 938 947 if (!bo) ··· 939 950 940 951 bo->size = PAGE_ALIGN(size); 941 952 942 - count = bo->size >> PAGE_SHIFT; 953 + bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL); 943 954 944 - bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL); 945 - if (!bo->pages) { 955 + if (!bo->virt) { 946 956 kfree(bo); 947 957 return ERR_PTR(-ENOMEM); 948 958 } 949 959 950 - for (i = 0; i < count; i++) { 951 - bo->pages[i] = alloc_page(GFP_KERNEL); 952 - if (!bo->pages[i]) 953 - goto err; 954 - } 955 - 956 - bo->iova = gmu->uncached_iova_base; 957 - 958 - for (i = 0; i < count; i++) { 959 - ret = iommu_map(gmu->domain, 960 - bo->iova + (PAGE_SIZE * i), 961 - page_to_phys(bo->pages[i]), PAGE_SIZE, 962 - IOMMU_READ | IOMMU_WRITE); 963 - 964 - if (ret) { 965 - DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n"); 966 - 967 - for (i = i - 1 ; i >= 0; i--) 968 - iommu_unmap(gmu->domain, 969 - bo->iova + (PAGE_SIZE * i), 970 - PAGE_SIZE); 971 - 972 - goto err; 973 - } 974 - } 975 - 976 - bo->virt = vmap(bo->pages, count, 
VM_IOREMAP, 977 - pgprot_writecombine(PAGE_KERNEL)); 978 - if (!bo->virt) 979 - goto err; 980 - 981 - /* Align future IOVA addresses on 1MB boundaries */ 982 - gmu->uncached_iova_base += ALIGN(size, SZ_1M); 983 - 984 960 return bo; 985 - 986 - err: 987 - for (i = 0; i < count; i++) { 988 - if (bo->pages[i]) 989 - __free_pages(bo->pages[i], 0); 990 - } 991 - 992 - kfree(bo->pages); 993 - kfree(bo); 994 - 995 - return ERR_PTR(-ENOMEM); 996 - } 997 - 998 - static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) 999 - { 1000 - int ret; 1001 - 1002 - /* 1003 - * The GMU address space is hardcoded to treat the range 1004 - * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared 1005 - * between the GMU and the CPU will live in this space 1006 - */ 1007 - gmu->uncached_iova_base = 0x60000000; 1008 - 1009 - 1010 - gmu->domain = iommu_domain_alloc(&platform_bus_type); 1011 - if (!gmu->domain) 1012 - return -ENODEV; 1013 - 1014 - ret = iommu_attach_device(gmu->domain, gmu->dev); 1015 - 1016 - if (ret) { 1017 - iommu_domain_free(gmu->domain); 1018 - gmu->domain = NULL; 1019 - } 1020 - 1021 - return ret; 1022 961 } 1023 962 1024 963 /* Return the 'arc-level' for the given frequency */ ··· 1206 1289 1207 1290 a6xx_gmu_memory_free(gmu, gmu->hfi); 1208 1291 1209 - iommu_detach_device(gmu->domain, gmu->dev); 1210 - 1211 - iommu_domain_free(gmu->domain); 1212 - 1213 1292 free_irq(gmu->gmu_irq, gmu); 1214 1293 free_irq(gmu->hfi_irq, gmu); 1215 1294 ··· 1226 1313 1227 1314 gmu->dev = &pdev->dev; 1228 1315 1229 - of_dma_configure(gmu->dev, node, true); 1316 + /* Pass force_dma false to require the DT to set the dma region */ 1317 + ret = of_dma_configure(gmu->dev, node, false); 1318 + if (ret) 1319 + return ret; 1320 + 1321 + /* Set the mask after the of_dma_configure() */ 1322 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31)); 1323 + if (ret) 1324 + return ret; 1230 1325 1231 1326 /* Fow now, don't do anything fancy until we get our feet under us */ 1232 1327 
gmu->idle_level = GMU_IDLE_STATE_ACTIVE; ··· 1243 1322 1244 1323 /* Get the list of clocks */ 1245 1324 ret = a6xx_gmu_clocks_probe(gmu); 1246 - if (ret) 1247 - goto err_put_device; 1248 - 1249 - /* Set up the IOMMU context bank */ 1250 - ret = a6xx_gmu_memory_probe(gmu); 1251 1325 if (ret) 1252 1326 goto err_put_device; 1253 1327 ··· 1291 1375 err_memory: 1292 1376 a6xx_gmu_memory_free(gmu, gmu->hfi); 1293 1377 1294 - if (gmu->domain) { 1295 - iommu_detach_device(gmu->domain, gmu->dev); 1296 - 1297 - iommu_domain_free(gmu->domain); 1298 - } 1299 1378 ret = -ENODEV; 1300 1379 1301 1380 err_put_device:
+1 -5
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
··· 12 12 struct a6xx_gmu_bo { 13 13 void *virt; 14 14 size_t size; 15 - u64 iova; 16 - struct page **pages; 15 + dma_addr_t iova; 17 16 }; 18 17 19 18 /* ··· 47 48 48 49 int hfi_irq; 49 50 int gmu_irq; 50 - 51 - struct iommu_domain *domain; 52 - u64 uncached_iova_base; 53 51 54 52 struct device *gxpd; 55 53
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
··· 379 379 }; 380 380 381 381 static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = { 382 - "CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, 382 + "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, 383 383 REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, 384 384 }; 385 385
+1 -1
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 673 673 return NULL; 674 674 675 675 for (i = 0; i < l; i++) 676 - buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s", 676 + buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s", 677 677 ascii85_encode(src[i], out)); 678 678 679 679 return buf;
+50 -68
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 164 164 * clks and resources after IDLE_TIMEOUT time. 165 165 * @vsync_event_work: worker to handle vsync event for autorefresh 166 166 * @topology: topology of the display 167 - * @mode_set_complete: flag to indicate modeset completion 168 167 * @idle_timeout: idle timeout duration in milliseconds 169 168 */ 170 169 struct dpu_encoder_virt { ··· 201 202 struct delayed_work delayed_off_work; 202 203 struct kthread_work vsync_event_work; 203 204 struct msm_display_topology topology; 204 - bool mode_set_complete; 205 205 206 206 u32 idle_timeout; 207 207 }; ··· 459 461 struct msm_display_info *disp_info; 460 462 461 463 if (!phys_enc->hw_mdptop || !phys_enc->parent) { 462 - DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); 464 + DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); 463 465 return; 464 466 } 465 467 ··· 560 562 const struct drm_display_mode *mode; 561 563 struct drm_display_mode *adj_mode; 562 564 struct msm_display_topology topology; 565 + struct dpu_global_state *global_state; 563 566 int i = 0; 564 567 int ret = 0; 565 568 566 569 if (!drm_enc || !crtc_state || !conn_state) { 567 570 DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n", 568 - drm_enc != 0, crtc_state != 0, conn_state != 0); 571 + drm_enc != NULL, crtc_state != NULL, conn_state != NULL); 569 572 return -EINVAL; 570 573 } 571 574 ··· 577 578 dpu_kms = to_dpu_kms(priv->kms); 578 579 mode = &crtc_state->mode; 579 580 adj_mode = &crtc_state->adjusted_mode; 581 + global_state = dpu_kms_get_existing_global_state(dpu_kms); 580 582 trace_dpu_enc_atomic_check(DRMID(drm_enc)); 581 583 582 584 /* ··· 609 609 610 610 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); 611 611 612 - /* Reserve dynamic resources now. Indicating AtomicTest phase */ 612 + /* Reserve dynamic resources now. */ 613 613 if (!ret) { 614 614 /* 615 615 * Avoid reserving resources when mode set is pending. Topology 616 616 * info may not be available to complete reservation. 
617 617 */ 618 - if (drm_atomic_crtc_needs_modeset(crtc_state) 619 - && dpu_enc->mode_set_complete) { 620 - ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, 621 - topology, true); 622 - dpu_enc->mode_set_complete = false; 618 + if (drm_atomic_crtc_needs_modeset(crtc_state)) { 619 + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, 620 + drm_enc, crtc_state, topology); 623 621 } 624 622 } 625 623 ··· 954 956 struct drm_connector *conn = NULL, *conn_iter; 955 957 struct drm_crtc *drm_crtc; 956 958 struct dpu_crtc_state *cstate; 957 - struct dpu_rm_hw_iter hw_iter; 959 + struct dpu_global_state *global_state; 958 960 struct msm_display_topology topology; 959 - struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL }; 960 - struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL }; 961 - int num_lm = 0, num_ctl = 0; 962 - int i, j, ret; 961 + struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; 962 + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; 963 + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; 964 + int num_lm, num_ctl, num_pp; 965 + int i, j; 963 966 964 967 if (!drm_enc) { 965 968 DPU_ERROR("invalid encoder\n"); ··· 973 974 priv = drm_enc->dev->dev_private; 974 975 dpu_kms = to_dpu_kms(priv->kms); 975 976 connector_list = &dpu_kms->dev->mode_config.connector_list; 977 + 978 + global_state = dpu_kms_get_existing_global_state(dpu_kms); 979 + if (IS_ERR_OR_NULL(global_state)) { 980 + DPU_ERROR("Failed to get global state"); 981 + return; 982 + } 976 983 977 984 trace_dpu_enc_mode_set(DRMID(drm_enc)); 978 985 ··· 1000 995 1001 996 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); 1002 997 1003 - /* Reserve dynamic resources now. 
Indicating non-AtomicTest phase */ 1004 - ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state, 1005 - topology, false); 1006 - if (ret) { 1007 - DPU_ERROR_ENC(dpu_enc, 1008 - "failed to reserve hw resources, %d\n", ret); 1009 - return; 1010 - } 998 + /* Query resource that have been reserved in atomic check step. */ 999 + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1000 + drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp, 1001 + ARRAY_SIZE(hw_pp)); 1002 + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1003 + drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); 1004 + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1005 + drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); 1011 1006 1012 - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG); 1013 - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 1014 - dpu_enc->hw_pp[i] = NULL; 1015 - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) 1016 - break; 1017 - dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw; 1018 - } 1019 - 1020 - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL); 1021 - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 1022 - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) 1023 - break; 1024 - hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw; 1025 - num_ctl++; 1026 - } 1027 - 1028 - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM); 1029 - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 1030 - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) 1031 - break; 1032 - hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw; 1033 - num_lm++; 1034 - } 1007 + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 1008 + dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i]) 1009 + : NULL; 1035 1010 1036 1011 cstate = to_dpu_crtc_state(drm_crtc->state); 1037 1012 1038 1013 for (i = 0; i < num_lm; i++) { 1039 1014 int ctl_idx = (i < num_ctl) ? 
i : (num_ctl-1); 1040 1015 1041 - cstate->mixers[i].hw_lm = hw_lm[i]; 1042 - cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx]; 1016 + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); 1017 + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); 1043 1018 } 1044 1019 1045 1020 cstate->num_mixers = num_lm; 1046 1021 1047 1022 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1023 + int num_blk; 1024 + struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC]; 1048 1025 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1049 1026 1050 1027 if (!dpu_enc->hw_pp[i]) { 1051 1028 DPU_ERROR_ENC(dpu_enc, 1052 1029 "no pp block assigned at idx: %d\n", i); 1053 - goto error; 1030 + return; 1054 1031 } 1055 1032 1056 1033 if (!hw_ctl[i]) { 1057 1034 DPU_ERROR_ENC(dpu_enc, 1058 1035 "no ctl block assigned at idx: %d\n", i); 1059 - goto error; 1036 + return; 1060 1037 } 1061 1038 1062 1039 phys->hw_pp = dpu_enc->hw_pp[i]; 1063 - phys->hw_ctl = hw_ctl[i]; 1040 + phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); 1064 1041 1065 - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, 1066 - DPU_HW_BLK_INTF); 1067 - for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) { 1042 + num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm, 1043 + global_state, drm_enc->base.id, DPU_HW_BLK_INTF, 1044 + hw_blk, ARRAY_SIZE(hw_blk)); 1045 + for (j = 0; j < num_blk; j++) { 1068 1046 struct dpu_hw_intf *hw_intf; 1069 1047 1070 - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) 1071 - break; 1072 - 1073 - hw_intf = (struct dpu_hw_intf *)hw_iter.hw; 1048 + hw_intf = to_dpu_hw_intf(hw_blk[i]); 1074 1049 if (hw_intf->idx == phys->intf_idx) 1075 1050 phys->hw_intf = hw_intf; 1076 1051 } ··· 1058 1073 if (!phys->hw_intf) { 1059 1074 DPU_ERROR_ENC(dpu_enc, 1060 1075 "no intf block assigned at idx: %d\n", i); 1061 - goto error; 1076 + return; 1062 1077 } 1063 1078 1064 1079 phys->connector = conn->state->connector; 1065 1080 if (phys->ops.mode_set) 1066 1081 phys->ops.mode_set(phys, mode, adj_mode); 1067 1082 } 1068 - 1069 - 
dpu_enc->mode_set_complete = true; 1070 - 1071 - error: 1072 - dpu_rm_release(&dpu_kms->rm, drm_enc); 1073 1083 } 1074 1084 1075 1085 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) ··· 1161 1181 struct dpu_encoder_virt *dpu_enc = NULL; 1162 1182 struct msm_drm_private *priv; 1163 1183 struct dpu_kms *dpu_kms; 1184 + struct dpu_global_state *global_state; 1164 1185 int i = 0; 1165 1186 1166 1187 if (!drm_enc) { ··· 1180 1199 1181 1200 priv = drm_enc->dev->dev_private; 1182 1201 dpu_kms = to_dpu_kms(priv->kms); 1202 + global_state = dpu_kms_get_existing_global_state(dpu_kms); 1183 1203 1184 1204 trace_dpu_enc_disable(DRMID(drm_enc)); 1185 1205 ··· 1210 1228 1211 1229 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); 1212 1230 1213 - dpu_rm_release(&dpu_kms->rm, drm_enc); 1231 + dpu_rm_release(global_state, drm_enc); 1214 1232 1215 1233 mutex_unlock(&dpu_enc->enc_lock); 1216 1234 } ··· 1946 1964 if (IS_ERR_OR_NULL(enc)) { 1947 1965 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", 1948 1966 PTR_ERR(enc)); 1949 - return enc == 0 ? -EINVAL : PTR_ERR(enc); 1967 + return enc == NULL ? -EINVAL : PTR_ERR(enc); 1950 1968 } 1951 1969 1952 1970 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; ··· 1959 1977 if (IS_ERR_OR_NULL(enc)) { 1960 1978 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", 1961 1979 PTR_ERR(enc)); 1962 - return enc == 0 ? -EINVAL : PTR_ERR(enc); 1980 + return enc == NULL ? -EINVAL : PTR_ERR(enc); 1963 1981 } 1964 1982 1965 1983 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; ··· 1990 2008 struct dpu_enc_phys_init_params phys_params; 1991 2009 1992 2010 if (!dpu_enc) { 1993 - DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0); 2011 + DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); 1994 2012 return -EINVAL; 1995 2013 } 1996 2014
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
··· 411 411 to_dpu_encoder_phys_cmd(phys_enc); 412 412 413 413 if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) { 414 - DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0); 414 + DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL); 415 415 return; 416 416 } 417 417 ··· 440 440 u32 flush_mask = 0; 441 441 442 442 if (!phys_enc->hw_pp) { 443 - DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); 443 + DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); 444 444 return; 445 445 } 446 446
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
··· 239 239 struct dpu_hw_intf_cfg intf_cfg = { 0 }; 240 240 241 241 if (!phys_enc->hw_ctl->ops.setup_intf_cfg) { 242 - DPU_ERROR("invalid encoder %d\n", phys_enc != 0); 242 + DPU_ERROR("invalid encoder %d\n", phys_enc != NULL); 243 243 return; 244 244 } 245 245 ··· 559 559 560 560 if (!phys_enc->hw_intf) { 561 561 DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", 562 - phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0); 562 + phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL); 563 563 return; 564 564 } 565 565
+10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
··· 90 90 }; 91 91 92 92 /** 93 + * to_dpu_hw_intf - convert base object dpu_hw_base to container 94 + * @hw: Pointer to base hardware block 95 + * return: Pointer to hardware block container 96 + */ 97 + static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw) 98 + { 99 + return container_of(hw, struct dpu_hw_intf, base); 100 + } 101 + 102 + /** 93 103 * dpu_hw_intf_init(): Initializes the intf driver for the passed 94 104 * interface idx. 95 105 * @idx: interface index for which driver object is required
+10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
··· 97 97 }; 98 98 99 99 /** 100 + * to_dpu_hw_pingpong - convert base object dpu_hw_base to container 101 + * @hw: Pointer to base hardware block 102 + * return: Pointer to hardware block container 103 + */ 104 + static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw) 105 + { 106 + return container_of(hw, struct dpu_hw_pingpong, base); 107 + } 108 + 109 + /** 100 110 * dpu_hw_pingpong_init - initializes the pingpong driver for the passed 101 111 * pingpong idx. 102 112 * @idx: Pingpong index for which driver object is required
+83 -15
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 138 138 { 139 139 struct dpu_debugfs_regset32 *regset = s->private; 140 140 struct dpu_kms *dpu_kms = regset->dpu_kms; 141 - struct drm_device *dev; 142 - struct msm_drm_private *priv; 143 141 void __iomem *base; 144 142 uint32_t i, addr; 145 143 146 144 if (!dpu_kms->mmio) 147 145 return 0; 148 146 149 - dev = dpu_kms->dev; 150 - priv = dev->dev_private; 151 147 base = dpu_kms->mmio + regset->offset; 152 148 153 149 /* insert padding spaces, if needed */ ··· 224 228 } 225 229 #endif 226 230 231 + /* Global/shared object state funcs */ 232 + 233 + /* 234 + * This is a helper that returns the private state currently in operation. 235 + * Note that this would return the "old_state" if called in the atomic check 236 + * path, and the "new_state" after the atomic swap has been done. 237 + */ 238 + struct dpu_global_state * 239 + dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms) 240 + { 241 + return to_dpu_global_state(dpu_kms->global_state.state); 242 + } 243 + 244 + /* 245 + * This acquires the modeset lock set aside for global state, creates 246 + * a new duplicated private object state. 
247 + */ 248 + struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s) 249 + { 250 + struct msm_drm_private *priv = s->dev->dev_private; 251 + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 252 + struct drm_private_state *priv_state; 253 + int ret; 254 + 255 + ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx); 256 + if (ret) 257 + return ERR_PTR(ret); 258 + 259 + priv_state = drm_atomic_get_private_obj_state(s, 260 + &dpu_kms->global_state); 261 + if (IS_ERR(priv_state)) 262 + return ERR_CAST(priv_state); 263 + 264 + return to_dpu_global_state(priv_state); 265 + } 266 + 267 + static struct drm_private_state * 268 + dpu_kms_global_duplicate_state(struct drm_private_obj *obj) 269 + { 270 + struct dpu_global_state *state; 271 + 272 + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); 273 + if (!state) 274 + return NULL; 275 + 276 + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 277 + 278 + return &state->base; 279 + } 280 + 281 + static void dpu_kms_global_destroy_state(struct drm_private_obj *obj, 282 + struct drm_private_state *state) 283 + { 284 + struct dpu_global_state *dpu_state = to_dpu_global_state(state); 285 + 286 + kfree(dpu_state); 287 + } 288 + 289 + static const struct drm_private_state_funcs dpu_kms_global_state_funcs = { 290 + .atomic_duplicate_state = dpu_kms_global_duplicate_state, 291 + .atomic_destroy_state = dpu_kms_global_destroy_state, 292 + }; 293 + 294 + static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) 295 + { 296 + struct dpu_global_state *state; 297 + 298 + drm_modeset_lock_init(&dpu_kms->global_state_lock); 299 + 300 + state = kzalloc(sizeof(*state), GFP_KERNEL); 301 + if (!state) 302 + return -ENOMEM; 303 + 304 + drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state, 305 + &state->base, 306 + &dpu_kms_global_state_funcs); 307 + return 0; 308 + } 309 + 227 310 static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) 228 311 { 229 
312 return dpu_crtc_vblank(crtc, true); ··· 342 267 static void dpu_kms_prepare_commit(struct msm_kms *kms, 343 268 struct drm_atomic_state *state) 344 269 { 345 - struct dpu_kms *dpu_kms; 346 - struct drm_device *dev; 347 270 struct drm_crtc *crtc; 348 271 struct drm_crtc_state *crtc_state; 349 272 struct drm_encoder *encoder; ··· 349 276 350 277 if (!kms) 351 278 return; 352 - dpu_kms = to_dpu_kms(kms); 353 - dev = dpu_kms->dev; 354 279 355 280 /* Call prepare_commit for all affected encoders */ 356 281 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { ··· 623 552 624 553 static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) 625 554 { 626 - struct drm_device *dev; 627 555 int i; 628 - 629 - dev = dpu_kms->dev; 630 556 631 557 if (dpu_kms->hw_intr) 632 558 dpu_hw_intr_destroy(dpu_kms->hw_intr); ··· 828 760 { 829 761 struct dpu_kms *dpu_kms; 830 762 struct drm_device *dev; 831 - struct msm_drm_private *priv; 832 763 int i, rc = -EINVAL; 833 764 834 765 if (!kms) { ··· 837 770 838 771 dpu_kms = to_dpu_kms(kms); 839 772 dev = dpu_kms->dev; 840 - priv = dev->dev_private; 773 + 774 + rc = dpu_kms_global_obj_init(dpu_kms); 775 + if (rc) 776 + return rc; 841 777 842 778 atomic_set(&dpu_kms->bandwidth_ref, 0); 843 779 ··· 1088 1018 int rc = -1; 1089 1019 struct platform_device *pdev = to_platform_device(dev); 1090 1020 struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); 1091 - struct drm_device *ddev; 1092 1021 struct dss_module_power *mp = &dpu_kms->mp; 1093 1022 1094 - ddev = dpu_kms->dev; 1095 1023 rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); 1096 1024 if (rc) 1097 1025 DPU_ERROR("clock disable failed rc:%d\n", rc);
+26
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
··· 111 111 112 112 struct dpu_core_perf perf; 113 113 114 + /* 115 + * Global private object state, Do not access directly, use 116 + * dpu_kms_global_get_state() 117 + */ 118 + struct drm_modeset_lock global_state_lock; 119 + struct drm_private_obj global_state; 120 + 114 121 struct dpu_rm rm; 115 122 bool rm_init; 116 123 ··· 145 138 }; 146 139 147 140 #define to_dpu_kms(x) container_of(x, struct dpu_kms, base) 141 + 142 + #define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base) 143 + 144 + /* Global private object state for tracking resources that are shared across 145 + * multiple kms objects (planes/crtcs/etc). 146 + */ 147 + struct dpu_global_state { 148 + struct drm_private_state base; 149 + 150 + uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0]; 151 + uint32_t mixer_to_enc_id[LM_MAX - LM_0]; 152 + uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; 153 + uint32_t intf_to_enc_id[INTF_MAX - INTF_0]; 154 + }; 155 + 156 + struct dpu_global_state 157 + *dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms); 158 + struct dpu_global_state 159 + *__must_check dpu_kms_get_global_state(struct drm_atomic_state *s); 148 160 149 161 /** 150 162 * Debugfs functions - extra helper functions for debugfs support
+281 -345
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
··· 12 12 #include "dpu_encoder.h" 13 13 #include "dpu_trace.h" 14 14 15 - #define RESERVED_BY_OTHER(h, r) \ 16 - ((h)->enc_id && (h)->enc_id != r) 15 + 16 + static inline bool reserved_by_other(uint32_t *res_map, int idx, 17 + uint32_t enc_id) 18 + { 19 + return res_map[idx] && res_map[idx] != enc_id; 20 + } 17 21 18 22 /** 19 23 * struct dpu_rm_requirements - Reservation requirements parameter bundle ··· 29 25 struct dpu_encoder_hw_resources hw_res; 30 26 }; 31 27 32 - 33 - /** 34 - * struct dpu_rm_hw_blk - hardware block tracking list member 35 - * @list: List head for list of all hardware blocks tracking items 36 - * @id: Hardware ID number, within it's own space, ie. LM_X 37 - * @enc_id: Encoder id to which this blk is binded 38 - * @hw: Pointer to the hardware register access object for this block 39 - */ 40 - struct dpu_rm_hw_blk { 41 - struct list_head list; 42 - uint32_t id; 43 - uint32_t enc_id; 44 - struct dpu_hw_blk *hw; 45 - }; 46 - 47 - void dpu_rm_init_hw_iter( 48 - struct dpu_rm_hw_iter *iter, 49 - uint32_t enc_id, 50 - enum dpu_hw_blk_type type) 51 - { 52 - memset(iter, 0, sizeof(*iter)); 53 - iter->enc_id = enc_id; 54 - iter->type = type; 55 - } 56 - 57 - static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) 58 - { 59 - struct list_head *blk_list; 60 - 61 - if (!rm || !i || i->type >= DPU_HW_BLK_MAX) { 62 - DPU_ERROR("invalid rm\n"); 63 - return false; 64 - } 65 - 66 - i->hw = NULL; 67 - blk_list = &rm->hw_blks[i->type]; 68 - 69 - if (i->blk && (&i->blk->list == blk_list)) { 70 - DPU_DEBUG("attempt resume iteration past last\n"); 71 - return false; 72 - } 73 - 74 - i->blk = list_prepare_entry(i->blk, blk_list, list); 75 - 76 - list_for_each_entry_continue(i->blk, blk_list, list) { 77 - if (i->enc_id == i->blk->enc_id) { 78 - i->hw = i->blk->hw; 79 - DPU_DEBUG("found type %d id %d for enc %d\n", 80 - i->type, i->blk->id, i->enc_id); 81 - return true; 82 - } 83 - } 84 - 85 - DPU_DEBUG("no match, type %d for enc %d\n", 
i->type, i->enc_id); 86 - 87 - return false; 88 - } 89 - 90 - bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) 91 - { 92 - bool ret; 93 - 94 - mutex_lock(&rm->rm_lock); 95 - ret = _dpu_rm_get_hw_locked(rm, i); 96 - mutex_unlock(&rm->rm_lock); 97 - 98 - return ret; 99 - } 100 - 101 - static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw) 102 - { 103 - switch (type) { 104 - case DPU_HW_BLK_LM: 105 - dpu_hw_lm_destroy(hw); 106 - break; 107 - case DPU_HW_BLK_CTL: 108 - dpu_hw_ctl_destroy(hw); 109 - break; 110 - case DPU_HW_BLK_PINGPONG: 111 - dpu_hw_pingpong_destroy(hw); 112 - break; 113 - case DPU_HW_BLK_INTF: 114 - dpu_hw_intf_destroy(hw); 115 - break; 116 - case DPU_HW_BLK_SSPP: 117 - /* SSPPs are not managed by the resource manager */ 118 - case DPU_HW_BLK_TOP: 119 - /* Top is a singleton, not managed in hw_blks list */ 120 - case DPU_HW_BLK_MAX: 121 - default: 122 - DPU_ERROR("unsupported block type %d\n", type); 123 - break; 124 - } 125 - } 126 - 127 28 int dpu_rm_destroy(struct dpu_rm *rm) 128 29 { 129 - struct dpu_rm_hw_blk *hw_cur, *hw_nxt; 130 - enum dpu_hw_blk_type type; 30 + int i; 131 31 132 - for (type = 0; type < DPU_HW_BLK_MAX; type++) { 133 - list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type], 134 - list) { 135 - list_del(&hw_cur->list); 136 - _dpu_rm_hw_destroy(type, hw_cur->hw); 137 - kfree(hw_cur); 32 + for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { 33 + struct dpu_hw_pingpong *hw; 34 + 35 + if (rm->pingpong_blks[i]) { 36 + hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]); 37 + dpu_hw_pingpong_destroy(hw); 138 38 } 139 39 } 40 + for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) { 41 + struct dpu_hw_mixer *hw; 140 42 141 - mutex_destroy(&rm->rm_lock); 142 - 143 - return 0; 144 - } 145 - 146 - static int _dpu_rm_hw_blk_create( 147 - struct dpu_rm *rm, 148 - const struct dpu_mdss_cfg *cat, 149 - void __iomem *mmio, 150 - enum dpu_hw_blk_type type, 151 - uint32_t id, 152 - const void *hw_catalog_info) 153 - { 154 
- struct dpu_rm_hw_blk *blk; 155 - void *hw; 156 - 157 - switch (type) { 158 - case DPU_HW_BLK_LM: 159 - hw = dpu_hw_lm_init(id, mmio, cat); 160 - break; 161 - case DPU_HW_BLK_CTL: 162 - hw = dpu_hw_ctl_init(id, mmio, cat); 163 - break; 164 - case DPU_HW_BLK_PINGPONG: 165 - hw = dpu_hw_pingpong_init(id, mmio, cat); 166 - break; 167 - case DPU_HW_BLK_INTF: 168 - hw = dpu_hw_intf_init(id, mmio, cat); 169 - break; 170 - case DPU_HW_BLK_SSPP: 171 - /* SSPPs are not managed by the resource manager */ 172 - case DPU_HW_BLK_TOP: 173 - /* Top is a singleton, not managed in hw_blks list */ 174 - case DPU_HW_BLK_MAX: 175 - default: 176 - DPU_ERROR("unsupported block type %d\n", type); 177 - return -EINVAL; 43 + if (rm->mixer_blks[i]) { 44 + hw = to_dpu_hw_mixer(rm->mixer_blks[i]); 45 + dpu_hw_lm_destroy(hw); 46 + } 178 47 } 48 + for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) { 49 + struct dpu_hw_ctl *hw; 179 50 180 - if (IS_ERR_OR_NULL(hw)) { 181 - DPU_ERROR("failed hw object creation: type %d, err %ld\n", 182 - type, PTR_ERR(hw)); 183 - return -EFAULT; 51 + if (rm->ctl_blks[i]) { 52 + hw = to_dpu_hw_ctl(rm->ctl_blks[i]); 53 + dpu_hw_ctl_destroy(hw); 54 + } 184 55 } 56 + for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) { 57 + struct dpu_hw_intf *hw; 185 58 186 - blk = kzalloc(sizeof(*blk), GFP_KERNEL); 187 - if (!blk) { 188 - _dpu_rm_hw_destroy(type, hw); 189 - return -ENOMEM; 59 + if (rm->intf_blks[i]) { 60 + hw = to_dpu_hw_intf(rm->intf_blks[i]); 61 + dpu_hw_intf_destroy(hw); 62 + } 190 63 } 191 - 192 - blk->id = id; 193 - blk->hw = hw; 194 - blk->enc_id = 0; 195 - list_add_tail(&blk->list, &rm->hw_blks[type]); 196 64 197 65 return 0; 198 66 } ··· 74 198 void __iomem *mmio) 75 199 { 76 200 int rc, i; 77 - enum dpu_hw_blk_type type; 78 201 79 202 if (!rm || !cat || !mmio) { 80 203 DPU_ERROR("invalid kms\n"); ··· 83 208 /* Clear, setup lists */ 84 209 memset(rm, 0, sizeof(*rm)); 85 210 86 - mutex_init(&rm->rm_lock); 87 - 88 - for (type = 0; type < DPU_HW_BLK_MAX; type++) 89 - 
INIT_LIST_HEAD(&rm->hw_blks[type]); 90 - 91 211 /* Interrogate HW catalog and create tracking items for hw blocks */ 92 212 for (i = 0; i < cat->mixer_count; i++) { 213 + struct dpu_hw_mixer *hw; 93 214 const struct dpu_lm_cfg *lm = &cat->mixer[i]; 94 215 95 216 if (lm->pingpong == PINGPONG_MAX) { ··· 93 222 continue; 94 223 } 95 224 96 - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM, 97 - cat->mixer[i].id, &cat->mixer[i]); 98 - if (rc) { 99 - DPU_ERROR("failed: lm hw not available\n"); 225 + if (lm->id < LM_0 || lm->id >= LM_MAX) { 226 + DPU_ERROR("skip mixer %d with invalid id\n", lm->id); 227 + continue; 228 + } 229 + hw = dpu_hw_lm_init(lm->id, mmio, cat); 230 + if (IS_ERR_OR_NULL(hw)) { 231 + rc = PTR_ERR(hw); 232 + DPU_ERROR("failed lm object creation: err %d\n", rc); 100 233 goto fail; 101 234 } 235 + rm->mixer_blks[lm->id - LM_0] = &hw->base; 102 236 103 237 if (!rm->lm_max_width) { 104 238 rm->lm_max_width = lm->sblk->maxwidth; ··· 119 243 } 120 244 121 245 for (i = 0; i < cat->pingpong_count; i++) { 122 - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG, 123 - cat->pingpong[i].id, &cat->pingpong[i]); 124 - if (rc) { 125 - DPU_ERROR("failed: pp hw not available\n"); 246 + struct dpu_hw_pingpong *hw; 247 + const struct dpu_pingpong_cfg *pp = &cat->pingpong[i]; 248 + 249 + if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) { 250 + DPU_ERROR("skip pingpong %d with invalid id\n", pp->id); 251 + continue; 252 + } 253 + hw = dpu_hw_pingpong_init(pp->id, mmio, cat); 254 + if (IS_ERR_OR_NULL(hw)) { 255 + rc = PTR_ERR(hw); 256 + DPU_ERROR("failed pingpong object creation: err %d\n", 257 + rc); 126 258 goto fail; 127 259 } 260 + rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base; 128 261 } 129 262 130 263 for (i = 0; i < cat->intf_count; i++) { 131 - if (cat->intf[i].type == INTF_NONE) { 264 + struct dpu_hw_intf *hw; 265 + const struct dpu_intf_cfg *intf = &cat->intf[i]; 266 + 267 + if (intf->type == INTF_NONE) { 132 268 DPU_DEBUG("skip intf 
%d with type none\n", i); 133 269 continue; 134 270 } 135 - 136 - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF, 137 - cat->intf[i].id, &cat->intf[i]); 138 - if (rc) { 139 - DPU_ERROR("failed: intf hw not available\n"); 271 + if (intf->id < INTF_0 || intf->id >= INTF_MAX) { 272 + DPU_ERROR("skip intf %d with invalid id\n", intf->id); 273 + continue; 274 + } 275 + hw = dpu_hw_intf_init(intf->id, mmio, cat); 276 + if (IS_ERR_OR_NULL(hw)) { 277 + rc = PTR_ERR(hw); 278 + DPU_ERROR("failed intf object creation: err %d\n", rc); 140 279 goto fail; 141 280 } 281 + rm->intf_blks[intf->id - INTF_0] = &hw->base; 142 282 } 143 283 144 284 for (i = 0; i < cat->ctl_count; i++) { 145 - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL, 146 - cat->ctl[i].id, &cat->ctl[i]); 147 - if (rc) { 148 - DPU_ERROR("failed: ctl hw not available\n"); 285 + struct dpu_hw_ctl *hw; 286 + const struct dpu_ctl_cfg *ctl = &cat->ctl[i]; 287 + 288 + if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) { 289 + DPU_ERROR("skip ctl %d with invalid id\n", ctl->id); 290 + continue; 291 + } 292 + hw = dpu_hw_ctl_init(ctl->id, mmio, cat); 293 + if (IS_ERR_OR_NULL(hw)) { 294 + rc = PTR_ERR(hw); 295 + DPU_ERROR("failed ctl object creation: err %d\n", rc); 149 296 goto fail; 150 297 } 298 + rm->ctl_blks[ctl->id - CTL_0] = &hw->base; 151 299 } 152 300 153 301 return 0; ··· 179 279 fail: 180 280 dpu_rm_destroy(rm); 181 281 182 - return rc; 282 + return rc ? 
rc : -EFAULT; 183 283 } 184 284 185 285 static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) ··· 188 288 } 189 289 190 290 /** 291 + * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary 292 + * @rm: dpu resource manager handle 293 + * @primary_idx: index of primary mixer in rm->mixer_blks[] 294 + * @peer_idx: index of other mixer in rm->mixer_blks[] 295 + * @Return: true if rm->mixer_blks[peer_idx] is a peer of 296 + * rm->mixer_blks[primary_idx] 297 + */ 298 + static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx, 299 + int peer_idx) 300 + { 301 + const struct dpu_lm_cfg *prim_lm_cfg; 302 + const struct dpu_lm_cfg *peer_cfg; 303 + 304 + prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap; 305 + peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap; 306 + 307 + if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) { 308 + DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id, 309 + peer_cfg->id); 310 + return false; 311 + } 312 + return true; 313 + } 314 + 315 + /** 191 316 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets 192 317 * proposed use case requirements, incl. hardwired dependent blocks like 193 318 * pingpong 194 319 * @rm: dpu resource manager handle 195 320 * @enc_id: encoder id requesting for allocation 196 - * @reqs: proposed use case requirements 197 - * @lm: proposed layer mixer, function checks if lm, and all other hardwired 198 - * blocks connected to the lm (pp) is available and appropriate 199 - * @pp: output parameter, pingpong block attached to the layer mixer. 200 - * NULL if pp was not available, or not matching requirements. 
201 - * @primary_lm: if non-null, this function check if lm is compatible primary_lm 202 - * as well as satisfying all other requirements 321 + * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks 322 + * if lm, and all other hardwired blocks connected to the lm (pp) is 323 + * available and appropriate 324 + * @pp_idx: output parameter, index of pingpong block attached to the layer 325 + * mixer in rm->pongpong_blks[]. 203 326 * @Return: true if lm matches all requirements, false otherwise 204 327 */ 205 - static bool _dpu_rm_check_lm_and_get_connected_blks( 206 - struct dpu_rm *rm, 207 - uint32_t enc_id, 208 - struct dpu_rm_requirements *reqs, 209 - struct dpu_rm_hw_blk *lm, 210 - struct dpu_rm_hw_blk **pp, 211 - struct dpu_rm_hw_blk *primary_lm) 328 + static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, 329 + struct dpu_global_state *global_state, 330 + uint32_t enc_id, int lm_idx, int *pp_idx) 212 331 { 213 - const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap; 214 - struct dpu_rm_hw_iter iter; 215 - 216 - *pp = NULL; 217 - 218 - DPU_DEBUG("check lm %d pp %d\n", 219 - lm_cfg->id, lm_cfg->pingpong); 220 - 221 - /* Check if this layer mixer is a peer of the proposed primary LM */ 222 - if (primary_lm) { 223 - const struct dpu_lm_cfg *prim_lm_cfg = 224 - to_dpu_hw_mixer(primary_lm->hw)->cap; 225 - 226 - if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) { 227 - DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id, 228 - prim_lm_cfg->id); 229 - return false; 230 - } 231 - } 332 + const struct dpu_lm_cfg *lm_cfg; 333 + int idx; 232 334 233 335 /* Already reserved? 
*/ 234 - if (RESERVED_BY_OTHER(lm, enc_id)) { 235 - DPU_DEBUG("lm %d already reserved\n", lm_cfg->id); 336 + if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) { 337 + DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0); 236 338 return false; 237 339 } 238 340 239 - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG); 240 - while (_dpu_rm_get_hw_locked(rm, &iter)) { 241 - if (iter.blk->id == lm_cfg->pingpong) { 242 - *pp = iter.blk; 243 - break; 244 - } 245 - } 246 - 247 - if (!*pp) { 341 + lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap; 342 + idx = lm_cfg->pingpong - PINGPONG_0; 343 + if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) { 248 344 DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong); 249 345 return false; 250 346 } 251 347 252 - if (RESERVED_BY_OTHER(*pp, enc_id)) { 253 - DPU_DEBUG("lm %d pp %d already reserved\n", lm->id, 254 - (*pp)->id); 348 + if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) { 349 + DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id, 350 + lm_cfg->pingpong); 255 351 return false; 256 352 } 257 - 353 + *pp_idx = idx; 258 354 return true; 259 355 } 260 356 261 - static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, 357 + static int _dpu_rm_reserve_lms(struct dpu_rm *rm, 358 + struct dpu_global_state *global_state, 359 + uint32_t enc_id, 262 360 struct dpu_rm_requirements *reqs) 263 361 264 362 { 265 - struct dpu_rm_hw_blk *lm[MAX_BLOCKS]; 266 - struct dpu_rm_hw_blk *pp[MAX_BLOCKS]; 267 - struct dpu_rm_hw_iter iter_i, iter_j; 268 - int lm_count = 0; 269 - int i, rc = 0; 363 + int lm_idx[MAX_BLOCKS]; 364 + int pp_idx[MAX_BLOCKS]; 365 + int i, j, lm_count = 0; 270 366 271 367 if (!reqs->topology.num_lm) { 272 368 DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm); ··· 270 374 } 271 375 272 376 /* Find a primary mixer */ 273 - dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM); 274 - while (lm_count != reqs->topology.num_lm && 275 - _dpu_rm_get_hw_locked(rm, 
&iter_i)) { 276 - memset(&lm, 0, sizeof(lm)); 277 - memset(&pp, 0, sizeof(pp)); 377 + for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) && 378 + lm_count < reqs->topology.num_lm; i++) { 379 + if (!rm->mixer_blks[i]) 380 + continue; 278 381 279 382 lm_count = 0; 280 - lm[lm_count] = iter_i.blk; 383 + lm_idx[lm_count] = i; 281 384 282 - if (!_dpu_rm_check_lm_and_get_connected_blks( 283 - rm, enc_id, reqs, lm[lm_count], 284 - &pp[lm_count], NULL)) 385 + if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, 386 + enc_id, i, &pp_idx[lm_count])) { 285 387 continue; 388 + } 286 389 287 390 ++lm_count; 288 391 289 392 /* Valid primary mixer found, find matching peers */ 290 - dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM); 291 - 292 - while (lm_count != reqs->topology.num_lm && 293 - _dpu_rm_get_hw_locked(rm, &iter_j)) { 294 - if (iter_i.blk == iter_j.blk) 393 + for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) && 394 + lm_count < reqs->topology.num_lm; j++) { 395 + if (!rm->mixer_blks[j]) 295 396 continue; 296 397 297 - if (!_dpu_rm_check_lm_and_get_connected_blks( 298 - rm, enc_id, reqs, iter_j.blk, 299 - &pp[lm_count], iter_i.blk)) 398 + if (!_dpu_rm_check_lm_peer(rm, i, j)) { 399 + DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j, 400 + LM_0 + i); 300 401 continue; 402 + } 301 403 302 - lm[lm_count] = iter_j.blk; 404 + if (!_dpu_rm_check_lm_and_get_connected_blks(rm, 405 + global_state, enc_id, j, 406 + &pp_idx[lm_count])) { 407 + continue; 408 + } 409 + 410 + lm_idx[lm_count] = j; 303 411 ++lm_count; 304 412 } 305 413 } ··· 313 413 return -ENAVAIL; 314 414 } 315 415 316 - for (i = 0; i < ARRAY_SIZE(lm); i++) { 317 - if (!lm[i]) 318 - break; 416 + for (i = 0; i < lm_count; i++) { 417 + global_state->mixer_to_enc_id[lm_idx[i]] = enc_id; 418 + global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id; 319 419 320 - lm[i]->enc_id = enc_id; 321 - pp[i]->enc_id = enc_id; 322 - 323 - trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id); 420 + trace_dpu_rm_reserve_lms(lm_idx[i] + 
LM_0, enc_id, 421 + pp_idx[i] + PINGPONG_0); 324 422 } 325 423 326 - return rc; 424 + return 0; 327 425 } 328 426 329 427 static int _dpu_rm_reserve_ctls( 330 428 struct dpu_rm *rm, 429 + struct dpu_global_state *global_state, 331 430 uint32_t enc_id, 332 431 const struct msm_display_topology *top) 333 432 { 334 - struct dpu_rm_hw_blk *ctls[MAX_BLOCKS]; 335 - struct dpu_rm_hw_iter iter; 336 - int i = 0, num_ctls = 0; 337 - bool needs_split_display = false; 338 - 339 - memset(&ctls, 0, sizeof(ctls)); 433 + int ctl_idx[MAX_BLOCKS]; 434 + int i = 0, j, num_ctls; 435 + bool needs_split_display; 340 436 341 437 /* each hw_intf needs its own hw_ctrl to program its control path */ 342 438 num_ctls = top->num_intf; 343 439 344 440 needs_split_display = _dpu_rm_needs_split_display(top); 345 441 346 - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL); 347 - while (_dpu_rm_get_hw_locked(rm, &iter)) { 348 - const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw); 349 - unsigned long features = ctl->caps->features; 442 + for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) { 443 + const struct dpu_hw_ctl *ctl; 444 + unsigned long features; 350 445 bool has_split_display; 351 446 352 - if (RESERVED_BY_OTHER(iter.blk, enc_id)) 447 + if (!rm->ctl_blks[j]) 448 + continue; 449 + if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id)) 353 450 continue; 354 451 452 + ctl = to_dpu_hw_ctl(rm->ctl_blks[j]); 453 + features = ctl->caps->features; 355 454 has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features; 356 455 357 - DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features); 456 + DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features); 358 457 359 458 if (needs_split_display != has_split_display) 360 459 continue; 361 460 362 - ctls[i] = iter.blk; 363 - DPU_DEBUG("ctl %d match\n", iter.blk->id); 461 + ctl_idx[i] = j; 462 + DPU_DEBUG("ctl %d match\n", j + CTL_0); 364 463 365 464 if (++i == num_ctls) 366 465 break; 466 + 367 467 } 368 468 369 469 if (i != num_ctls) 370 470 
return -ENAVAIL; 371 471 372 - for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) { 373 - ctls[i]->enc_id = enc_id; 374 - trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id); 472 + for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) { 473 + global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id; 474 + trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id); 375 475 } 376 476 377 477 return 0; ··· 379 479 380 480 static int _dpu_rm_reserve_intf( 381 481 struct dpu_rm *rm, 482 + struct dpu_global_state *global_state, 382 483 uint32_t enc_id, 383 - uint32_t id, 384 - enum dpu_hw_blk_type type) 484 + uint32_t id) 385 485 { 386 - struct dpu_rm_hw_iter iter; 387 - int ret = 0; 486 + int idx = id - INTF_0; 388 487 389 - /* Find the block entry in the rm, and note the reservation */ 390 - dpu_rm_init_hw_iter(&iter, 0, type); 391 - while (_dpu_rm_get_hw_locked(rm, &iter)) { 392 - if (iter.blk->id != id) 393 - continue; 394 - 395 - if (RESERVED_BY_OTHER(iter.blk, enc_id)) { 396 - DPU_ERROR("type %d id %d already reserved\n", type, id); 397 - return -ENAVAIL; 398 - } 399 - 400 - iter.blk->enc_id = enc_id; 401 - trace_dpu_rm_reserve_intf(iter.blk->id, enc_id); 402 - break; 403 - } 404 - 405 - /* Shouldn't happen since intfs are fixed at probe */ 406 - if (!iter.hw) { 407 - DPU_ERROR("couldn't find type %d id %d\n", type, id); 488 + if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) { 489 + DPU_ERROR("invalid intf id: %d", id); 408 490 return -EINVAL; 409 491 } 410 492 411 - return ret; 493 + if (!rm->intf_blks[idx]) { 494 + DPU_ERROR("couldn't find intf id %d\n", id); 495 + return -EINVAL; 496 + } 497 + 498 + if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) { 499 + DPU_ERROR("intf id %d already reserved\n", id); 500 + return -ENAVAIL; 501 + } 502 + 503 + global_state->intf_to_enc_id[idx] = enc_id; 504 + return 0; 412 505 } 413 506 414 507 static int _dpu_rm_reserve_intf_related_hw( 415 508 struct dpu_rm *rm, 509 + struct dpu_global_state *global_state, 416 510 uint32_t 
enc_id, 417 511 struct dpu_encoder_hw_resources *hw_res) 418 512 { ··· 417 523 if (hw_res->intfs[i] == INTF_MODE_NONE) 418 524 continue; 419 525 id = i + INTF_0; 420 - ret = _dpu_rm_reserve_intf(rm, enc_id, id, 421 - DPU_HW_BLK_INTF); 526 + ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id); 422 527 if (ret) 423 528 return ret; 424 529 } ··· 427 534 428 535 static int _dpu_rm_make_reservation( 429 536 struct dpu_rm *rm, 537 + struct dpu_global_state *global_state, 430 538 struct drm_encoder *enc, 431 - struct drm_crtc_state *crtc_state, 432 539 struct dpu_rm_requirements *reqs) 433 540 { 434 541 int ret; 435 542 436 - ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs); 543 + ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs); 437 544 if (ret) { 438 545 DPU_ERROR("unable to find appropriate mixers\n"); 439 546 return ret; 440 547 } 441 548 442 - ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology); 549 + ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id, 550 + &reqs->topology); 443 551 if (ret) { 444 552 DPU_ERROR("unable to find appropriate CTL\n"); 445 553 return ret; 446 554 } 447 555 448 - ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res); 556 + ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id, 557 + &reqs->hw_res); 449 558 if (ret) 450 559 return ret; 451 560 ··· 455 560 } 456 561 457 562 static int _dpu_rm_populate_requirements( 458 - struct dpu_rm *rm, 459 563 struct drm_encoder *enc, 460 - struct drm_crtc_state *crtc_state, 461 564 struct dpu_rm_requirements *reqs, 462 565 struct msm_display_topology req_topology) 463 566 { ··· 470 577 return 0; 471 578 } 472 579 473 - static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id) 580 + static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, 581 + uint32_t enc_id) 474 582 { 475 - struct dpu_rm_hw_blk *blk; 476 - enum dpu_hw_blk_type type; 583 + int i; 477 584 478 - for (type = 0; type < DPU_HW_BLK_MAX; type++) 
{ 479 - list_for_each_entry(blk, &rm->hw_blks[type], list) { 480 - if (blk->enc_id == enc_id) { 481 - blk->enc_id = 0; 482 - DPU_DEBUG("rel enc %d %d %d\n", enc_id, 483 - type, blk->id); 484 - } 485 - } 585 + for (i = 0; i < cnt; i++) { 586 + if (res_mapping[i] == enc_id) 587 + res_mapping[i] = 0; 486 588 } 487 589 } 488 590 489 - void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) 591 + void dpu_rm_release(struct dpu_global_state *global_state, 592 + struct drm_encoder *enc) 490 593 { 491 - mutex_lock(&rm->rm_lock); 492 - 493 - _dpu_rm_release_reservation(rm, enc->base.id); 494 - 495 - mutex_unlock(&rm->rm_lock); 594 + _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id, 595 + ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id); 596 + _dpu_rm_clear_mapping(global_state->mixer_to_enc_id, 597 + ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id); 598 + _dpu_rm_clear_mapping(global_state->ctl_to_enc_id, 599 + ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); 600 + _dpu_rm_clear_mapping(global_state->intf_to_enc_id, 601 + ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id); 496 602 } 497 603 498 604 int dpu_rm_reserve( 499 605 struct dpu_rm *rm, 606 + struct dpu_global_state *global_state, 500 607 struct drm_encoder *enc, 501 608 struct drm_crtc_state *crtc_state, 502 - struct msm_display_topology topology, 503 - bool test_only) 609 + struct msm_display_topology topology) 504 610 { 505 611 struct dpu_rm_requirements reqs; 506 612 int ret; ··· 508 616 if (!drm_atomic_crtc_needs_modeset(crtc_state)) 509 617 return 0; 510 618 511 - DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n", 512 - enc->base.id, crtc_state->crtc->base.id, test_only); 619 + if (IS_ERR(global_state)) { 620 + DPU_ERROR("failed to global state\n"); 621 + return PTR_ERR(global_state); 622 + } 513 623 514 - mutex_lock(&rm->rm_lock); 624 + DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n", 625 + enc->base.id, crtc_state->crtc->base.id); 515 626 516 - ret 
= _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs, 517 - topology); 627 + ret = _dpu_rm_populate_requirements(enc, &reqs, topology); 518 628 if (ret) { 519 629 DPU_ERROR("failed to populate hw requirements\n"); 520 - goto end; 630 + return ret; 521 631 } 522 632 523 - ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs); 524 - if (ret) { 633 + ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs); 634 + if (ret) 525 635 DPU_ERROR("failed to reserve hw resources: %d\n", ret); 526 - _dpu_rm_release_reservation(rm, enc->base.id); 527 - } else if (test_only) { 528 - /* test_only: test the reservation and then undo */ 529 - DPU_DEBUG("test_only: discard test [enc: %d]\n", 530 - enc->base.id); 531 - _dpu_rm_release_reservation(rm, enc->base.id); 532 - } 533 636 534 - end: 535 - mutex_unlock(&rm->rm_lock); 637 + 536 638 537 639 return ret; 640 + } 641 + 642 + int dpu_rm_get_assigned_resources(struct dpu_rm *rm, 643 + struct dpu_global_state *global_state, uint32_t enc_id, 644 + enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) 645 + { 646 + struct dpu_hw_blk **hw_blks; 647 + uint32_t *hw_to_enc_id; 648 + int i, num_blks, max_blks; 649 + 650 + switch (type) { 651 + case DPU_HW_BLK_PINGPONG: 652 + hw_blks = rm->pingpong_blks; 653 + hw_to_enc_id = global_state->pingpong_to_enc_id; 654 + max_blks = ARRAY_SIZE(rm->pingpong_blks); 655 + break; 656 + case DPU_HW_BLK_LM: 657 + hw_blks = rm->mixer_blks; 658 + hw_to_enc_id = global_state->mixer_to_enc_id; 659 + max_blks = ARRAY_SIZE(rm->mixer_blks); 660 + break; 661 + case DPU_HW_BLK_CTL: 662 + hw_blks = rm->ctl_blks; 663 + hw_to_enc_id = global_state->ctl_to_enc_id; 664 + max_blks = ARRAY_SIZE(rm->ctl_blks); 665 + break; 666 + case DPU_HW_BLK_INTF: 667 + hw_blks = rm->intf_blks; 668 + hw_to_enc_id = global_state->intf_to_enc_id; 669 + max_blks = ARRAY_SIZE(rm->intf_blks); 670 + break; 671 + default: 672 + DPU_ERROR("blk type %d not managed by rm\n", type); 673 + return 0; 674 + } 675 + 676 + 
num_blks = 0; 677 + for (i = 0; i < max_blks; i++) { 678 + if (hw_to_enc_id[i] != enc_id) 679 + continue; 680 + 681 + if (num_blks == blks_size) { 682 + DPU_ERROR("More than %d resources assigned to enc %d\n", 683 + blks_size, enc_id); 684 + break; 685 + } 686 + blks[num_blks++] = hw_blks[i]; 687 + } 688 + 689 + return num_blks; 538 690 }
+20 -51
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
··· 11 11 #include "msm_kms.h" 12 12 #include "dpu_hw_top.h" 13 13 14 + struct dpu_global_state; 15 + 14 16 /** 15 17 * struct dpu_rm - DPU dynamic hardware resource manager 16 - * @hw_blks: array of lists of hardware resources present in the system, one 17 - * list per type of hardware block 18 + * @pingpong_blks: array of pingpong hardware resources 19 + * @mixer_blks: array of layer mixer hardware resources 20 + * @ctl_blks: array of ctl hardware resources 21 + * @intf_blks: array of intf hardware resources 18 22 * @lm_max_width: cached layer mixer maximum width 19 23 * @rm_lock: resource manager mutex 20 24 */ 21 25 struct dpu_rm { 22 - struct list_head hw_blks[DPU_HW_BLK_MAX]; 26 + struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0]; 27 + struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0]; 28 + struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0]; 29 + struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0]; 30 + 23 31 uint32_t lm_max_width; 24 - struct mutex rm_lock; 25 - }; 26 - 27 - /** 28 - * struct dpu_rm_hw_blk - resource manager internal structure 29 - * forward declaration for single iterator definition without void pointer 30 - */ 31 - struct dpu_rm_hw_blk; 32 - 33 - /** 34 - * struct dpu_rm_hw_iter - iterator for use with dpu_rm 35 - * @hw: dpu_hw object requested, or NULL on failure 36 - * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator. 37 - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder 38 - * @type: Hardware Block Type client wishes to search for. 
39 - */ 40 - struct dpu_rm_hw_iter { 41 - void *hw; 42 - struct dpu_rm_hw_blk *blk; 43 - uint32_t enc_id; 44 - enum dpu_hw_blk_type type; 45 32 }; 46 33 47 34 /** ··· 61 74 * @drm_enc: DRM Encoder handle 62 75 * @crtc_state: Proposed Atomic DRM CRTC State handle 63 76 * @topology: Pointer to topology info for the display 64 - * @test_only: Atomic-Test phase, discard results (unless property overrides) 65 77 * @Return: 0 on Success otherwise -ERROR 66 78 */ 67 79 int dpu_rm_reserve(struct dpu_rm *rm, 80 + struct dpu_global_state *global_state, 68 81 struct drm_encoder *drm_enc, 69 82 struct drm_crtc_state *crtc_state, 70 - struct msm_display_topology topology, 71 - bool test_only); 83 + struct msm_display_topology topology); 72 84 73 85 /** 74 86 * dpu_rm_reserve - Given the encoder for the display chain, release any ··· 76 90 * @enc: DRM Encoder handle 77 91 * @Return: 0 on Success otherwise -ERROR 78 92 */ 79 - void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc); 93 + void dpu_rm_release(struct dpu_global_state *global_state, 94 + struct drm_encoder *enc); 80 95 81 96 /** 82 - * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list 83 - * using dpu_rm_get_hw 84 - * @iter: iter object to initialize 85 - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder 86 - * @type: Hardware Block Type client wishes to search for. 97 + * Get hw resources of the given type that are assigned to this encoder. 87 98 */ 88 - void dpu_rm_init_hw_iter( 89 - struct dpu_rm_hw_iter *iter, 90 - uint32_t enc_id, 91 - enum dpu_hw_blk_type type); 92 - /** 93 - * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type 94 - * Meant to do a single pass through the hardware list to iteratively 95 - * retrieve hardware blocks of a given type for a given encoder. 96 - * Initialize an iterator object. 97 - * Set hw block type of interest. Set encoder id of interest, 0 for any. 
98 - * Function returns first hw of type for that encoder. 99 - * Subsequent calls will return the next reserved hw of that type in-order. 100 - * Iterator HW pointer will be null on failure to find hw. 101 - * @rm: DPU Resource Manager handle 102 - * @iter: iterator object 103 - * @Return: true on match found, false on no match found 104 - */ 105 - bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter); 99 + int dpu_rm_get_assigned_resources(struct dpu_rm *rm, 100 + struct dpu_global_state *global_state, uint32_t enc_id, 101 + enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); 106 102 #endif /* __DPU_RM_H__ */ 103 +
+3 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
··· 24 24 int rc; 25 25 26 26 if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) { 27 - DPU_ERROR("invalid arguments vbif %d\n", vbif != 0); 27 + DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL); 28 28 return -EINVAL; 29 29 } 30 30 ··· 106 106 u32 val; 107 107 108 108 if (!vbif || !vbif->cap) { 109 - DPU_ERROR("invalid arguments vbif %d\n", vbif != 0); 109 + DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL); 110 110 return -EINVAL; 111 111 } 112 112 ··· 164 164 165 165 if (!vbif || !mdp) { 166 166 DPU_DEBUG("invalid arguments vbif %d mdp %d\n", 167 - vbif != 0, mdp != 0); 167 + vbif != NULL, mdp != NULL); 168 168 return; 169 169 } 170 170
-4
drivers/gpu/drm/msm/edp/edp.c
··· 178 178 goto fail; 179 179 } 180 180 181 - ret = drm_bridge_attach(encoder, edp->bridge, NULL, 0); 182 - if (ret) 183 - goto fail; 184 - 185 181 priv->bridges[priv->num_bridges++] = edp->bridge; 186 182 priv->connectors[priv->num_connectors++] = edp->connector; 187 183
-4
drivers/gpu/drm/msm/hdmi/hdmi.c
··· 327 327 goto fail; 328 328 } 329 329 330 - ret = drm_bridge_attach(encoder, hdmi->bridge, NULL, 0); 331 - if (ret) 332 - goto fail; 333 - 334 330 priv->bridges[priv->num_bridges++] = hdmi->bridge; 335 331 priv->connectors[priv->num_connectors++] = hdmi->connector; 336 332
+4 -2
drivers/gpu/drm/msm/msm_drv.c
··· 444 444 if (!dev->dma_parms) { 445 445 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), 446 446 GFP_KERNEL); 447 - if (!dev->dma_parms) 448 - return -ENOMEM; 447 + if (!dev->dma_parms) { 448 + ret = -ENOMEM; 449 + goto err_msm_uninit; 450 + } 449 451 } 450 452 dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); 451 453
+11 -1
drivers/gpu/drm/msm/msm_gem.h
··· 157 157 uint32_t handle; 158 158 }; 159 159 uint64_t iova; 160 - } bos[0]; 160 + } bos[]; 161 161 }; 162 + 163 + /* helper to determine of a buffer in submit should be dumped, used for both 164 + * devcoredump and debugfs cmdstream dumping: 165 + */ 166 + static inline bool 167 + should_dump(struct msm_gem_submit *submit, int idx) 168 + { 169 + extern bool rd_full; 170 + return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); 171 + } 162 172 163 173 #endif /* __MSM_GEM_H__ */
+22 -4
drivers/gpu/drm/msm/msm_gpu.c
··· 355 355 state->cmd = kstrdup(cmd, GFP_KERNEL); 356 356 357 357 if (submit) { 358 - int i; 358 + int i, nr = 0; 359 359 360 - state->bos = kcalloc(submit->nr_cmds, 360 + /* count # of buffers to dump: */ 361 + for (i = 0; i < submit->nr_bos; i++) 362 + if (should_dump(submit, i)) 363 + nr++; 364 + /* always dump cmd bo's, but don't double count them: */ 365 + for (i = 0; i < submit->nr_cmds; i++) 366 + if (!should_dump(submit, submit->cmd[i].idx)) 367 + nr++; 368 + 369 + state->bos = kcalloc(nr, 361 370 sizeof(struct msm_gpu_state_bo), GFP_KERNEL); 371 + 372 + for (i = 0; i < submit->nr_bos; i++) { 373 + if (should_dump(submit, i)) { 374 + msm_gpu_crashstate_get_bo(state, submit->bos[i].obj, 375 + submit->bos[i].iova, submit->bos[i].flags); 376 + } 377 + } 362 378 363 379 for (i = 0; state->bos && i < submit->nr_cmds; i++) { 364 380 int idx = submit->cmd[i].idx; 365 381 366 - msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj, 367 - submit->bos[idx].iova, submit->bos[idx].flags); 382 + if (!should_dump(submit, submit->cmd[i].idx)) { 383 + msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj, 384 + submit->bos[idx].iova, submit->bos[idx].flags); 385 + } 368 386 } 369 387 } 370 388
+1 -7
drivers/gpu/drm/msm/msm_rd.c
··· 43 43 #include "msm_gpu.h" 44 44 #include "msm_gem.h" 45 45 46 - static bool rd_full = false; 46 + bool rd_full = false; 47 47 MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents"); 48 48 module_param_named(rd_full, rd_full, bool, 0600); 49 49 ··· 334 334 rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); 335 335 336 336 msm_gem_put_vaddr(&obj->base); 337 - } 338 - 339 - static bool 340 - should_dump(struct msm_gem_submit *submit, int idx) 341 - { 342 - return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); 343 337 } 344 338 345 339 /* called under struct_mutex */