Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-msm-next-2019-11-05' of https://gitlab.freedesktop.org/drm/msm into drm-next

+ OCMEM support to enable the couple generations that had shared OCMEM
rather than GMEM exclusively for the GPU (late a3xx and I think basically
all of a4xx). Bjorn and Brian decided to land this through the drm
tree to avoid having to coordinate merge requests.
+ a510 support, and various associated display support
+ the usual misc cleanups and fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGv-JWswEJRxe5AmnGQO1SZnpxK05kO1E29K6UUzC9GMMw@mail.gmail.com

+1235 -307
+51
Documentation/devicetree/bindings/display/msm/gmu.txt
··· 31 31 - iommus: phandle to the adreno iommu 32 32 - operating-points-v2: phandle to the OPP operating points 33 33 34 + Optional properties: 35 + - sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon 36 + SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml. 37 + 34 38 Example: 35 39 36 40 / { ··· 65 61 iommus = <&adreno_smmu 5>; 66 62 67 63 operating-points-v2 = <&gmu_opp_table>; 64 + }; 65 + }; 66 + 67 + a3xx example with OCMEM support: 68 + 69 + / { 70 + ... 71 + 72 + gpu: adreno@fdb00000 { 73 + compatible = "qcom,adreno-330.2", 74 + "qcom,adreno"; 75 + reg = <0xfdb00000 0x10000>; 76 + reg-names = "kgsl_3d0_reg_memory"; 77 + interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; 78 + interrupt-names = "kgsl_3d0_irq"; 79 + clock-names = "core", 80 + "iface", 81 + "mem_iface"; 82 + clocks = <&mmcc OXILI_GFX3D_CLK>, 83 + <&mmcc OXILICX_AHB_CLK>, 84 + <&mmcc OXILICX_AXI_CLK>; 85 + sram = <&gmu_sram>; 86 + power-domains = <&mmcc OXILICX_GDSC>; 87 + operating-points-v2 = <&gpu_opp_table>; 88 + iommus = <&gpu_iommu 0>; 89 + }; 90 + 91 + ocmem@fdd00000 { 92 + compatible = "qcom,msm8974-ocmem"; 93 + 94 + reg = <0xfdd00000 0x2000>, 95 + <0xfec00000 0x180000>; 96 + reg-names = "ctrl", 97 + "mem"; 98 + 99 + clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, 100 + <&mmcc OCMEMCX_OCMEMNOC_CLK>; 101 + clock-names = "core", 102 + "iface"; 103 + 104 + #address-cells = <1>; 105 + #size-cells = <1>; 106 + 107 + gmu_sram: gmu-sram@0 { 108 + reg = <0x0 0x100000>; 109 + ranges = <0 0 0xfec00000 0x100000>; 110 + }; 68 111 }; 69 112 };
+2
Documentation/devicetree/bindings/display/msm/mdp5.txt
··· 76 76 Optional properties: 77 77 - clock-names: the following clocks are optional: 78 78 * "lut" 79 + * "tbu" 80 + * "tbu_rt" 79 81 80 82 Example: 81 83
+96
Documentation/devicetree/bindings/sram/qcom,ocmem.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/sram/qcom,ocmem.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: On Chip Memory (OCMEM) that is present on some Qualcomm Snapdragon SoCs. 8 + 9 + maintainers: 10 + - Brian Masney <masneyb@onstation.org> 11 + 12 + description: | 13 + The On Chip Memory (OCMEM) is typically used by the GPU, camera/video, and 14 + audio components on some Snapdragon SoCs. 15 + 16 + properties: 17 + compatible: 18 + const: qcom,msm8974-ocmem 19 + 20 + reg: 21 + items: 22 + - description: Control registers 23 + - description: OCMEM address range 24 + 25 + reg-names: 26 + items: 27 + - const: ctrl 28 + - const: mem 29 + 30 + clocks: 31 + items: 32 + - description: Core clock 33 + - description: Interface clock 34 + 35 + clock-names: 36 + items: 37 + - const: core 38 + - const: iface 39 + 40 + '#address-cells': 41 + const: 1 42 + 43 + '#size-cells': 44 + const: 1 45 + 46 + required: 47 + - compatible 48 + - reg 49 + - reg-names 50 + - clocks 51 + - clock-names 52 + - '#address-cells' 53 + - '#size-cells' 54 + 55 + patternProperties: 56 + "^.+-sram$": 57 + type: object 58 + description: A region of reserved memory. 59 + 60 + properties: 61 + reg: 62 + maxItems: 1 63 + 64 + ranges: 65 + maxItems: 1 66 + 67 + required: 68 + - reg 69 + - ranges 70 + 71 + examples: 72 + - | 73 + #include <dt-bindings/clock/qcom,rpmcc.h> 74 + #include <dt-bindings/clock/qcom,mmcc-msm8974.h> 75 + 76 + ocmem: ocmem@fdd00000 { 77 + compatible = "qcom,msm8974-ocmem"; 78 + 79 + reg = <0xfdd00000 0x2000>, 80 + <0xfec00000 0x180000>; 81 + reg-names = "ctrl", 82 + "mem"; 83 + 84 + clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, 85 + <&mmcc OCMEMCX_OCMEMNOC_CLK>; 86 + clock-names = "core", 87 + "iface"; 88 + 89 + #address-cells = <1>; 90 + #size-cells = <1>; 91 + 92 + gmu-sram@0 { 93 + reg = <0x0 0x100000>; 94 + ranges = <0 0 0xfec00000 0x100000>; 95 + }; 96 + };
+51 -1
drivers/firmware/qcom_scm-32.c
··· 442 442 req, req_cnt * sizeof(*req), resp, sizeof(*resp)); 443 443 } 444 444 445 + int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size, 446 + u32 mode) 447 + { 448 + struct ocmem_tz_lock { 449 + __le32 id; 450 + __le32 offset; 451 + __le32 size; 452 + __le32 mode; 453 + } request; 454 + 455 + request.id = cpu_to_le32(id); 456 + request.offset = cpu_to_le32(offset); 457 + request.size = cpu_to_le32(size); 458 + request.mode = cpu_to_le32(mode); 459 + 460 + return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD, 461 + &request, sizeof(request), NULL, 0); 462 + } 463 + 464 + int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size) 465 + { 466 + struct ocmem_tz_unlock { 467 + __le32 id; 468 + __le32 offset; 469 + __le32 size; 470 + } request; 471 + 472 + request.id = cpu_to_le32(id); 473 + request.offset = cpu_to_le32(offset); 474 + request.size = cpu_to_le32(size); 475 + 476 + return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD, 477 + &request, sizeof(request), NULL, 0); 478 + } 479 + 445 480 void __qcom_scm_init(void) 446 481 { 447 482 } ··· 617 582 int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, 618 583 u32 spare) 619 584 { 620 - return -ENODEV; 585 + struct msm_scm_sec_cfg { 586 + __le32 id; 587 + __le32 ctx_bank_num; 588 + } cfg; 589 + int ret, scm_ret = 0; 590 + 591 + cfg.id = cpu_to_le32(device_id); 592 + cfg.ctx_bank_num = cpu_to_le32(spare); 593 + 594 + ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG, 595 + &cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret)); 596 + 597 + if (ret || scm_ret) 598 + return ret ? ret : -EINVAL; 599 + 600 + return 0; 621 601 } 622 602 623 603 int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
+12
drivers/firmware/qcom_scm-64.c
··· 241 241 return ret; 242 242 } 243 243 244 + int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset, 245 + uint32_t size, uint32_t mode) 246 + { 247 + return -ENOTSUPP; 248 + } 249 + 250 + int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset, 251 + uint32_t size) 252 + { 253 + return -ENOTSUPP; 254 + } 255 + 244 256 void __qcom_scm_init(void) 245 257 { 246 258 u64 cmd;
+53
drivers/firmware/qcom_scm.c
··· 192 192 EXPORT_SYMBOL(qcom_scm_pas_supported); 193 193 194 194 /** 195 + * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available 196 + */ 197 + bool qcom_scm_ocmem_lock_available(void) 198 + { 199 + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC, 200 + QCOM_SCM_OCMEM_LOCK_CMD); 201 + } 202 + EXPORT_SYMBOL(qcom_scm_ocmem_lock_available); 203 + 204 + /** 205 + * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM 206 + * region to the specified initiator 207 + * 208 + * @id: tz initiator id 209 + * @offset: OCMEM offset 210 + * @size: OCMEM size 211 + * @mode: access mode (WIDE/NARROW) 212 + */ 213 + int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, 214 + u32 mode) 215 + { 216 + return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode); 217 + } 218 + EXPORT_SYMBOL(qcom_scm_ocmem_lock); 219 + 220 + /** 221 + * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM 222 + * region from the specified initiator 223 + * 224 + * @id: tz initiator id 225 + * @offset: OCMEM offset 226 + * @size: OCMEM size 227 + */ 228 + int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) 229 + { 230 + return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size); 231 + } 232 + EXPORT_SYMBOL(qcom_scm_ocmem_unlock); 233 + 234 + /** 195 235 * qcom_scm_pas_init_image() - Initialize peripheral authentication service 196 236 * state machine for a given peripheral, using the 197 237 * metadata ··· 366 326 .assert = qcom_scm_pas_reset_assert, 367 327 .deassert = qcom_scm_pas_reset_deassert, 368 328 }; 329 + 330 + /** 331 + * qcom_scm_restore_sec_cfg_available() - Check if secure environment 332 + * supports restore security config interface. 333 + * 334 + * Return true if restore-cfg interface is supported, false if not. 
335 + */ 336 + bool qcom_scm_restore_sec_cfg_available(void) 337 + { 338 + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 339 + QCOM_SCM_RESTORE_SEC_CFG); 340 + } 341 + EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available); 369 342 370 343 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) 371 344 {
+9
drivers/firmware/qcom_scm.h
··· 42 42 43 43 extern void __qcom_scm_init(void); 44 44 45 + #define QCOM_SCM_OCMEM_SVC 0xf 46 + #define QCOM_SCM_OCMEM_LOCK_CMD 0x1 47 + #define QCOM_SCM_OCMEM_UNLOCK_CMD 0x2 48 + 49 + extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, 50 + u32 size, u32 mode); 51 + extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, 52 + u32 size); 53 + 45 54 #define QCOM_SCM_SVC_PIL 0x2 46 55 #define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1 47 56 #define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
+1
drivers/gpu/drm/msm/Kconfig
··· 7 7 depends on OF && COMMON_CLK 8 8 depends on MMU 9 9 depends on INTERCONNECT || !INTERCONNECT 10 + depends on QCOM_OCMEM || QCOM_OCMEM=n 10 11 select QCOM_MDT_LOADER if ARCH_QCOM 11 12 select REGULATOR 12 13 select DRM_KMS_HELPER
+7 -21
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
··· 6 6 * Copyright (c) 2014 The Linux Foundation. All rights reserved. 7 7 */ 8 8 9 - #ifdef CONFIG_MSM_OCMEM 10 - # include <mach/ocmem.h> 11 - #endif 12 - 13 9 #include "a3xx_gpu.h" 14 10 15 11 #define A3XX_INT0_MASK \ ··· 191 195 gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000); 192 196 193 197 /* Set the OCMEM base address for A330, etc */ 194 - if (a3xx_gpu->ocmem_hdl) { 198 + if (a3xx_gpu->ocmem.hdl) { 195 199 gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, 196 - (unsigned int)(a3xx_gpu->ocmem_base >> 14)); 200 + (unsigned int)(a3xx_gpu->ocmem.base >> 14)); 197 201 } 198 202 199 203 /* Turn on performance counters: */ ··· 314 318 315 319 adreno_gpu_cleanup(adreno_gpu); 316 320 317 - #ifdef CONFIG_MSM_OCMEM 318 - if (a3xx_gpu->ocmem_base) 319 - ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl); 320 - #endif 321 + adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem); 321 322 322 323 kfree(a3xx_gpu); 323 324 } ··· 487 494 488 495 /* if needed, allocate gmem: */ 489 496 if (adreno_is_a330(adreno_gpu)) { 490 - #ifdef CONFIG_MSM_OCMEM 491 - /* TODO this is different/missing upstream: */ 492 - struct ocmem_buf *ocmem_hdl = 493 - ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); 494 - 495 - a3xx_gpu->ocmem_hdl = ocmem_hdl; 496 - a3xx_gpu->ocmem_base = ocmem_hdl->addr; 497 - adreno_gpu->gmem = ocmem_hdl->len; 498 - DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, 499 - a3xx_gpu->ocmem_base); 500 - #endif 497 + ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev, 498 + adreno_gpu, &a3xx_gpu->ocmem); 499 + if (ret) 500 + goto fail; 501 501 } 502 502 503 503 if (!gpu->aspace) {
+1 -2
drivers/gpu/drm/msm/adreno/a3xx_gpu.h
··· 19 19 struct adreno_gpu base; 20 20 21 21 /* if OCMEM is used for GMEM: */ 22 - uint32_t ocmem_base; 23 - void *ocmem_hdl; 22 + struct adreno_ocmem ocmem; 24 23 }; 25 24 #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) 26 25
+6 -19
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
··· 2 2 /* Copyright (c) 2014 The Linux Foundation. All rights reserved. 3 3 */ 4 4 #include "a4xx_gpu.h" 5 - #ifdef CONFIG_MSM_OCMEM 6 - # include <soc/qcom/ocmem.h> 7 - #endif 8 5 9 6 #define A4XX_INT0_MASK \ 10 7 (A4XX_INT0_RBBM_AHB_ERROR | \ ··· 185 188 (1 << 30) | 0xFFFF); 186 189 187 190 gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR, 188 - (unsigned int)(a4xx_gpu->ocmem_base >> 14)); 191 + (unsigned int)(a4xx_gpu->ocmem.base >> 14)); 189 192 190 193 /* Turn on performance counters: */ 191 194 gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01); ··· 315 318 316 319 adreno_gpu_cleanup(adreno_gpu); 317 320 318 - #ifdef CONFIG_MSM_OCMEM 319 - if (a4xx_gpu->ocmem_base) 320 - ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl); 321 - #endif 321 + adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem); 322 322 323 323 kfree(a4xx_gpu); 324 324 } ··· 572 578 573 579 /* if needed, allocate gmem: */ 574 580 if (adreno_is_a4xx(adreno_gpu)) { 575 - #ifdef CONFIG_MSM_OCMEM 576 - /* TODO this is different/missing upstream: */ 577 - struct ocmem_buf *ocmem_hdl = 578 - ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); 579 - 580 - a4xx_gpu->ocmem_hdl = ocmem_hdl; 581 - a4xx_gpu->ocmem_base = ocmem_hdl->addr; 582 - adreno_gpu->gmem = ocmem_hdl->len; 583 - DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, 584 - a4xx_gpu->ocmem_base); 585 - #endif 581 + ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, 582 + &a4xx_gpu->ocmem); 583 + if (ret) 584 + goto fail; 586 585 } 587 586 588 587 if (!gpu->aspace) {
+1 -2
drivers/gpu/drm/msm/adreno/a4xx_gpu.h
··· 16 16 struct adreno_gpu base; 17 17 18 18 /* if OCMEM is used for GMEM: */ 19 - uint32_t ocmem_base; 20 - void *ocmem_hdl; 19 + struct adreno_ocmem ocmem; 21 20 }; 22 21 #define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base) 23 22
+62 -17
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 353 353 * 2D mode 3 draw 354 354 */ 355 355 OUT_RING(ring, 0x0000000B); 356 + } else if (adreno_is_a510(adreno_gpu)) { 357 + /* Workaround for token and syncs */ 358 + OUT_RING(ring, 0x00000001); 356 359 } else { 357 360 /* No workarounds enabled */ 358 361 OUT_RING(ring, 0x00000000); ··· 571 568 0x00100000 + adreno_gpu->gmem - 1); 572 569 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); 573 570 574 - gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); 575 - if (adreno_is_a530(adreno_gpu)) 576 - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); 577 - if (adreno_is_a540(adreno_gpu)) 578 - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); 579 - gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); 580 - gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); 581 - 582 - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); 571 + if (adreno_is_a510(adreno_gpu)) { 572 + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20); 573 + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); 574 + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); 575 + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A); 576 + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 577 + (0x200 << 11 | 0x200 << 22)); 578 + } else { 579 + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); 580 + if (adreno_is_a530(adreno_gpu)) 581 + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); 582 + if (adreno_is_a540(adreno_gpu)) 583 + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); 584 + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); 585 + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); 586 + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 587 + (0x400 << 11 | 0x300 << 22)); 588 + } 583 589 584 590 if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) 585 591 gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); ··· 600 588 601 589 /* Enable ME/PFP split notification */ 602 590 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); 591 + 592 + /* 593 + * In A5x, CCU can 
send context_done event of a particular context to 594 + * UCHE which ultimately reaches CP even when there is valid 595 + * transaction of that context inside CCU. This can let CP to program 596 + * config registers, which will make the "valid transaction" inside 597 + * CCU to be interpreted differently. This can cause gpu fault. This 598 + * bug is fixed in latest A510 revision. To enable this bug fix - 599 + * bit[11] of RB_DBG_ECO_CNTL need to be set to 0, default is 1 600 + * (disable). For older A510 version this bit is unused. 601 + */ 602 + if (adreno_is_a510(adreno_gpu)) 603 + gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0); 603 604 604 605 /* Enable HWCG */ 605 606 a5xx_set_hwcg(gpu, true); ··· 660 635 /* UCHE */ 661 636 gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); 662 637 663 - if (adreno_is_a530(adreno_gpu)) 638 + if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu)) 664 639 gpu_write(gpu, REG_A5XX_CP_PROTECT(17), 665 640 ADRENO_PROTECT_RW(0x10000, 0x8000)); 666 641 ··· 704 679 705 680 a5xx_preempt_hw_init(gpu); 706 681 707 - a5xx_gpmu_ucode_init(gpu); 682 + if (!adreno_is_a510(adreno_gpu)) 683 + a5xx_gpmu_ucode_init(gpu); 708 684 709 685 ret = a5xx_ucode_init(gpu); 710 686 if (ret) ··· 738 712 } 739 713 740 714 /* 741 - * Try to load a zap shader into the secure world. If successful 715 + * If the chip that we are using does support loading one, then 716 + * try to load a zap shader into the secure world. If successful 742 717 * we can use the CP to switch out of secure mode. If not then we 743 718 * have no resource but to try to switch ourselves out manually. 
If we 744 719 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will ··· 1093 1066 1094 1067 static int a5xx_pm_resume(struct msm_gpu *gpu) 1095 1068 { 1069 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 1096 1070 int ret; 1097 1071 1098 1072 /* Turn on the core power */ 1099 1073 ret = msm_gpu_pm_resume(gpu); 1100 1074 if (ret) 1101 1075 return ret; 1076 + 1077 + if (adreno_is_a510(adreno_gpu)) { 1078 + /* Halt the sp_input_clk at HM level */ 1079 + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055); 1080 + a5xx_set_hwcg(gpu, true); 1081 + /* Turn on sp_input_clk at HM level */ 1082 + gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0); 1083 + return 0; 1084 + } 1102 1085 1103 1086 /* Turn the RBCCU domain first to limit the chances of voltage droop */ 1104 1087 gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000); ··· 1138 1101 1139 1102 static int a5xx_pm_suspend(struct msm_gpu *gpu) 1140 1103 { 1104 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 1105 + u32 mask = 0xf; 1106 + 1107 + /* A510 has 3 XIN ports in VBIF */ 1108 + if (adreno_is_a510(adreno_gpu)) 1109 + mask = 0x7; 1110 + 1141 1111 /* Clear the VBIF pipe before shutting down */ 1142 - gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF); 1143 - spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF); 1112 + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask); 1113 + spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 1114 + mask) == mask); 1144 1115 1145 1116 gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); 1146 1117 ··· 1334 1289 kfree(a5xx_state); 1335 1290 } 1336 1291 1337 - int a5xx_gpu_state_put(struct msm_gpu_state *state) 1292 + static int a5xx_gpu_state_put(struct msm_gpu_state *state) 1338 1293 { 1339 1294 if (IS_ERR_OR_NULL(state)) 1340 1295 return 1; ··· 1344 1299 1345 1300 1346 1301 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) 1347 - void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, 1348 - struct drm_printer *p) 
1302 + static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, 1303 + struct drm_printer *p) 1349 1304 { 1350 1305 int i, j; 1351 1306 u32 pos = 0;
+7
drivers/gpu/drm/msm/adreno/a5xx_power.c
··· 297 297 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 298 298 int ret; 299 299 300 + /* Not all A5xx chips have a GPMU */ 301 + if (adreno_is_a510(adreno_gpu)) 302 + return 0; 303 + 300 304 /* Set up the limits management */ 301 305 if (adreno_is_a530(adreno_gpu)) 302 306 a530_lm_setup(gpu); ··· 329 325 uint32_t dwords = 0, offset = 0, bosize; 330 326 unsigned int *data, *ptr, *cmds; 331 327 unsigned int cmds_size; 328 + 329 + if (adreno_is_a510(adreno_gpu)) 330 + return; 332 331 333 332 if (a5xx_gpu->gpmu_bo) 334 333 return;
+15
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 115 115 .inactive_period = DRM_MSM_INACTIVE_PERIOD, 116 116 .init = a4xx_gpu_init, 117 117 }, { 118 + .rev = ADRENO_REV(5, 1, 0, ANY_ID), 119 + .revn = 510, 120 + .name = "A510", 121 + .fw = { 122 + [ADRENO_FW_PM4] = "a530_pm4.fw", 123 + [ADRENO_FW_PFP] = "a530_pfp.fw", 124 + }, 125 + .gmem = SZ_256K, 126 + /* 127 + * Increase inactive period to 250 to avoid bouncing 128 + * the GDSC which appears to make it grumpy 129 + */ 130 + .inactive_period = 250, 131 + .init = a5xx_gpu_init, 132 + }, { 118 133 .rev = ADRENO_REV(5, 3, 0, 2), 119 134 .revn = 530, 120 135 .name = "A530",
+40
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 14 14 #include <linux/pm_opp.h> 15 15 #include <linux/slab.h> 16 16 #include <linux/soc/qcom/mdt_loader.h> 17 + #include <soc/qcom/ocmem.h> 17 18 #include "adreno_gpu.h" 18 19 #include "msm_gem.h" 19 20 #include "msm_mmu.h" ··· 892 891 gpu->icc_path = NULL; 893 892 894 893 return 0; 894 + } 895 + 896 + int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, 897 + struct adreno_ocmem *adreno_ocmem) 898 + { 899 + struct ocmem_buf *ocmem_hdl; 900 + struct ocmem *ocmem; 901 + 902 + ocmem = of_get_ocmem(dev); 903 + if (IS_ERR(ocmem)) { 904 + if (PTR_ERR(ocmem) == -ENODEV) { 905 + /* 906 + * Return success since either the ocmem property was 907 + * not specified in device tree, or ocmem support is 908 + * not compiled into the kernel. 909 + */ 910 + return 0; 911 + } 912 + 913 + return PTR_ERR(ocmem); 914 + } 915 + 916 + ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem); 917 + if (IS_ERR(ocmem_hdl)) 918 + return PTR_ERR(ocmem_hdl); 919 + 920 + adreno_ocmem->ocmem = ocmem; 921 + adreno_ocmem->base = ocmem_hdl->addr; 922 + adreno_ocmem->hdl = ocmem_hdl; 923 + adreno_gpu->gmem = ocmem_hdl->len; 924 + 925 + return 0; 926 + } 927 + 928 + void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem) 929 + { 930 + if (adreno_ocmem && adreno_ocmem->base) 931 + ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS, 932 + adreno_ocmem->hdl); 895 933 } 896 934 897 935 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+15
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 126 126 }; 127 127 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) 128 128 129 + struct adreno_ocmem { 130 + struct ocmem *ocmem; 131 + unsigned long base; 132 + void *hdl; 133 + }; 134 + 129 135 /* platform config data (ie. from DT, or pdata) */ 130 136 struct adreno_platform_config { 131 137 struct adreno_rev rev; ··· 212 206 return gpu->revn == 430; 213 207 } 214 208 209 + static inline int adreno_is_a510(struct adreno_gpu *gpu) 210 + { 211 + return gpu->revn == 510; 212 + } 213 + 215 214 static inline int adreno_is_a530(struct adreno_gpu *gpu) 216 215 { 217 216 return gpu->revn == 530; ··· 246 235 void adreno_dump(struct msm_gpu *gpu); 247 236 void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords); 248 237 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu); 238 + 239 + int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, 240 + struct adreno_ocmem *ocmem); 241 + void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem); 249 242 250 243 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, 251 244 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+10 -33
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
··· 55 55 int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms, 56 56 enum dpu_intr_type intr_type, u32 instance_idx) 57 57 { 58 - if (!dpu_kms || !dpu_kms->hw_intr || 59 - !dpu_kms->hw_intr->ops.irq_idx_lookup) 58 + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup) 60 59 return -EINVAL; 61 60 62 61 return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type, ··· 72 73 unsigned long irq_flags; 73 74 int ret = 0, enable_count; 74 75 75 - if (!dpu_kms || !dpu_kms->hw_intr || 76 + if (!dpu_kms->hw_intr || 76 77 !dpu_kms->irq_obj.enable_counts || 77 78 !dpu_kms->irq_obj.irq_counts) { 78 79 DPU_ERROR("invalid params\n"); ··· 113 114 { 114 115 int i, ret = 0, counts; 115 116 116 - if (!dpu_kms || !irq_idxs || !irq_count) { 117 + if (!irq_idxs || !irq_count) { 117 118 DPU_ERROR("invalid params\n"); 118 119 return -EINVAL; 119 120 } ··· 137 138 { 138 139 int ret = 0, enable_count; 139 140 140 - if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) { 141 + if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) { 141 142 DPU_ERROR("invalid params\n"); 142 143 return -EINVAL; 143 144 } ··· 168 169 { 169 170 int i, ret = 0, counts; 170 171 171 - if (!dpu_kms || !irq_idxs || !irq_count) { 172 + if (!irq_idxs || !irq_count) { 172 173 DPU_ERROR("invalid params\n"); 173 174 return -EINVAL; 174 175 } ··· 185 186 186 187 u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear) 187 188 { 188 - if (!dpu_kms || !dpu_kms->hw_intr || 189 + if (!dpu_kms->hw_intr || 189 190 !dpu_kms->hw_intr->ops.get_interrupt_status) 190 191 return 0; 191 192 ··· 204 205 { 205 206 unsigned long irq_flags; 206 207 207 - if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) { 208 + if (!dpu_kms->irq_obj.irq_cb_tbl) { 208 209 DPU_ERROR("invalid params\n"); 209 210 return -EINVAL; 210 211 } ··· 239 240 { 240 241 unsigned long irq_flags; 241 242 242 - if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) { 243 + if (!dpu_kms->irq_obj.irq_cb_tbl) { 243 244 DPU_ERROR("invalid params\n"); 
244 245 return -EINVAL; 245 246 } ··· 273 274 274 275 static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms) 275 276 { 276 - if (!dpu_kms || !dpu_kms->hw_intr || 277 - !dpu_kms->hw_intr->ops.clear_all_irqs) 277 + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs) 278 278 return; 279 279 280 280 dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr); ··· 281 283 282 284 static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) 283 285 { 284 - if (!dpu_kms || !dpu_kms->hw_intr || 285 - !dpu_kms->hw_intr->ops.disable_all_irqs) 286 + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs) 286 287 return; 287 288 288 289 dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr); ··· 340 343 341 344 void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) 342 345 { 343 - struct msm_drm_private *priv; 344 346 int i; 345 - 346 - if (!dpu_kms->dev) { 347 - DPU_ERROR("invalid drm device\n"); 348 - return; 349 - } else if (!dpu_kms->dev->dev_private) { 350 - DPU_ERROR("invalid device private\n"); 351 - return; 352 - } 353 - priv = dpu_kms->dev->dev_private; 354 347 355 348 pm_runtime_get_sync(&dpu_kms->pdev->dev); 356 349 dpu_clear_all_irqs(dpu_kms); ··· 366 379 367 380 void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) 368 381 { 369 - struct msm_drm_private *priv; 370 382 int i; 371 - 372 - if (!dpu_kms->dev) { 373 - DPU_ERROR("invalid drm device\n"); 374 - return; 375 - } else if (!dpu_kms->dev->dev_private) { 376 - DPU_ERROR("invalid device private\n"); 377 - return; 378 - } 379 - priv = dpu_kms->dev->dev_private; 380 383 381 384 pm_runtime_get_sync(&dpu_kms->pdev->dev); 382 385 for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+3 -18
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
··· 32 32 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) 33 33 { 34 34 struct msm_drm_private *priv; 35 - 36 - if (!crtc->dev || !crtc->dev->dev_private) { 37 - DPU_ERROR("invalid device\n"); 38 - return NULL; 39 - } 40 - 41 35 priv = crtc->dev->dev_private; 42 - if (!priv || !priv->kms) { 43 - DPU_ERROR("invalid kms\n"); 44 - return NULL; 45 - } 46 - 47 36 return to_dpu_kms(priv->kms); 48 37 } 49 38 ··· 105 116 } 106 117 107 118 kms = _dpu_crtc_get_kms(crtc); 108 - if (!kms || !kms->catalog) { 119 + if (!kms->catalog) { 109 120 DPU_ERROR("invalid parameters\n"); 110 121 return 0; 111 122 } ··· 204 215 void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc) 205 216 { 206 217 struct dpu_crtc *dpu_crtc; 207 - struct dpu_crtc_state *dpu_cstate; 208 218 struct dpu_kms *kms; 209 219 210 220 if (!crtc) { ··· 212 224 } 213 225 214 226 kms = _dpu_crtc_get_kms(crtc); 215 - if (!kms || !kms->catalog) { 227 + if (!kms->catalog) { 216 228 DPU_ERROR("invalid kms\n"); 217 229 return; 218 230 } 219 231 220 232 dpu_crtc = to_dpu_crtc(crtc); 221 - dpu_cstate = to_dpu_crtc_state(crtc->state); 222 233 223 234 if (atomic_dec_return(&kms->bandwidth_ref) > 0) 224 235 return; ··· 274 287 u64 clk_rate = 0; 275 288 struct dpu_crtc *dpu_crtc; 276 289 struct dpu_crtc_state *dpu_cstate; 277 - struct msm_drm_private *priv; 278 290 struct dpu_kms *kms; 279 291 int ret; 280 292 ··· 283 297 } 284 298 285 299 kms = _dpu_crtc_get_kms(crtc); 286 - if (!kms || !kms->catalog) { 300 + if (!kms->catalog) { 287 301 DPU_ERROR("invalid kms\n"); 288 302 return -EINVAL; 289 303 } 290 - priv = kms->dev->dev_private; 291 304 292 305 dpu_crtc = to_dpu_crtc(crtc); 293 306 dpu_cstate = to_dpu_crtc_state(crtc->state);
+12 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 266 266 { 267 267 struct drm_encoder *encoder; 268 268 269 - if (!crtc || !crtc->dev) { 269 + if (!crtc) { 270 270 DPU_ERROR("invalid crtc\n"); 271 271 return INTF_MODE_NONE; 272 272 } 273 273 274 + /* 275 + * TODO: This function is called from dpu debugfs and as part of atomic 276 + * check. When called from debugfs, the crtc->mutex must be held to 277 + * read crtc->state. However reading crtc->state from atomic check isn't 278 + * allowed (unless you have a good reason, a big comment, and a deep 279 + * understanding of how the atomic/modeset locks work (<- and this is 280 + * probably not possible)). So we'll keep the WARN_ON here for now, but 281 + * really we need to figure out a better way to track our operating mode 282 + */ 274 283 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 275 284 276 285 /* TODO: Returns the first INTF_MODE, could there be multiple values? */ ··· 703 694 unsigned long flags; 704 695 bool release_bandwidth = false; 705 696 706 - if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) { 697 + if (!crtc || !crtc->state) { 707 698 DPU_ERROR("invalid crtc\n"); 708 699 return; 709 700 } ··· 775 766 struct msm_drm_private *priv; 776 767 bool request_bandwidth; 777 768 778 - if (!crtc || !crtc->dev || !crtc->dev->dev_private) { 769 + if (!crtc) { 779 770 DPU_ERROR("invalid crtc\n"); 780 771 return; 781 772 } ··· 1297 1288 { 1298 1289 struct drm_crtc *crtc = NULL; 1299 1290 struct dpu_crtc *dpu_crtc = NULL; 1300 - struct msm_drm_private *priv = NULL; 1301 - struct dpu_kms *kms = NULL; 1302 1291 int i; 1303 - 1304 - priv = dev->dev_private; 1305 - kms = to_dpu_kms(priv->kms); 1306 1292 1307 1293 dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); 1308 1294 if (!dpu_crtc)
+7 -32
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 645 645 priv = drm_enc->dev->dev_private; 646 646 647 647 dpu_kms = to_dpu_kms(priv->kms); 648 - if (!dpu_kms) { 649 - DPU_ERROR("invalid dpu_kms\n"); 650 - return; 651 - } 652 - 653 648 hw_mdptop = dpu_kms->hw_mdp; 654 649 if (!hw_mdptop) { 655 650 DPU_ERROR("invalid mdptop\n"); ··· 730 735 struct msm_drm_private *priv; 731 736 bool is_vid_mode = false; 732 737 733 - if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private || 734 - !drm_enc->crtc) { 738 + if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) { 735 739 DPU_ERROR("invalid parameters\n"); 736 740 return -EINVAL; 737 741 } ··· 1086 1092 struct msm_drm_private *priv; 1087 1093 struct dpu_kms *dpu_kms; 1088 1094 1089 - if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { 1095 + if (!drm_enc || !drm_enc->dev) { 1090 1096 DPU_ERROR("invalid parameters\n"); 1091 1097 return; 1092 1098 } 1093 1099 1094 1100 priv = drm_enc->dev->dev_private; 1095 1101 dpu_kms = to_dpu_kms(priv->kms); 1096 - if (!dpu_kms) { 1097 - DPU_ERROR("invalid dpu_kms\n"); 1098 - return; 1099 - } 1100 1102 1101 1103 dpu_enc = to_dpu_encoder_virt(drm_enc); 1102 1104 if (!dpu_enc || !dpu_enc->cur_master) { ··· 1174 1184 struct dpu_encoder_virt *dpu_enc = NULL; 1175 1185 struct msm_drm_private *priv; 1176 1186 struct dpu_kms *dpu_kms; 1177 - struct drm_display_mode *mode; 1178 1187 int i = 0; 1179 1188 1180 1189 if (!drm_enc) { ··· 1182 1193 } else if (!drm_enc->dev) { 1183 1194 DPU_ERROR("invalid dev\n"); 1184 1195 return; 1185 - } else if (!drm_enc->dev->dev_private) { 1186 - DPU_ERROR("invalid dev_private\n"); 1187 - return; 1188 1196 } 1189 1197 1190 1198 dpu_enc = to_dpu_encoder_virt(drm_enc); ··· 1189 1203 1190 1204 mutex_lock(&dpu_enc->enc_lock); 1191 1205 dpu_enc->enabled = false; 1192 - 1193 - mode = &drm_enc->crtc->state->adjusted_mode; 1194 1206 1195 1207 priv = drm_enc->dev->dev_private; 1196 1208 dpu_kms = to_dpu_kms(priv->kms); ··· 1718 1734 struct msm_drm_private *priv; 1719 1735 struct msm_drm_thread 
*event_thread; 1720 1736 1721 - if (!drm_enc->dev || !drm_enc->dev->dev_private || 1722 - !drm_enc->crtc) { 1737 + if (!drm_enc->dev || !drm_enc->crtc) { 1723 1738 DPU_ERROR("invalid parameters\n"); 1724 1739 return; 1725 1740 } ··· 1897 1914 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 1898 1915 { 1899 1916 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1900 - struct msm_drm_private *priv; 1901 - struct dpu_kms *dpu_kms; 1902 1917 int i; 1903 1918 1904 1919 static const struct file_operations debugfs_status_fops = { ··· 1908 1927 1909 1928 char name[DPU_NAME_SIZE]; 1910 1929 1911 - if (!drm_enc->dev || !drm_enc->dev->dev_private) { 1930 + if (!drm_enc->dev) { 1912 1931 DPU_ERROR("invalid encoder or kms\n"); 1913 1932 return -EINVAL; 1914 1933 } 1915 - 1916 - priv = drm_enc->dev->dev_private; 1917 - dpu_kms = to_dpu_kms(priv->kms); 1918 1934 1919 1935 snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id); 1920 1936 ··· 2020 2042 enum dpu_intf_type intf_type; 2021 2043 struct dpu_enc_phys_init_params phys_params; 2022 2044 2023 - if (!dpu_enc || !dpu_kms) { 2024 - DPU_ERROR("invalid arg(s), enc %d kms %d\n", 2025 - dpu_enc != 0, dpu_kms != 0); 2045 + if (!dpu_enc) { 2046 + DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0); 2026 2047 return -EINVAL; 2027 2048 } 2028 2049 ··· 2110 2133 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, 2111 2134 frame_done_timer); 2112 2135 struct drm_encoder *drm_enc = &dpu_enc->base; 2113 - struct msm_drm_private *priv; 2114 2136 u32 event; 2115 2137 2116 - if (!drm_enc->dev || !drm_enc->dev->dev_private) { 2138 + if (!drm_enc->dev) { 2117 2139 DPU_ERROR("invalid parameters\n"); 2118 2140 return; 2119 2141 } 2120 - priv = drm_enc->dev->dev_private; 2121 2142 2122 2143 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) { 2123 2144 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
-15
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
··· 124 124 static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx) 125 125 { 126 126 struct dpu_encoder_phys *phys_enc = arg; 127 - struct dpu_encoder_phys_cmd *cmd_enc; 128 127 129 128 if (!phys_enc || !phys_enc->hw_ctl) 130 129 return; 131 130 132 131 DPU_ATRACE_BEGIN("ctl_start_irq"); 133 - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); 134 132 135 133 atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0); 136 134 ··· 314 316 static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc, 315 317 bool enable) 316 318 { 317 - struct dpu_encoder_phys_cmd *cmd_enc; 318 - 319 319 if (!phys_enc) 320 320 return; 321 - 322 - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); 323 321 324 322 trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent), 325 323 phys_enc->hw_pp->idx - PINGPONG_0, ··· 349 355 struct drm_display_mode *mode; 350 356 bool tc_enable = true; 351 357 u32 vsync_hz; 352 - struct msm_drm_private *priv; 353 358 struct dpu_kms *dpu_kms; 354 359 355 360 if (!phys_enc || !phys_enc->hw_pp) { ··· 366 373 } 367 374 368 375 dpu_kms = phys_enc->dpu_kms; 369 - if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) { 370 - DPU_ERROR("invalid device\n"); 371 - return; 372 - } 373 - priv = dpu_kms->dev->dev_private; 374 376 375 377 /* 376 378 * TE default: dsi byte clock calculated base on 70 fps; ··· 638 650 struct dpu_encoder_phys *phys_enc) 639 651 { 640 652 int rc; 641 - struct dpu_encoder_phys_cmd *cmd_enc; 642 653 643 654 if (!phys_enc) 644 655 return -EINVAL; 645 - 646 - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); 647 656 648 657 rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc); 649 658 if (rc) {
+2 -5
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
··· 374 374 struct drm_display_mode *mode, 375 375 struct drm_display_mode *adj_mode) 376 376 { 377 - if (!phys_enc || !phys_enc->dpu_kms) { 377 + if (!phys_enc) { 378 378 DPU_ERROR("invalid encoder/kms\n"); 379 379 return; 380 380 } ··· 566 566 567 567 static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) 568 568 { 569 - struct msm_drm_private *priv; 570 569 unsigned long lock_flags; 571 570 int ret; 572 571 573 - if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev || 574 - !phys_enc->parent->dev->dev_private) { 572 + if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) { 575 573 DPU_ERROR("invalid encoder/device\n"); 576 574 return; 577 575 } 578 - priv = phys_enc->parent->dev->dev_private; 579 576 580 577 if (!phys_enc->hw_intf || !phys_enc->hw_ctl) { 581 578 DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+4 -56
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 30 30 #define CREATE_TRACE_POINTS 31 31 #include "dpu_trace.h" 32 32 33 - static const char * const iommu_ports[] = { 34 - "mdp_0", 35 - }; 36 - 37 33 /* 38 34 * To enable overall DRM driver logging 39 35 * # echo 0x2 > /sys/module/drm/parameters/debug ··· 64 68 bool danger_status) 65 69 { 66 70 struct dpu_kms *kms = (struct dpu_kms *)s->private; 67 - struct msm_drm_private *priv; 68 71 struct dpu_danger_safe_status status; 69 72 int i; 70 73 71 - if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) { 74 + if (!kms->hw_mdp) { 72 75 DPU_ERROR("invalid arg(s)\n"); 73 76 return 0; 74 77 } 75 78 76 - priv = kms->dev->dev_private; 77 79 memset(&status, 0, sizeof(struct dpu_danger_safe_status)); 78 80 79 81 pm_runtime_get_sync(&kms->pdev->dev); ··· 147 153 return 0; 148 154 149 155 dev = dpu_kms->dev; 150 - if (!dev) 151 - return 0; 152 - 153 156 priv = dev->dev_private; 154 - if (!priv) 155 - return 0; 156 - 157 157 base = dpu_kms->mmio + regset->offset; 158 158 159 159 /* insert padding spaces, if needed */ ··· 268 280 struct drm_atomic_state *state) 269 281 { 270 282 struct dpu_kms *dpu_kms; 271 - struct msm_drm_private *priv; 272 283 struct drm_device *dev; 273 284 struct drm_crtc *crtc; 274 285 struct drm_crtc_state *crtc_state; ··· 278 291 return; 279 292 dpu_kms = to_dpu_kms(kms); 280 293 dev = dpu_kms->dev; 281 - 282 - if (!dev || !dev->dev_private) 283 - return; 284 - priv = dev->dev_private; 285 294 286 295 /* Call prepare_commit for all affected encoders */ 287 296 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { ··· 316 333 if (funcs && funcs->commit) 317 334 funcs->commit(encoder); 318 335 319 - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 320 336 drm_for_each_crtc(crtc, dev) { 321 337 if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder))) 322 338 continue; ··· 446 464 struct msm_drm_private *priv; 447 465 int i; 448 466 449 - if (!dpu_kms) { 450 - DPU_ERROR("invalid dpu_kms\n"); 451 - return; 452 - } else if 
(!dpu_kms->dev) { 453 - DPU_ERROR("invalid dev\n"); 454 - return; 455 - } else if (!dpu_kms->dev->dev_private) { 456 - DPU_ERROR("invalid dev_private\n"); 457 - return; 458 - } 459 467 priv = dpu_kms->dev->dev_private; 460 468 461 469 for (i = 0; i < priv->num_crtcs; i++) ··· 477 505 478 506 int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret; 479 507 int max_crtc_count; 480 - 481 508 dev = dpu_kms->dev; 482 509 priv = dev->dev_private; 483 510 catalog = dpu_kms->catalog; ··· 556 585 int i; 557 586 558 587 dev = dpu_kms->dev; 559 - if (!dev) 560 - return; 561 588 562 589 if (dpu_kms->hw_intr) 563 590 dpu_hw_intr_destroy(dpu_kms->hw_intr); ··· 694 725 695 726 mmu = dpu_kms->base.aspace->mmu; 696 727 697 - mmu->funcs->detach(mmu, (const char **)iommu_ports, 698 - ARRAY_SIZE(iommu_ports)); 728 + mmu->funcs->detach(mmu); 699 729 msm_gem_address_space_put(dpu_kms->base.aspace); 700 730 701 731 dpu_kms->base.aspace = NULL; ··· 720 752 return PTR_ERR(aspace); 721 753 } 722 754 723 - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, 724 - ARRAY_SIZE(iommu_ports)); 755 + ret = aspace->mmu->funcs->attach(aspace->mmu); 725 756 if (ret) { 726 757 DPU_ERROR("failed to attach iommu %d\n", ret); 727 758 msm_gem_address_space_put(aspace); ··· 770 803 771 804 dpu_kms = to_dpu_kms(kms); 772 805 dev = dpu_kms->dev; 773 - if (!dev) { 774 - DPU_ERROR("invalid device\n"); 775 - return rc; 776 - } 777 - 778 806 priv = dev->dev_private; 779 - if (!priv) { 780 - DPU_ERROR("invalid private data\n"); 781 - return rc; 782 - } 783 807 784 808 atomic_set(&dpu_kms->bandwidth_ref, 0); 785 809 ··· 932 974 struct dpu_kms *dpu_kms; 933 975 int irq; 934 976 935 - if (!dev || !dev->dev_private) { 977 + if (!dev) { 936 978 DPU_ERROR("drm device node invalid\n"); 937 979 return ERR_PTR(-EINVAL); 938 980 } ··· 1022 1064 struct dss_module_power *mp = &dpu_kms->mp; 1023 1065 1024 1066 ddev = dpu_kms->dev; 1025 - if (!ddev) { 1026 - DPU_ERROR("invalid drm_device\n"); 1027 - return rc; 1028 - 
} 1029 - 1030 1067 rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); 1031 1068 if (rc) 1032 1069 DPU_ERROR("clock disable failed rc:%d\n", rc); ··· 1039 1086 struct dss_module_power *mp = &dpu_kms->mp; 1040 1087 1041 1088 ddev = dpu_kms->dev; 1042 - if (!ddev) { 1043 - DPU_ERROR("invalid drm_device\n"); 1044 - return rc; 1045 - } 1046 - 1047 1089 rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); 1048 1090 if (rc) { 1049 1091 DPU_ERROR("clock enable failed rc:%d\n", rc);
-4
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
··· 139 139 140 140 #define to_dpu_kms(x) container_of(x, struct dpu_kms, base) 141 141 142 - /* get struct msm_kms * from drm_device * */ 143 - #define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \ 144 - ((struct msm_drm_private *)((D)->dev_private))->kms : NULL) 145 - 146 142 /** 147 143 * Debugfs functions - extra helper functions for debugfs support 148 144 *
+1 -5
drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
··· 154 154 u32 ot_lim; 155 155 int ret, i; 156 156 157 - if (!dpu_kms) { 158 - DPU_ERROR("invalid arguments\n"); 159 - return; 160 - } 161 157 mdp = dpu_kms->hw_mdp; 162 158 163 159 for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { ··· 210 214 const struct dpu_vbif_qos_tbl *qos_tbl; 211 215 int i; 212 216 213 - if (!dpu_kms || !params || !dpu_kms->hw_mdp) { 217 + if (!params || !dpu_kms->hw_mdp) { 214 218 DPU_ERROR("invalid arguments\n"); 215 219 return; 216 220 }
+2 -8
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
··· 157 157 } 158 158 } 159 159 160 - static const char * const iommu_ports[] = { 161 - "mdp_port0_cb0", "mdp_port1_cb0", 162 - }; 163 - 164 160 static void mdp4_destroy(struct msm_kms *kms) 165 161 { 166 162 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); ··· 168 172 drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo); 169 173 170 174 if (aspace) { 171 - aspace->mmu->funcs->detach(aspace->mmu, 172 - iommu_ports, ARRAY_SIZE(iommu_ports)); 175 + aspace->mmu->funcs->detach(aspace->mmu); 173 176 msm_gem_address_space_put(aspace); 174 177 } 175 178 ··· 519 524 520 525 kms->aspace = aspace; 521 526 522 - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, 523 - ARRAY_SIZE(iommu_ports)); 527 + ret = aspace->mmu->funcs->attach(aspace->mmu); 524 528 if (ret) 525 529 goto fail; 526 530 } else {
+106 -8
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
··· 14 14 /* mdp5_cfg must be exposed (used in mdp5.xml.h) */ 15 15 const struct mdp5_cfg_hw *mdp5_cfg = NULL; 16 16 17 - const struct mdp5_cfg_hw msm8x74v1_config = { 17 + static const struct mdp5_cfg_hw msm8x74v1_config = { 18 18 .name = "msm8x74v1", 19 19 .mdp = { 20 20 .count = 1, ··· 98 98 .max_clk = 200000000, 99 99 }; 100 100 101 - const struct mdp5_cfg_hw msm8x74v2_config = { 101 + static const struct mdp5_cfg_hw msm8x74v2_config = { 102 102 .name = "msm8x74", 103 103 .mdp = { 104 104 .count = 1, ··· 180 180 .max_clk = 200000000, 181 181 }; 182 182 183 - const struct mdp5_cfg_hw apq8084_config = { 183 + static const struct mdp5_cfg_hw apq8084_config = { 184 184 .name = "apq8084", 185 185 .mdp = { 186 186 .count = 1, ··· 275 275 .max_clk = 320000000, 276 276 }; 277 277 278 - const struct mdp5_cfg_hw msm8x16_config = { 278 + static const struct mdp5_cfg_hw msm8x16_config = { 279 279 .name = "msm8x16", 280 280 .mdp = { 281 281 .count = 1, ··· 342 342 .max_clk = 320000000, 343 343 }; 344 344 345 - const struct mdp5_cfg_hw msm8x94_config = { 345 + static const struct mdp5_cfg_hw msm8x94_config = { 346 346 .name = "msm8x94", 347 347 .mdp = { 348 348 .count = 1, ··· 437 437 .max_clk = 400000000, 438 438 }; 439 439 440 - const struct mdp5_cfg_hw msm8x96_config = { 440 + static const struct mdp5_cfg_hw msm8x96_config = { 441 441 .name = "msm8x96", 442 442 .mdp = { 443 443 .count = 1, ··· 545 545 .max_clk = 412500000, 546 546 }; 547 547 548 - const struct mdp5_cfg_hw msm8917_config = { 548 + const struct mdp5_cfg_hw msm8x76_config = { 549 + .name = "msm8x76", 550 + .mdp = { 551 + .count = 1, 552 + .caps = MDP_CAP_SMP | 553 + MDP_CAP_DSC | 554 + MDP_CAP_SRC_SPLIT | 555 + 0, 556 + }, 557 + .ctl = { 558 + .count = 3, 559 + .base = { 0x01000, 0x01200, 0x01400 }, 560 + .flush_hw_mask = 0xffffffff, 561 + }, 562 + .smp = { 563 + .mmb_count = 10, 564 + .mmb_size = 10240, 565 + .clients = { 566 + [SSPP_VIG0] = 1, [SSPP_VIG1] = 9, 567 + [SSPP_DMA0] = 4, 568 + [SSPP_RGB0] = 7, 
[SSPP_RGB1] = 8, 569 + }, 570 + }, 571 + .pipe_vig = { 572 + .count = 2, 573 + .base = { 0x04000, 0x06000 }, 574 + .caps = MDP_PIPE_CAP_HFLIP | 575 + MDP_PIPE_CAP_VFLIP | 576 + MDP_PIPE_CAP_SCALE | 577 + MDP_PIPE_CAP_CSC | 578 + MDP_PIPE_CAP_DECIMATION | 579 + MDP_PIPE_CAP_SW_PIX_EXT | 580 + 0, 581 + }, 582 + .pipe_rgb = { 583 + .count = 2, 584 + .base = { 0x14000, 0x16000 }, 585 + .caps = MDP_PIPE_CAP_HFLIP | 586 + MDP_PIPE_CAP_VFLIP | 587 + MDP_PIPE_CAP_DECIMATION | 588 + MDP_PIPE_CAP_SW_PIX_EXT | 589 + 0, 590 + }, 591 + .pipe_dma = { 592 + .count = 1, 593 + .base = { 0x24000 }, 594 + .caps = MDP_PIPE_CAP_HFLIP | 595 + MDP_PIPE_CAP_VFLIP | 596 + MDP_PIPE_CAP_SW_PIX_EXT | 597 + 0, 598 + }, 599 + .pipe_cursor = { 600 + .count = 1, 601 + .base = { 0x440DC }, 602 + .caps = MDP_PIPE_CAP_HFLIP | 603 + MDP_PIPE_CAP_VFLIP | 604 + MDP_PIPE_CAP_SW_PIX_EXT | 605 + MDP_PIPE_CAP_CURSOR | 606 + 0, 607 + }, 608 + .lm = { 609 + .count = 2, 610 + .base = { 0x44000, 0x45000 }, 611 + .instances = { 612 + { .id = 0, .pp = 0, .dspp = 0, 613 + .caps = MDP_LM_CAP_DISPLAY, }, 614 + { .id = 1, .pp = -1, .dspp = -1, 615 + .caps = MDP_LM_CAP_WB }, 616 + }, 617 + .nb_stages = 8, 618 + .max_width = 2560, 619 + .max_height = 0xFFFF, 620 + }, 621 + .dspp = { 622 + .count = 1, 623 + .base = { 0x54000 }, 624 + 625 + }, 626 + .pp = { 627 + .count = 3, 628 + .base = { 0x70000, 0x70800, 0x72000 }, 629 + }, 630 + .dsc = { 631 + .count = 2, 632 + .base = { 0x80000, 0x80400 }, 633 + }, 634 + .intf = { 635 + .base = { 0x6a000, 0x6a800, 0x6b000 }, 636 + .connect = { 637 + [0] = INTF_DISABLED, 638 + [1] = INTF_DSI, 639 + [2] = INTF_DSI, 640 + }, 641 + }, 642 + .max_clk = 360000000, 643 + }; 644 + 645 + static const struct mdp5_cfg_hw msm8917_config = { 549 646 .name = "msm8917", 550 647 .mdp = { 551 648 .count = 1, ··· 727 630 .max_clk = 320000000, 728 631 }; 729 632 730 - const struct mdp5_cfg_hw msm8998_config = { 633 + static const struct mdp5_cfg_hw msm8998_config = { 731 634 .name = "msm8998", 732 
635 .mdp = { 733 636 .count = 1, ··· 842 745 { .revision = 6, .config = { .hw = &msm8x16_config } }, 843 746 { .revision = 9, .config = { .hw = &msm8x94_config } }, 844 747 { .revision = 7, .config = { .hw = &msm8x96_config } }, 748 + { .revision = 11, .config = { .hw = &msm8x76_config } }, 845 749 { .revision = 15, .config = { .hw = &msm8917_config } }, 846 750 }; 847 751
-3
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
··· 214 214 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; 215 215 struct mdp5_kms *mdp5_kms = get_kms(crtc); 216 216 struct drm_plane *plane; 217 - const struct mdp5_cfg_hw *hw_cfg; 218 217 struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; 219 218 const struct mdp_format *format; 220 219 struct mdp5_hw_mixer *mixer = pipeline->mixer; ··· 230 231 u32 mixer_op_mode = 0; 231 232 u32 val; 232 233 #define blender(stage) ((stage) - STAGE0) 233 - 234 - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 235 234 236 235 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); 237 236
+12 -11
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 19 19 #include "msm_mmu.h" 20 20 #include "mdp5_kms.h" 21 21 22 - static const char *iommu_ports[] = { 23 - "mdp_0", 24 - }; 25 - 26 22 static int mdp5_hw_init(struct msm_kms *kms) 27 23 { 28 24 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); ··· 229 233 mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); 230 234 231 235 if (aspace) { 232 - aspace->mmu->funcs->detach(aspace->mmu, 233 - iommu_ports, ARRAY_SIZE(iommu_ports)); 236 + aspace->mmu->funcs->detach(aspace->mmu); 234 237 msm_gem_address_space_put(aspace); 235 238 } 236 239 } ··· 309 314 mdp5_kms->enable_count--; 310 315 WARN_ON(mdp5_kms->enable_count < 0); 311 316 317 + if (mdp5_kms->tbu_rt_clk) 318 + clk_disable_unprepare(mdp5_kms->tbu_rt_clk); 319 + if (mdp5_kms->tbu_clk) 320 + clk_disable_unprepare(mdp5_kms->tbu_clk); 312 321 clk_disable_unprepare(mdp5_kms->ahb_clk); 313 322 clk_disable_unprepare(mdp5_kms->axi_clk); 314 323 clk_disable_unprepare(mdp5_kms->core_clk); ··· 333 334 clk_prepare_enable(mdp5_kms->core_clk); 334 335 if (mdp5_kms->lut_clk) 335 336 clk_prepare_enable(mdp5_kms->lut_clk); 337 + if (mdp5_kms->tbu_clk) 338 + clk_prepare_enable(mdp5_kms->tbu_clk); 339 + if (mdp5_kms->tbu_rt_clk) 340 + clk_prepare_enable(mdp5_kms->tbu_rt_clk); 336 341 337 342 return 0; 338 343 } ··· 469 466 { 470 467 struct drm_device *dev = mdp5_kms->dev; 471 468 struct msm_drm_private *priv = dev->dev_private; 472 - const struct mdp5_cfg_hw *hw_cfg; 473 469 unsigned int num_crtcs; 474 470 int i, ret, pi = 0, ci = 0; 475 471 struct drm_plane *primary[MAX_BASES] = { NULL }; 476 472 struct drm_plane *cursor[MAX_BASES] = { NULL }; 477 - 478 - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 479 473 480 474 /* 481 475 * Construct encoders and modeset initialize connector devices ··· 737 737 738 738 kms->aspace = aspace; 739 739 740 - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, 741 - ARRAY_SIZE(iommu_ports)); 740 + ret = aspace->mmu->funcs->attach(aspace->mmu); 742 741 if (ret) { 743 742 
DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n", 744 743 ret); ··· 973 974 974 975 /* optional clocks: */ 975 976 get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); 977 + get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false); 978 + get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false); 976 979 977 980 /* we need to set a default rate before enabling. Set a safe 978 981 * rate first, then figure out hw revision, and then set a
+2
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
··· 53 53 struct clk *ahb_clk; 54 54 struct clk *core_clk; 55 55 struct clk *lut_clk; 56 + struct clk *tbu_clk; 57 + struct clk *tbu_rt_clk; 56 58 struct clk *vsync_clk; 57 59 58 60 /*
-2
drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
··· 121 121 struct mdp5_kms *mdp5_kms = get_kms(smp); 122 122 int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); 123 123 int i, hsub, nplanes, nlines; 124 - u32 fmt = format->base.pixel_format; 125 124 uint32_t blkcfg = 0; 126 125 127 126 nplanes = info->num_planes; ··· 134 135 * them together, writes to SMP using a single client. 135 136 */ 136 137 if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { 137 - fmt = DRM_FORMAT_NV24; 138 138 nplanes = 2; 139 139 140 140 /* if decimation is enabled, HW decimates less on the
+25 -3
drivers/gpu/drm/msm/dsi/dsi_cfg.c
··· 66 66 .num_dsi = 1, 67 67 }; 68 68 69 + static const char * const dsi_8976_bus_clk_names[] = { 70 + "mdp_core", "iface", "bus", 71 + }; 72 + 73 + static const struct msm_dsi_config msm8976_dsi_cfg = { 74 + .io_offset = DSI_6G_REG_SHIFT, 75 + .reg_cfg = { 76 + .num = 3, 77 + .regs = { 78 + {"gdsc", -1, -1}, 79 + {"vdda", 100000, 100}, /* 1.2 V */ 80 + {"vddio", 100000, 100}, /* 1.8 V */ 81 + }, 82 + }, 83 + .bus_clk_names = dsi_8976_bus_clk_names, 84 + .num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names), 85 + .io_start = { 0x1a94000, 0x1a96000 }, 86 + .num_dsi = 2, 87 + }; 88 + 69 89 static const struct msm_dsi_config msm8994_dsi_cfg = { 70 90 .io_offset = DSI_6G_REG_SHIFT, 71 91 .reg_cfg = { ··· 167 147 .num_dsi = 2, 168 148 }; 169 149 170 - const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { 150 + static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { 171 151 .link_clk_enable = dsi_link_clk_enable_v2, 172 152 .link_clk_disable = dsi_link_clk_disable_v2, 173 153 .clk_init_ver = dsi_clk_init_v2, ··· 178 158 .calc_clk_rate = dsi_calc_clk_rate_v2, 179 159 }; 180 160 181 - const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { 161 + static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { 182 162 .link_clk_enable = dsi_link_clk_enable_6g, 183 163 .link_clk_disable = dsi_link_clk_disable_6g, 184 164 .clk_init_ver = NULL, ··· 189 169 .calc_clk_rate = dsi_calc_clk_rate_6g, 190 170 }; 191 171 192 - const static struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { 172 + static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { 193 173 .link_clk_enable = dsi_link_clk_enable_6g, 194 174 .link_clk_disable = dsi_link_clk_disable_6g, 195 175 .clk_init_ver = dsi_clk_init_6g_v2, ··· 217 197 &msm8916_dsi_cfg, &msm_dsi_6g_host_ops}, 218 198 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, 219 199 &msm8996_dsi_cfg, &msm_dsi_6g_host_ops}, 200 + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2, 201 + &msm8976_dsi_cfg, 
&msm_dsi_6g_host_ops}, 220 202 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0, 221 203 &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 222 204 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+1
drivers/gpu/drm/msm/dsi/dsi_cfg.h
··· 17 17 #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 18 18 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 19 19 #define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 20 + #define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002 20 21 #define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 21 22 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 22 23
+1 -2
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 1293 1293 static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host, 1294 1294 u8 *buf, int rx_byte, int pkt_size) 1295 1295 { 1296 - u32 *lp, *temp, data; 1296 + u32 *temp, data; 1297 1297 int i, j = 0, cnt; 1298 1298 u32 read_cnt; 1299 1299 u8 reg[16]; 1300 1300 int repeated_bytes = 0; 1301 1301 int buf_offset = buf - msm_host->rx_buf; 1302 1302 1303 - lp = (u32 *)buf; 1304 1303 temp = (u32 *)reg; 1305 1304 cnt = (rx_byte + 3) >> 2; 1306 1305 if (cnt > 4)
+4 -4
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
··· 145 145 { 146 146 const unsigned long bit_rate = clk_req->bitclk_rate; 147 147 const unsigned long esc_rate = clk_req->escclk_rate; 148 - s32 ui, ui_x8, lpx; 148 + s32 ui, ui_x8; 149 149 s32 tmax, tmin; 150 150 s32 pcnt0 = 50; 151 151 s32 pcnt1 = 50; ··· 175 175 176 176 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); 177 177 ui_x8 = ui << 3; 178 - lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); 179 178 180 179 temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8); 181 180 tmin = max_t(s32, temp, 0); ··· 261 262 { 262 263 const unsigned long bit_rate = clk_req->bitclk_rate; 263 264 const unsigned long esc_rate = clk_req->escclk_rate; 264 - s32 ui, ui_x8, lpx; 265 + s32 ui, ui_x8; 265 266 s32 tmax, tmin; 266 267 s32 pcnt0 = 50; 267 268 s32 pcnt1 = 50; ··· 283 284 284 285 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); 285 286 ui_x8 = ui << 3; 286 - lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); 287 287 288 288 temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); 289 289 tmin = max_t(s32, temp, 0); ··· 483 485 #ifdef CONFIG_DRM_MSM_DSI_28NM_PHY 484 486 { .compatible = "qcom,dsi-phy-28nm-hpm", 485 487 .data = &dsi_phy_28nm_hpm_cfgs }, 488 + { .compatible = "qcom,dsi-phy-28nm-hpm-fam-b", 489 + .data = &dsi_phy_28nm_hpm_famb_cfgs }, 486 490 { .compatible = "qcom,dsi-phy-28nm-lp", 487 491 .data = &dsi_phy_28nm_lp_cfgs }, 488 492 #endif
+1
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
··· 40 40 }; 41 41 42 42 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; 43 + extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs; 43 44 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; 44 45 extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; 45 46 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
+52 -8
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
··· 39 39 DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); 40 40 } 41 41 42 - static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) 42 + static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy) 43 43 { 44 44 void __iomem *base = phy->reg_base; 45 - 46 - if (!enable) { 47 - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); 48 - return; 49 - } 50 45 51 46 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); 52 47 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); ··· 51 56 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); 52 57 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7); 53 58 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); 59 + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); 60 + } 61 + 62 + static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy) 63 + { 64 + void __iomem *base = phy->reg_base; 65 + 66 + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); 67 + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); 68 + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7); 69 + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); 70 + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1); 71 + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1); 72 + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); 73 + 74 + if (phy->cfg->type == MSM_DSI_PHY_28NM_LP) 75 + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05); 76 + else 77 + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d); 78 + } 79 + 80 + static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) 81 + { 82 + if (!enable) { 83 + dsi_phy_write(phy->reg_base + 84 + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); 85 + return; 86 + } 87 + 88 + if (phy->regulator_ldo_mode) 89 + dsi_28nm_phy_regulator_enable_ldo(phy); 90 + else 91 + dsi_28nm_phy_regulator_enable_dcdc(phy); 54 92 } 55 93 
56 94 static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, ··· 104 76 dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff); 105 77 106 78 dsi_28nm_phy_regulator_ctrl(phy, true); 107 - 108 - dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); 109 79 110 80 dsi_28nm_dphy_set_timing(phy, timing); 111 81 ··· 165 139 .init = msm_dsi_phy_init_common, 166 140 }, 167 141 .io_start = { 0xfd922b00, 0xfd923100 }, 142 + .num_dsi_phy = 2, 143 + }; 144 + 145 + const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = { 146 + .type = MSM_DSI_PHY_28NM_HPM, 147 + .src_pll_truthtable = { {true, true}, {false, true} }, 148 + .reg_cfg = { 149 + .num = 1, 150 + .regs = { 151 + {"vddio", 100000, 100}, 152 + }, 153 + }, 154 + .ops = { 155 + .enable = dsi_28nm_phy_enable, 156 + .disable = dsi_28nm_phy_disable, 157 + .init = msm_dsi_phy_init_common, 158 + }, 159 + .io_start = { 0x1a94400, 0x1a96400 }, 168 160 .num_dsi_phy = 2, 169 161 }; 170 162
+6 -2
drivers/gpu/drm/msm/hdmi/hdmi_phy.c
··· 29 29 reg = devm_regulator_get(dev, cfg->reg_names[i]); 30 30 if (IS_ERR(reg)) { 31 31 ret = PTR_ERR(reg); 32 - DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n", 33 - cfg->reg_names[i], ret); 32 + if (ret != -EPROBE_DEFER) { 33 + DRM_DEV_ERROR(dev, 34 + "failed to get phy regulator: %s (%d)\n", 35 + cfg->reg_names[i], ret); 36 + } 37 + 34 38 return ret; 35 39 } 36 40
+3 -3
drivers/gpu/drm/msm/msm_gpu.c
··· 16 16 #include <linux/pm_opp.h> 17 17 #include <linux/devfreq.h> 18 18 #include <linux/devcoredump.h> 19 + #include <linux/sched/task.h> 19 20 20 21 /* 21 22 * Power Management: ··· 839 838 return ERR_CAST(aspace); 840 839 } 841 840 842 - ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0); 841 + ret = aspace->mmu->funcs->attach(aspace->mmu); 843 842 if (ret) { 844 843 msm_gem_address_space_put(aspace); 845 844 return ERR_PTR(ret); ··· 996 995 msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false); 997 996 998 997 if (!IS_ERR_OR_NULL(gpu->aspace)) { 999 - gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, 1000 - NULL, 0); 998 + gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); 1001 999 msm_gem_address_space_put(gpu->aspace); 1002 1000 } 1003 1001 }
+2 -4
drivers/gpu/drm/msm/msm_gpummu.c
··· 21 21 #define GPUMMU_PAGE_SIZE SZ_4K 22 22 #define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE) 23 23 24 - static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names, 25 - int cnt) 24 + static int msm_gpummu_attach(struct msm_mmu *mmu) 26 25 { 27 26 return 0; 28 27 } 29 28 30 - static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names, 31 - int cnt) 29 + static void msm_gpummu_detach(struct msm_mmu *mmu) 32 30 { 33 31 } 34 32
+2 -4
drivers/gpu/drm/msm/msm_iommu.c
··· 23 23 return 0; 24 24 } 25 25 26 - static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names, 27 - int cnt) 26 + static int msm_iommu_attach(struct msm_mmu *mmu) 28 27 { 29 28 struct msm_iommu *iommu = to_msm_iommu(mmu); 30 29 31 30 return iommu_attach_device(iommu->domain, mmu->dev); 32 31 } 33 32 34 - static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, 35 - int cnt) 33 + static void msm_iommu_detach(struct msm_mmu *mmu) 36 34 { 37 35 struct msm_iommu *iommu = to_msm_iommu(mmu); 38 36
+2 -2
drivers/gpu/drm/msm/msm_mmu.h
··· 10 10 #include <linux/iommu.h> 11 11 12 12 struct msm_mmu_funcs { 13 - int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt); 14 - void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt); 13 + int (*attach)(struct msm_mmu *mmu); 14 + void (*detach)(struct msm_mmu *mmu); 15 15 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, 16 16 unsigned len, int prot); 17 17 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
+11 -5
drivers/gpu/drm/msm/msm_rd.c
··· 298 298 299 299 static void snapshot_buf(struct msm_rd_state *rd, 300 300 struct msm_gem_submit *submit, int idx, 301 - uint64_t iova, uint32_t size) 301 + uint64_t iova, uint32_t size, bool full) 302 302 { 303 303 struct msm_gem_object *obj = submit->bos[idx].obj; 304 304 unsigned offset = 0; ··· 317 317 */ 318 318 rd_write_section(rd, RD_GPUADDR, 319 319 (uint32_t[3]){ iova, size, iova >> 32 }, 12); 320 + 321 + if (!full) 322 + return; 320 323 321 324 /* But only dump the contents of buffers marked READ */ 322 325 if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ)) ··· 384 381 rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); 385 382 386 383 for (i = 0; i < submit->nr_bos; i++) 387 - if (should_dump(submit, i)) 388 - snapshot_buf(rd, submit, i, 0, 0); 384 + snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i)); 389 385 390 386 for (i = 0; i < submit->nr_cmds; i++) { 391 - uint64_t iova = submit->cmd[i].iova; 392 387 uint32_t szd = submit->cmd[i].size; /* in dwords */ 393 388 394 389 /* snapshot cmdstream bo's (if we haven't already): */ 395 390 if (!should_dump(submit, i)) { 396 391 snapshot_buf(rd, submit, submit->cmd[i].idx, 397 - submit->cmd[i].iova, szd * 4); 392 + submit->cmd[i].iova, szd * 4, true); 398 393 } 394 + } 395 + 396 + for (i = 0; i < submit->nr_cmds; i++) { 397 + uint64_t iova = submit->cmd[i].iova; 398 + uint32_t szd = submit->cmd[i].size; /* in dwords */ 399 399 400 400 switch (submit->cmd[i].type) { 401 401 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+10
drivers/soc/qcom/Kconfig
··· 74 74 tristate 75 75 select QCOM_SCM 76 76 77 + config QCOM_OCMEM 78 + tristate "Qualcomm On Chip Memory (OCMEM) driver" 79 + depends on ARCH_QCOM 80 + select QCOM_SCM 81 + help 82 + The On Chip Memory (OCMEM) allocator allows various clients to 83 + allocate memory from OCMEM based on performance, latency and power 84 + requirements. This is typically used by the GPU, camera/video, and 85 + audio components on some Snapdragon SoCs. 86 + 77 87 config QCOM_PM 78 88 bool "Qualcomm Power Management" 79 89 depends on ARCH_QCOM && !ARM64
+1
drivers/soc/qcom/Makefile
··· 6 6 obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o 7 7 obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o 8 8 obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o 9 + obj-$(CONFIG_QCOM_OCMEM) += ocmem.o 9 10 obj-$(CONFIG_QCOM_PM) += spm.o 10 11 obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o 11 12 qmi_helpers-y += qmi_encdec.o qmi_interface.o
+433
drivers/soc/qcom/ocmem.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * The On Chip Memory (OCMEM) allocator allows various clients to allocate 4 + * memory from OCMEM based on performance, latency and power requirements. 5 + * This is typically used by the GPU, camera/video, and audio components on 6 + * some Snapdragon SoCs. 7 + * 8 + * Copyright (C) 2019 Brian Masney <masneyb@onstation.org> 9 + * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com> 10 + */ 11 + 12 + #include <linux/bitfield.h> 13 + #include <linux/clk.h> 14 + #include <linux/io.h> 15 + #include <linux/kernel.h> 16 + #include <linux/module.h> 17 + #include <linux/of_device.h> 18 + #include <linux/platform_device.h> 19 + #include <linux/qcom_scm.h> 20 + #include <linux/sizes.h> 21 + #include <linux/slab.h> 22 + #include <linux/types.h> 23 + #include <soc/qcom/ocmem.h> 24 + 25 + enum region_mode { 26 + WIDE_MODE = 0x0, 27 + THIN_MODE, 28 + MODE_DEFAULT = WIDE_MODE, 29 + }; 30 + 31 + enum ocmem_macro_state { 32 + PASSTHROUGH = 0, 33 + PERI_ON = 1, 34 + CORE_ON = 2, 35 + CLK_OFF = 4, 36 + }; 37 + 38 + struct ocmem_region { 39 + bool interleaved; 40 + enum region_mode mode; 41 + unsigned int num_macros; 42 + enum ocmem_macro_state macro_state[4]; 43 + unsigned long macro_size; 44 + unsigned long region_size; 45 + }; 46 + 47 + struct ocmem_config { 48 + uint8_t num_regions; 49 + unsigned long macro_size; 50 + }; 51 + 52 + struct ocmem { 53 + struct device *dev; 54 + const struct ocmem_config *config; 55 + struct resource *memory; 56 + void __iomem *mmio; 57 + unsigned int num_ports; 58 + unsigned int num_macros; 59 + bool interleaved; 60 + struct ocmem_region *regions; 61 + unsigned long active_allocations; 62 + }; 63 + 64 + #define OCMEM_MIN_ALIGN SZ_64K 65 + #define OCMEM_MIN_ALLOC SZ_64K 66 + 67 + #define OCMEM_REG_HW_VERSION 0x00000000 68 + #define OCMEM_REG_HW_PROFILE 0x00000004 69 + 70 + #define OCMEM_REG_REGION_MODE_CTL 0x00001000 71 + #define OCMEM_REGION_MODE_CTL_REG0_THIN 0x00000001 72 + 
#define OCMEM_REGION_MODE_CTL_REG1_THIN 0x00000002 73 + #define OCMEM_REGION_MODE_CTL_REG2_THIN 0x00000004 74 + #define OCMEM_REGION_MODE_CTL_REG3_THIN 0x00000008 75 + 76 + #define OCMEM_REG_GFX_MPU_START 0x00001004 77 + #define OCMEM_REG_GFX_MPU_END 0x00001008 78 + 79 + #define OCMEM_HW_PROFILE_NUM_PORTS(val) FIELD_PREP(0x0000000f, (val)) 80 + #define OCMEM_HW_PROFILE_NUM_MACROS(val) FIELD_PREP(0x00003f00, (val)) 81 + 82 + #define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE 0x00010000 83 + #define OCMEM_HW_PROFILE_INTERLEAVING 0x00020000 84 + #define OCMEM_REG_GEN_STATUS 0x0000000c 85 + 86 + #define OCMEM_REG_PSGSC_STATUS 0x00000038 87 + #define OCMEM_REG_PSGSC_CTL(i0) (0x0000003c + 0x1*(i0)) 88 + 89 + #define OCMEM_PSGSC_CTL_MACRO0_MODE(val) FIELD_PREP(0x00000007, (val)) 90 + #define OCMEM_PSGSC_CTL_MACRO1_MODE(val) FIELD_PREP(0x00000070, (val)) 91 + #define OCMEM_PSGSC_CTL_MACRO2_MODE(val) FIELD_PREP(0x00000700, (val)) 92 + #define OCMEM_PSGSC_CTL_MACRO3_MODE(val) FIELD_PREP(0x00007000, (val)) 93 + 94 + #define OCMEM_CLK_CORE_IDX 0 95 + static struct clk_bulk_data ocmem_clks[] = { 96 + { 97 + .id = "core", 98 + }, 99 + { 100 + .id = "iface", 101 + }, 102 + }; 103 + 104 + static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data) 105 + { 106 + writel(data, ocmem->mmio + reg); 107 + } 108 + 109 + static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg) 110 + { 111 + return readl(ocmem->mmio + reg); 112 + } 113 + 114 + static void update_ocmem(struct ocmem *ocmem) 115 + { 116 + uint32_t region_mode_ctrl = 0x0; 117 + int i; 118 + 119 + if (!qcom_scm_ocmem_lock_available()) { 120 + for (i = 0; i < ocmem->config->num_regions; i++) { 121 + struct ocmem_region *region = &ocmem->regions[i]; 122 + 123 + if (region->mode == THIN_MODE) 124 + region_mode_ctrl |= BIT(i); 125 + } 126 + 127 + dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n", 128 + region_mode_ctrl); 129 + ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl); 130 + } 131 + 132 + for (i = 0; 
i < ocmem->config->num_regions; i++) { 133 + struct ocmem_region *region = &ocmem->regions[i]; 134 + u32 data; 135 + 136 + data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) | 137 + OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) | 138 + OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) | 139 + OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]); 140 + 141 + ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data); 142 + } 143 + } 144 + 145 + static unsigned long phys_to_offset(struct ocmem *ocmem, 146 + unsigned long addr) 147 + { 148 + if (addr < ocmem->memory->start || addr >= ocmem->memory->end) 149 + return 0; 150 + 151 + return addr - ocmem->memory->start; 152 + } 153 + 154 + static unsigned long device_address(struct ocmem *ocmem, 155 + enum ocmem_client client, 156 + unsigned long addr) 157 + { 158 + WARN_ON(client != OCMEM_GRAPHICS); 159 + 160 + /* TODO: gpu uses phys_to_offset, but others do not.. */ 161 + return phys_to_offset(ocmem, addr); 162 + } 163 + 164 + static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf, 165 + enum ocmem_macro_state mstate, enum region_mode rmode) 166 + { 167 + unsigned long offset = 0; 168 + int i, j; 169 + 170 + for (i = 0; i < ocmem->config->num_regions; i++) { 171 + struct ocmem_region *region = &ocmem->regions[i]; 172 + 173 + if (buf->offset <= offset && offset < buf->offset + buf->len) 174 + region->mode = rmode; 175 + 176 + for (j = 0; j < region->num_macros; j++) { 177 + if (buf->offset <= offset && 178 + offset < buf->offset + buf->len) 179 + region->macro_state[j] = mstate; 180 + 181 + offset += region->macro_size; 182 + } 183 + } 184 + 185 + update_ocmem(ocmem); 186 + } 187 + 188 + struct ocmem *of_get_ocmem(struct device *dev) 189 + { 190 + struct platform_device *pdev; 191 + struct device_node *devnode; 192 + 193 + devnode = of_parse_phandle(dev->of_node, "sram", 0); 194 + if (!devnode || !devnode->parent) { 195 + dev_err(dev, "Cannot look up sram phandle\n"); 196 + return ERR_PTR(-ENODEV); 197 
+ } 198 + 199 + pdev = of_find_device_by_node(devnode->parent); 200 + if (!pdev) { 201 + dev_err(dev, "Cannot find device node %s\n", devnode->name); 202 + return ERR_PTR(-EPROBE_DEFER); 203 + } 204 + 205 + return platform_get_drvdata(pdev); 206 + } 207 + EXPORT_SYMBOL(of_get_ocmem); 208 + 209 + struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, 210 + unsigned long size) 211 + { 212 + struct ocmem_buf *buf; 213 + int ret; 214 + 215 + /* TODO: add support for other clients... */ 216 + if (WARN_ON(client != OCMEM_GRAPHICS)) 217 + return ERR_PTR(-ENODEV); 218 + 219 + if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN)) 220 + return ERR_PTR(-EINVAL); 221 + 222 + if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations)) 223 + return ERR_PTR(-EBUSY); 224 + 225 + buf = kzalloc(sizeof(*buf), GFP_KERNEL); 226 + if (!buf) { 227 + ret = -ENOMEM; 228 + goto err_unlock; 229 + } 230 + 231 + buf->offset = 0; 232 + buf->addr = device_address(ocmem, client, buf->offset); 233 + buf->len = size; 234 + 235 + update_range(ocmem, buf, CORE_ON, WIDE_MODE); 236 + 237 + if (qcom_scm_ocmem_lock_available()) { 238 + ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, 239 + buf->offset, buf->len, WIDE_MODE); 240 + if (ret) { 241 + dev_err(ocmem->dev, "could not lock: %d\n", ret); 242 + ret = -EINVAL; 243 + goto err_kfree; 244 + } 245 + } else { 246 + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset); 247 + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 248 + buf->offset + buf->len); 249 + } 250 + 251 + dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n", 252 + size / 1024, buf->addr, client); 253 + 254 + return buf; 255 + 256 + err_kfree: 257 + kfree(buf); 258 + err_unlock: 259 + clear_bit_unlock(BIT(client), &ocmem->active_allocations); 260 + 261 + return ERR_PTR(ret); 262 + } 263 + EXPORT_SYMBOL(ocmem_allocate); 264 + 265 + void ocmem_free(struct ocmem *ocmem, enum ocmem_client client, 266 + struct ocmem_buf *buf) 267 
+ { 268 + /* TODO: add support for other clients... */ 269 + if (WARN_ON(client != OCMEM_GRAPHICS)) 270 + return; 271 + 272 + update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT); 273 + 274 + if (qcom_scm_ocmem_lock_available()) { 275 + int ret; 276 + 277 + ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, 278 + buf->offset, buf->len); 279 + if (ret) 280 + dev_err(ocmem->dev, "could not unlock: %d\n", ret); 281 + } else { 282 + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0); 283 + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0); 284 + } 285 + 286 + kfree(buf); 287 + 288 + clear_bit_unlock(BIT(client), &ocmem->active_allocations); 289 + } 290 + EXPORT_SYMBOL(ocmem_free); 291 + 292 + static int ocmem_dev_probe(struct platform_device *pdev) 293 + { 294 + struct device *dev = &pdev->dev; 295 + unsigned long reg, region_size; 296 + int i, j, ret, num_banks; 297 + struct resource *res; 298 + struct ocmem *ocmem; 299 + 300 + if (!qcom_scm_is_available()) 301 + return -EPROBE_DEFER; 302 + 303 + ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL); 304 + if (!ocmem) 305 + return -ENOMEM; 306 + 307 + ocmem->dev = dev; 308 + ocmem->config = device_get_match_data(dev); 309 + 310 + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(ocmem_clks), ocmem_clks); 311 + if (ret) { 312 + if (ret != -EPROBE_DEFER) 313 + dev_err(dev, "Unable to get clocks\n"); 314 + 315 + return ret; 316 + } 317 + 318 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); 319 + ocmem->mmio = devm_ioremap_resource(&pdev->dev, res); 320 + if (IS_ERR(ocmem->mmio)) { 321 + dev_err(&pdev->dev, "Failed to ioremap ocmem_ctrl resource\n"); 322 + return PTR_ERR(ocmem->mmio); 323 + } 324 + 325 + ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM, 326 + "mem"); 327 + if (!ocmem->memory) { 328 + dev_err(dev, "Could not get mem region\n"); 329 + return -ENXIO; 330 + } 331 + 332 + /* The core clock is synchronous with graphics */ 333 + WARN_ON(clk_set_rate(ocmem_clks[OCMEM_CLK_CORE_IDX].clk, 1000) 
< 0); 334 + 335 + ret = clk_bulk_prepare_enable(ARRAY_SIZE(ocmem_clks), ocmem_clks); 336 + if (ret) { 337 + dev_info(ocmem->dev, "Failed to enable clocks\n"); 338 + return ret; 339 + } 340 + 341 + if (qcom_scm_restore_sec_cfg_available()) { 342 + dev_dbg(dev, "configuring scm\n"); 343 + ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0); 344 + if (ret) { 345 + dev_err(dev, "Could not enable secure configuration\n"); 346 + goto err_clk_disable; 347 + } 348 + } 349 + 350 + reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE); 351 + ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg); 352 + ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg); 353 + ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING); 354 + 355 + num_banks = ocmem->num_ports / 2; 356 + region_size = ocmem->config->macro_size * num_banks; 357 + 358 + dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n", 359 + ocmem->num_ports, ocmem->config->num_regions, 360 + ocmem->num_macros, ocmem->interleaved ? "" : "not "); 361 + 362 + ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions, 363 + sizeof(struct ocmem_region), GFP_KERNEL); 364 + if (!ocmem->regions) { 365 + ret = -ENOMEM; 366 + goto err_clk_disable; 367 + } 368 + 369 + for (i = 0; i < ocmem->config->num_regions; i++) { 370 + struct ocmem_region *region = &ocmem->regions[i]; 371 + 372 + if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) { 373 + ret = -EINVAL; 374 + goto err_clk_disable; 375 + } 376 + 377 + region->mode = MODE_DEFAULT; 378 + region->num_macros = num_banks; 379 + 380 + if (i == (ocmem->config->num_regions - 1) && 381 + reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) { 382 + region->macro_size = ocmem->config->macro_size / 2; 383 + region->region_size = region_size / 2; 384 + } else { 385 + region->macro_size = ocmem->config->macro_size; 386 + region->region_size = region_size; 387 + } 388 + 389 + for (j = 0; j < ARRAY_SIZE(region->macro_state); j++) 390 + region->macro_state[j] = CLK_OFF; 391 + } 392 + 393 
+ platform_set_drvdata(pdev, ocmem); 394 + 395 + return 0; 396 + 397 + err_clk_disable: 398 + clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks); 399 + return ret; 400 + } 401 + 402 + static int ocmem_dev_remove(struct platform_device *pdev) 403 + { 404 + clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks); 405 + 406 + return 0; 407 + } 408 + 409 + static const struct ocmem_config ocmem_8974_config = { 410 + .num_regions = 3, 411 + .macro_size = SZ_128K, 412 + }; 413 + 414 + static const struct of_device_id ocmem_of_match[] = { 415 + { .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config }, 416 + { } 417 + }; 418 + 419 + MODULE_DEVICE_TABLE(of, ocmem_of_match); 420 + 421 + static struct platform_driver ocmem_driver = { 422 + .probe = ocmem_dev_probe, 423 + .remove = ocmem_dev_remove, 424 + .driver = { 425 + .name = "ocmem", 426 + .of_match_table = ocmem_of_match, 427 + }, 428 + }; 429 + 430 + module_platform_driver(ocmem_driver); 431 + 432 + MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs"); 433 + MODULE_LICENSE("GPL v2");
+26
include/linux/qcom_scm.h
··· 24 24 int perm; 25 25 }; 26 26 27 + enum qcom_scm_ocmem_client { 28 + QCOM_SCM_OCMEM_UNUSED_ID = 0x0, 29 + QCOM_SCM_OCMEM_GRAPHICS_ID, 30 + QCOM_SCM_OCMEM_VIDEO_ID, 31 + QCOM_SCM_OCMEM_LP_AUDIO_ID, 32 + QCOM_SCM_OCMEM_SENSORS_ID, 33 + QCOM_SCM_OCMEM_OTHER_OS_ID, 34 + QCOM_SCM_OCMEM_DEBUG_ID, 35 + }; 36 + 37 + enum qcom_scm_sec_dev_id { 38 + QCOM_SCM_MDSS_DEV_ID = 1, 39 + QCOM_SCM_OCMEM_DEV_ID = 5, 40 + QCOM_SCM_PCIE0_DEV_ID = 11, 41 + QCOM_SCM_PCIE1_DEV_ID = 12, 42 + QCOM_SCM_GFX_DEV_ID = 18, 43 + QCOM_SCM_UFS_DEV_ID = 19, 44 + QCOM_SCM_ICE_DEV_ID = 20, 45 + }; 46 + 27 47 #define QCOM_SCM_VMID_HLOS 0x3 28 48 #define QCOM_SCM_VMID_MSS_MSA 0xF 29 49 #define QCOM_SCM_VMID_WLAN 0x18 ··· 61 41 extern bool qcom_scm_hdcp_available(void); 62 42 extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, 63 43 u32 *resp); 44 + extern bool qcom_scm_ocmem_lock_available(void); 45 + extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, 46 + u32 size, u32 mode); 47 + extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, 48 + u32 size); 64 49 extern bool qcom_scm_pas_supported(u32 peripheral); 65 50 extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, 66 51 size_t size); ··· 80 55 extern void qcom_scm_cpu_power_down(u32 flags); 81 56 extern u32 qcom_scm_get_version(void); 82 57 extern int qcom_scm_set_remote_state(u32 state, u32 id); 58 + extern bool qcom_scm_restore_sec_cfg_available(void); 83 59 extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); 84 60 extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); 85 61 extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+65
include/soc/qcom/ocmem.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * The On Chip Memory (OCMEM) allocator allows various clients to allocate 4 + * memory from OCMEM based on performance, latency and power requirements. 5 + * This is typically used by the GPU, camera/video, and audio components on 6 + * some Snapdragon SoCs. 7 + * 8 + * Copyright (C) 2019 Brian Masney <masneyb@onstation.org> 9 + * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com> 10 + */ 11 + 12 + #include <linux/device.h> 13 + #include <linux/err.h> 14 + 15 + #ifndef __OCMEM_H__ 16 + #define __OCMEM_H__ 17 + 18 + enum ocmem_client { 19 + /* GMEM clients */ 20 + OCMEM_GRAPHICS = 0x0, 21 + /* 22 + * TODO add more once ocmem_allocate() is clever enough to 23 + * deal with multiple clients. 24 + */ 25 + OCMEM_CLIENT_MAX, 26 + }; 27 + 28 + struct ocmem; 29 + 30 + struct ocmem_buf { 31 + unsigned long offset; 32 + unsigned long addr; 33 + unsigned long len; 34 + }; 35 + 36 + #if IS_ENABLED(CONFIG_QCOM_OCMEM) 37 + 38 + struct ocmem *of_get_ocmem(struct device *dev); 39 + struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, 40 + unsigned long size); 41 + void ocmem_free(struct ocmem *ocmem, enum ocmem_client client, 42 + struct ocmem_buf *buf); 43 + 44 + #else /* IS_ENABLED(CONFIG_QCOM_OCMEM) */ 45 + 46 + static inline struct ocmem *of_get_ocmem(struct device *dev) 47 + { 48 + return ERR_PTR(-ENODEV); 49 + } 50 + 51 + static inline struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, 52 + enum ocmem_client client, 53 + unsigned long size) 54 + { 55 + return ERR_PTR(-ENODEV); 56 + } 57 + 58 + static inline void ocmem_free(struct ocmem *ocmem, enum ocmem_client client, 59 + struct ocmem_buf *buf) 60 + { 61 + } 62 + 63 + #endif /* IS_ENABLED(CONFIG_QCOM_OCMEM) */ 64 + 65 + #endif /* __OCMEM_H__ */