Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-msm-next-2020-01-14' of https://gitlab.freedesktop.org/drm/msm into drm-next

+ sc7180 display + DSI support
+ a618 (sc7180) support
+ more UBWC (bandwidth compression) support
+ various cleanups to handle devices that use, vs. don't use,
zap firmware, etc.
+ usual random cleanups and fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvv03ifuP0tp7-dmqZtr1iS=s8Vc=az8BNGtEoSMD-dkw@mail.gmail.com

+1056 -435
+2 -2
Documentation/devicetree/bindings/display/msm/dpu.txt
··· 8 8 9 9 MDSS: 10 10 Required properties: 11 - - compatible: "qcom,sdm845-mdss" 11 + - compatible: "qcom,sdm845-mdss", "qcom,sc7180-mdss" 12 12 - reg: physical base address and length of contoller's registers. 13 13 - reg-names: register region names. The following region is required: 14 14 * "mdss" ··· 41 41 42 42 MDP: 43 43 Required properties: 44 - - compatible: "qcom,sdm845-dpu" 44 + - compatible: "qcom,sdm845-dpu", "qcom,sc7180-dpu" 45 45 - reg: physical base address and length of controller's registers. 46 46 - reg-names : register region names. The following region is required: 47 47 * "mdp"
+8 -1
Documentation/devicetree/bindings/display/msm/gpu.txt
··· 23 23 - iommus: optional phandle to an adreno iommu instance 24 24 - operating-points-v2: optional phandle to the OPP operating points 25 25 - interconnects: optional phandle to an interconnect provider. See 26 - ../interconnect/interconnect.txt for details. 26 + ../interconnect/interconnect.txt for details. Some A3xx and all A4xx platforms 27 + will have two paths; all others will have one path. 28 + - interconnect-names: The names of the interconnect paths that correspond to the 29 + interconnects property. Values must be gfx-mem and ocmem. 27 30 - qcom,gmu: For GMU attached devices a phandle to the GMU device that will 28 31 control the power for the GPU. Applicable targets: 29 32 - qcom,adreno-630.2 30 33 - zap-shader: For a5xx and a6xx devices this node contains a memory-region that 31 34 points to reserved memory to store the zap shader that can be used to help 32 35 bring the GPU out of secure mode. 36 + - firmware-name: optional property of the 'zap-shader' node, listing the 37 + relative path of the device specific zap firmware. 33 38 34 39 Example 3xx/4xx/a5xx: 35 40 ··· 81 76 operating-points-v2 = <&gpu_opp_table>; 82 77 83 78 interconnects = <&rsc_hlos MASTER_GFX3D &rsc_hlos SLAVE_EBI1>; 79 + interconnect-names = "gfx-mem"; 84 80 85 81 qcom,gmu = <&gmu>; 86 82 87 83 zap-shader { 88 84 memory-region = <&zap_shader_region>; 85 + firmware-name = "qcom/LENOVO/81JL/qcdxkmsuc850.mbn" 89 86 }; 90 87 }; 91 88 };
+8
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
··· 506 506 goto fail; 507 507 } 508 508 509 + /* 510 + * Set the ICC path to maximum speed for now by multiplying the fastest 511 + * frequency by the bus width (8). We'll want to scale this later on to 512 + * improve battery life. 513 + */ 514 + icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); 515 + icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); 516 + 509 517 return gpu; 510 518 511 519 fail:
+8
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
··· 591 591 goto fail; 592 592 } 593 593 594 + /* 595 + * Set the ICC path to maximum speed for now by multiplying the fastest 596 + * frequency by the bus width (8). We'll want to scale this later on to 597 + * improve battery life. 598 + */ 599 + icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); 600 + icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); 601 + 594 602 return gpu; 595 603 596 604 fail:
+9 -2
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 753 753 gpu->funcs->flush(gpu, gpu->rb[0]); 754 754 if (!a5xx_idle(gpu, gpu->rb[0])) 755 755 return -EINVAL; 756 - } else { 757 - /* Print a warning so if we die, we know why */ 756 + } else if (ret == -ENODEV) { 757 + /* 758 + * This device does not use zap shader (but print a warning 759 + * just in case someone got their dt wrong.. hopefully they 760 + * have a debug UART to realize the error of their ways... 761 + * if you mess this up you are about to crash horribly) 762 + */ 758 763 dev_warn_once(gpu->dev->dev, 759 764 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); 760 765 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0); 766 + } else { 767 + return ret; 761 768 } 762 769 763 770 /* Last step - yield the ringbuffer */
+50 -2
drivers/gpu/drm/msm/adreno/a6xx.xml.h
··· 16 16 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) 17 17 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) 18 18 - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54) 19 - - /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54) 19 + - /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml ( 161969 bytes, from 2019-11-29 07:18:16) 20 20 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07) 21 21 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) 22 22 23 - Copyright (C) 2013-2018 by the following authors: 23 + Copyright (C) 2013-2019 by the following authors: 24 24 - Rob Clark <robdclark@gmail.com> (robclark) 25 25 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 26 26 ··· 2518 2518 #define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119 2519 2519 2520 2520 #define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a 2521 + 2522 + #define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02 2523 + 2524 + #define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03 2525 + 2526 + #define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04 2527 + 2528 + #define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05 2529 + 2530 + #define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06 2531 + 2532 + #define REG_A6XX_GBIF_HALT 0x00003c45 2533 + 2534 + #define REG_A6XX_GBIF_HALT_ACK 0x00003c46 2535 + 2536 + #define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0 2537 + 2538 + #define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2 2539 + 2540 + #define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3 2541 + 2542 + #define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4 2543 + 2544 + #define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5 2545 + 2546 + #define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6 2547 + 2548 + #define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7 2549 + 2550 + #define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8 2551 + 2552 + #define 
REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9 2553 + 2554 + #define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca 2555 + 2556 + #define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb 2557 + 2558 + #define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc 2559 + 2560 + #define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd 2561 + 2562 + #define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce 2563 + 2564 + #define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf 2565 + 2566 + #define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0 2567 + 2568 + #define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1 2521 2569 2522 2570 #define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4 2523 2571 #define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
+26 -6
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ 2 + /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ 3 3 4 4 #include <linux/clk.h> 5 5 #include <linux/interconnect.h> ··· 148 148 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) 149 149 if (freq == gmu->gpu_freqs[perf_index]) 150 150 break; 151 + 152 + gmu->current_perf_index = perf_index; 151 153 152 154 __a6xx_gmu_set_freq(gmu, perf_index); 153 155 } ··· 435 433 436 434 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) 437 435 { 436 + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 437 + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 438 438 struct platform_device *pdev = to_platform_device(gmu->dev); 439 439 void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); 440 440 void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); ··· 484 480 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); 485 481 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); 486 482 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); 483 + 487 484 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); 488 - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); 485 + if (adreno_is_a618(adreno_gpu)) 486 + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090); 487 + else 488 + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); 489 489 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); 490 + 490 491 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); 491 492 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); 492 493 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); 493 494 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); 494 495 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); 495 496 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); 497 + 496 
498 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); 497 499 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); 498 - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); 500 + if (adreno_is_a618(adreno_gpu)) 501 + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2); 502 + else 503 + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); 504 + 505 + 499 506 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); 500 - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); 507 + if (adreno_is_a618(adreno_gpu)) 508 + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090); 509 + else 510 + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); 501 511 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); 502 512 503 513 /* Setup GPU PDC */ ··· 759 741 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); 760 742 enable_irq(gmu->hfi_irq); 761 743 762 - /* Set the GPU to the highest power frequency */ 763 - __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); 744 + /* Set the GPU to the current freq */ 745 + __a6xx_gmu_set_freq(gmu, gmu->current_perf_index); 764 746 765 747 /* 766 748 * "enable" the GX power domain which won't actually do anything but it ··· 1183 1165 */ 1184 1166 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, 1185 1167 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); 1168 + 1169 + gmu->current_perf_index = gmu->nr_gpu_freqs - 1; 1186 1170 1187 1171 /* Build the list of RPMh votes that we'll send to the GMU */ 1188 1172 return a6xx_gmu_rpmh_votes_init(gmu);
+3
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
··· 63 63 struct clk_bulk_data *clocks; 64 64 struct clk *core_clk; 65 65 66 + /* current performance index set externally */ 67 + int current_perf_index; 68 + 66 69 int nr_gpu_freqs; 67 70 unsigned long gpu_freqs[16]; 68 71 u32 gx_arc_votes[16];
+74 -7
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ 2 + /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ 3 3 4 4 5 5 #include "msm_gem.h" ··· 378 378 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 379 379 int ret; 380 380 381 + /* 382 + * During a previous slumber, GBIF halt is asserted to ensure 383 + * no further transaction can go through GPU before GPU 384 + * headswitch is turned off. 385 + * 386 + * This halt is deasserted once headswitch goes off but 387 + * incase headswitch doesn't goes off clear GBIF halt 388 + * here to ensure GPU wake-up doesn't fail because of 389 + * halted GPU transactions. 390 + */ 391 + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); 392 + 381 393 /* Make sure the GMU keeps the GPU on while we set it up */ 382 394 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); 383 395 ··· 418 406 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); 419 407 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); 420 408 421 - /* enable hardware clockgating */ 422 - a6xx_set_hwcg(gpu, true); 409 + /* 410 + * enable hardware clockgating 411 + * For now enable clock gating only for a630 412 + */ 413 + if (adreno_is_a630(adreno_gpu)) 414 + a6xx_set_hwcg(gpu, true); 423 415 424 - /* VBIF start */ 425 - gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); 416 + /* VBIF/GBIF start*/ 426 417 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); 418 + if (adreno_is_a630(adreno_gpu)) 419 + gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); 427 420 428 421 /* Make all blocks contribute to the GPU BUSY perf counter */ 429 422 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); ··· 554 537 a6xx_flush(gpu, gpu->rb[0]); 555 538 if (!a6xx_idle(gpu, gpu->rb[0])) 556 539 return -EINVAL; 557 - } else { 558 - /* Print a warning so if we die, we know why */ 540 + } else if (ret == -ENODEV) { 541 + /* 542 + * This device does not use zap 
shader (but print a warning 543 + * just in case someone got their dt wrong.. hopefully they 544 + * have a debug UART to realize the error of their ways... 545 + * if you mess this up you are about to crash horribly) 546 + */ 559 547 dev_warn_once(gpu->dev->dev, 560 548 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); 561 549 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); 562 550 ret = 0; 551 + } else { 552 + return ret; 563 553 } 564 554 565 555 out: ··· 748 724 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL), 749 725 }; 750 726 727 + #define GBIF_CLIENT_HALT_MASK BIT(0) 728 + #define GBIF_ARB_HALT_MASK BIT(1) 729 + 730 + static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) 731 + { 732 + struct msm_gpu *gpu = &adreno_gpu->base; 733 + 734 + if(!a6xx_has_gbif(adreno_gpu)){ 735 + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); 736 + spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 737 + 0xf) == 0xf); 738 + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); 739 + 740 + return; 741 + } 742 + 743 + /* Halt new client requests on GBIF */ 744 + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); 745 + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 746 + (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); 747 + 748 + /* Halt all AXI requests on GBIF */ 749 + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); 750 + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 751 + (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); 752 + 753 + /* 754 + * GMU needs DDR access in slumber path. Deassert GBIF halt now 755 + * to allow for GMU to access system memory. 
756 + */ 757 + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); 758 + } 759 + 751 760 static int a6xx_pm_resume(struct msm_gpu *gpu) 752 761 { 753 762 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); ··· 804 747 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 805 748 806 749 devfreq_suspend_device(gpu->devfreq.devfreq); 750 + 751 + /* 752 + * Make sure the GMU is idle before continuing (because some transitions 753 + * may use VBIF 754 + */ 755 + a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu); 756 + 757 + /* Clear the VBIF pipe before shutting down */ 758 + /* FIXME: This accesses the GPU - do we need to make sure it is on? */ 759 + a6xx_bus_clear_pending_transactions(adreno_gpu); 807 760 808 761 return a6xx_gmu_stop(a6xx_gpu); 809 762 }
+8 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* Copyright (c) 2017 The Linux Foundation. All rights reserved. */ 2 + /* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */ 3 3 4 4 #ifndef __A6XX_GPU_H__ 5 5 #define __A6XX_GPU_H__ ··· 42 42 #define A6XX_PROTECT_RDONLY(_reg, _len) \ 43 43 ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF)) 44 44 45 + static inline bool a6xx_has_gbif(struct adreno_gpu *gpu) 46 + { 47 + if(adreno_is_a630(gpu)) 48 + return false; 49 + 50 + return true; 51 + } 45 52 46 53 int a6xx_gmu_resume(struct a6xx_gpu *gpu); 47 54 int a6xx_gmu_stop(struct a6xx_gpu *gpu);
+41 -11
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright (c) 2018 The Linux Foundation. All rights reserved. */ 2 + /* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */ 3 3 4 4 #include <linux/ascii85.h> 5 5 #include "msm_gem.h" ··· 320 320 { 321 321 struct resource *res; 322 322 void __iomem *cxdbg = NULL; 323 + int nr_debugbus_blocks; 323 324 324 325 /* Set up the GX debug bus */ 325 326 ··· 375 374 cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0); 376 375 } 377 376 378 - a6xx_state->debugbus = state_kcalloc(a6xx_state, 379 - ARRAY_SIZE(a6xx_debugbus_blocks), 380 - sizeof(*a6xx_state->debugbus)); 377 + nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) + 378 + (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0); 379 + 380 + a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks, 381 + sizeof(*a6xx_state->debugbus)); 381 382 382 383 if (a6xx_state->debugbus) { 383 384 int i; ··· 391 388 &a6xx_state->debugbus[i]); 392 389 393 390 a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks); 391 + 392 + /* 393 + * GBIF has same debugbus as of other GPU blocks, fall back to 394 + * default path if GPU uses GBIF, also GBIF uses exactly same 395 + * ID as of VBIF. 
396 + */ 397 + if (a6xx_has_gbif(to_adreno_gpu(gpu))) { 398 + a6xx_get_debugbus_block(gpu, a6xx_state, 399 + &a6xx_gbif_debugbus_block, 400 + &a6xx_state->debugbus[i]); 401 + 402 + a6xx_state->nr_debugbus += 1; 403 + } 394 404 } 395 405 396 - a6xx_state->vbif_debugbus = 397 - state_kcalloc(a6xx_state, 1, 398 - sizeof(*a6xx_state->vbif_debugbus)); 406 + /* Dump the VBIF debugbus on applicable targets */ 407 + if (!a6xx_has_gbif(to_adreno_gpu(gpu))) { 408 + a6xx_state->vbif_debugbus = 409 + state_kcalloc(a6xx_state, 1, 410 + sizeof(*a6xx_state->vbif_debugbus)); 399 411 400 - if (a6xx_state->vbif_debugbus) 401 - a6xx_get_vbif_debugbus_block(gpu, a6xx_state, 402 - a6xx_state->vbif_debugbus); 412 + if (a6xx_state->vbif_debugbus) 413 + a6xx_get_vbif_debugbus_block(gpu, a6xx_state, 414 + a6xx_state->vbif_debugbus); 415 + } 403 416 404 417 if (cxdbg) { 405 418 a6xx_state->cx_debugbus = ··· 789 770 &a6xx_state->gmu_registers[1]); 790 771 } 791 772 773 + #define A6XX_GBIF_REGLIST_SIZE 1 792 774 static void a6xx_get_registers(struct msm_gpu *gpu, 793 775 struct a6xx_gpu_state *a6xx_state, 794 776 struct a6xx_crashdumper *dumper) 795 777 { 796 778 int i, count = ARRAY_SIZE(a6xx_ahb_reglist) + 797 779 ARRAY_SIZE(a6xx_reglist) + 798 - ARRAY_SIZE(a6xx_hlsq_reglist); 780 + ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE; 799 781 int index = 0; 782 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 800 783 801 784 a6xx_state->registers = state_kcalloc(a6xx_state, 802 785 count, sizeof(*a6xx_state->registers)); ··· 812 791 a6xx_get_ahb_gpu_registers(gpu, 813 792 a6xx_state, &a6xx_ahb_reglist[i], 814 793 &a6xx_state->registers[index++]); 794 + 795 + if (a6xx_has_gbif(adreno_gpu)) 796 + a6xx_get_ahb_gpu_registers(gpu, 797 + a6xx_state, &a6xx_gbif_reglist, 798 + &a6xx_state->registers[index++]); 799 + else 800 + a6xx_get_ahb_gpu_registers(gpu, 801 + a6xx_state, &a6xx_vbif_reglist, 802 + &a6xx_state->registers[index++]); 815 803 816 804 for (i = 0; i < 
ARRAY_SIZE(a6xx_reglist); i++) 817 805 a6xx_get_crashdumper_registers(gpu,
+14 -2
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright (c) 2018 The Linux Foundation. All rights reserved. */ 2 + /* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */ 3 3 4 4 #ifndef _A6XX_CRASH_DUMP_H_ 5 5 #define _A6XX_CRASH_DUMP_H_ ··· 307 307 0x3410, 0x3410, 0x3800, 0x3801, 308 308 }; 309 309 310 + static const u32 a6xx_gbif_registers[] = { 311 + 0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A, 312 + }; 313 + 310 314 static const struct a6xx_registers a6xx_ahb_reglist[] = { 311 315 REGS(a6xx_ahb_registers, 0, 0), 312 - REGS(a6xx_vbif_registers, 0, 0), 313 316 }; 317 + 318 + static const struct a6xx_registers a6xx_vbif_reglist = 319 + REGS(a6xx_vbif_registers, 0, 0); 320 + 321 + static const struct a6xx_registers a6xx_gbif_reglist = 322 + REGS(a6xx_gbif_registers, 0, 0); 314 323 315 324 static const u32 a6xx_gmu_gx_registers[] = { 316 325 /* GMU GX */ ··· 430 421 DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100), 431 422 DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100), 432 423 }; 424 + 425 + static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block = 426 + DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100); 433 427 434 428 static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = { 435 429 DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+11
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 167 167 .init = a5xx_gpu_init, 168 168 .zapfw = "a540_zap.mdt", 169 169 }, { 170 + .rev = ADRENO_REV(6, 1, 8, ANY_ID), 171 + .revn = 618, 172 + .name = "A618", 173 + .fw = { 174 + [ADRENO_FW_SQE] = "a630_sqe.fw", 175 + [ADRENO_FW_GMU] = "a630_gmu.bin", 176 + }, 177 + .gmem = SZ_512K, 178 + .inactive_period = DRM_MSM_INACTIVE_PERIOD, 179 + .init = a6xx_gpu_init, 180 + }, { 170 181 .rev = ADRENO_REV(6, 3, 0, ANY_ID), 171 182 .revn = 630, 172 183 .name = "A630",
+53 -13
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 26 26 { 27 27 struct device *dev = &gpu->pdev->dev; 28 28 const struct firmware *fw; 29 + const char *signed_fwname = NULL; 29 30 struct device_node *np, *mem_np; 30 31 struct resource r; 31 32 phys_addr_t mem_phys; ··· 59 58 60 59 mem_phys = r.start; 61 60 62 - /* Request the MDT file for the firmware */ 63 - fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); 61 + /* 62 + * Check for a firmware-name property. This is the new scheme 63 + * to handle firmware that may be signed with device specific 64 + * keys, allowing us to have a different zap fw path for different 65 + * devices. 66 + * 67 + * If the firmware-name property is found, we bypass the 68 + * adreno_request_fw() mechanism, because we don't need to handle 69 + * the /lib/firmware/qcom/... vs /lib/firmware/... case. 70 + * 71 + * If the firmware-name property is not found, for backwards 72 + * compatibility we fall back to the fwname from the gpulist 73 + * table. 74 + */ 75 + of_property_read_string_index(np, "firmware-name", 0, &signed_fwname); 76 + if (signed_fwname) { 77 + fwname = signed_fwname; 78 + ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); 79 + if (ret) 80 + fw = ERR_PTR(ret); 81 + } else if (fwname) { 82 + /* Request the MDT file from the default location: */ 83 + fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); 84 + } else { 85 + /* 86 + * For new targets, we require the firmware-name property, 87 + * if a zap-shader is required, rather than falling back 88 + * to a firmware name specified in gpulist. 89 + * 90 + * Because the firmware is signed with a (potentially) 91 + * device specific key, having the name come from gpulist 92 + * was a bad idea, and is only provided for backwards 93 + * compatibility for older targets. 94 + */ 95 + return -ENODEV; 96 + } 97 + 64 98 if (IS_ERR(fw)) { 65 99 DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname); 66 100 return PTR_ERR(fw); ··· 131 95 * not. 
But since we've already gotten through adreno_request_fw() 132 96 * we know which of the two cases it is: 133 97 */ 134 - if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) { 98 + if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { 135 99 ret = qcom_mdt_load(dev, fw, fwname, pasid, 136 100 mem_region, mem_phys, mem_size, NULL); 137 101 } else { ··· 180 144 if (!qcom_scm_is_available()) { 181 145 DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n"); 182 146 return -EPROBE_DEFER; 183 - } 184 - 185 - /* Each GPU has a target specific zap shader firmware name to use */ 186 - if (!adreno_gpu->info->zapfw) { 187 - zap_available = false; 188 - DRM_DEV_ERROR(&pdev->dev, 189 - "Zap shader firmware file not specified for this target\n"); 190 - return -ENODEV; 191 147 } 192 148 193 149 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); ··· 854 826 855 827 node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels"); 856 828 if (!node) { 857 - DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n"); 829 + DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n"); 858 830 return -ENXIO; 859 831 } 860 832 ··· 915 887 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); 916 888 917 889 /* Check for an interconnect path for the bus */ 918 - gpu->icc_path = of_icc_get(dev, NULL); 890 + gpu->icc_path = of_icc_get(dev, "gfx-mem"); 891 + if (!gpu->icc_path) { 892 + /* 893 + * Keep compatbility with device trees that don't have an 894 + * interconnect-names property. 
895 + */ 896 + gpu->icc_path = of_icc_get(dev, NULL); 897 + } 919 898 if (IS_ERR(gpu->icc_path)) 920 899 gpu->icc_path = NULL; 900 + 901 + gpu->ocmem_icc_path = of_icc_get(dev, "ocmem"); 902 + if (IS_ERR(gpu->ocmem_icc_path)) 903 + gpu->ocmem_icc_path = NULL; 921 904 922 905 return 0; 923 906 } ··· 1016 977 release_firmware(adreno_gpu->fw[i]); 1017 978 1018 979 icc_put(gpu->icc_path); 980 + icc_put(gpu->ocmem_icc_path); 1019 981 1020 982 msm_gpu_cleanup(&adreno_gpu->base); 1021 983 }
+12 -5
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 3 3 * Copyright (C) 2013 Red Hat 4 4 * Author: Rob Clark <robdclark@gmail.com> 5 5 * 6 - * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved. 6 + * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved. 7 7 */ 8 8 9 9 #ifndef __ADRENO_GPU_H__ ··· 227 227 return gpu->revn == 540; 228 228 } 229 229 230 + static inline int adreno_is_a618(struct adreno_gpu *gpu) 231 + { 232 + return gpu->revn == 618; 233 + } 234 + 235 + static inline int adreno_is_a630(struct adreno_gpu *gpu) 236 + { 237 + return gpu->revn == 630; 238 + } 239 + 230 240 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); 231 241 const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu, 232 242 const char *fwname); ··· 340 330 static inline bool adreno_reg_check(struct adreno_gpu *gpu, 341 331 enum adreno_regs offset_name) 342 332 { 343 - if (offset_name >= REG_ADRENO_REGISTER_MAX || 344 - !gpu->reg_offsets[offset_name]) { 345 - BUG(); 346 - } 333 + BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]); 347 334 348 335 /* 349 336 * REG_SKIP is a special value that tell us that the register in
+3 -12
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 197 197 DPU_DEBUG("%s\n", dpu_crtc->name); 198 198 199 199 for (i = 0; i < cstate->num_mixers; i++) { 200 - if (!mixer[i].hw_lm || !mixer[i].lm_ctl) { 201 - DPU_ERROR("invalid lm or ctl assigned to mixer\n"); 202 - return; 203 - } 204 200 mixer[i].mixer_op_mode = 0; 205 201 mixer[i].flush_mask = 0; 206 202 if (mixer[i].lm_ctl->ops.clear_all_blendstages) ··· 1109 1113 1110 1114 for (i = 0; i < cstate->num_mixers; ++i) { 1111 1115 m = &cstate->mixers[i]; 1112 - if (!m->hw_lm) 1113 - seq_printf(s, "\tmixer[%d] has no lm\n", i); 1114 - else if (!m->lm_ctl) 1115 - seq_printf(s, "\tmixer[%d] has no ctl\n", i); 1116 - else 1117 - seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", 1118 - m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, 1119 - out_width, mode->vdisplay); 1116 + seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", 1117 + m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, 1118 + out_width, mode->vdisplay); 1120 1119 } 1121 1120 1122 1121 seq_puts(s, "\n");
+89 -111
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 58 58 59 59 #define IDLE_SHORT_TIMEOUT 1 60 60 61 - #define MAX_VDISPLAY_SPLIT 1080 61 + #define MAX_HDISPLAY_SPLIT 1080 62 62 63 63 /* timeout in frames waiting for frame done */ 64 64 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5 ··· 233 233 u32 irq_status; 234 234 int ret; 235 235 236 - if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) { 236 + if (!wait_info || intr_idx >= INTR_IDX_MAX) { 237 237 DPU_ERROR("invalid params\n"); 238 238 return -EINVAL; 239 239 } ··· 308 308 struct dpu_encoder_irq *irq; 309 309 int ret = 0; 310 310 311 - if (!phys_enc || intr_idx >= INTR_IDX_MAX) { 311 + if (intr_idx >= INTR_IDX_MAX) { 312 312 DPU_ERROR("invalid params\n"); 313 313 return -EINVAL; 314 314 } ··· 363 363 struct dpu_encoder_irq *irq; 364 364 int ret; 365 365 366 - if (!phys_enc) { 367 - DPU_ERROR("invalid encoder\n"); 368 - return -EINVAL; 369 - } 370 366 irq = &phys_enc->irq[intr_idx]; 371 367 372 368 /* silently skip irqs that weren't registered */ ··· 411 415 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 412 416 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 413 417 414 - if (phys && phys->ops.get_hw_resources) 418 + if (phys->ops.get_hw_resources) 415 419 phys->ops.get_hw_resources(phys, hw_res); 416 420 } 417 421 } ··· 434 438 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 435 439 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 436 440 437 - if (phys && phys->ops.destroy) { 441 + if (phys->ops.destroy) { 438 442 phys->ops.destroy(phys); 439 443 --dpu_enc->num_phys_encs; 440 444 dpu_enc->phys_encs[i] = NULL; ··· 460 464 struct dpu_hw_mdp *hw_mdptop; 461 465 struct msm_display_info *disp_info; 462 466 463 - if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) { 467 + if (!phys_enc->hw_mdptop || !phys_enc->parent) { 464 468 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); 465 469 return; 466 470 } ··· 530 534 if (dpu_enc->phys_encs[i]) 531 535 intf_count++; 532 536 533 - /* User split topology for width > 1080 */ 534 - 
topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1; 537 + /* Datapath topology selection 538 + * 539 + * Dual display 540 + * 2 LM, 2 INTF ( Split display using 2 interfaces) 541 + * 542 + * Single display 543 + * 1 LM, 1 INTF 544 + * 2 LM, 1 INTF (stream merge to support high resolution interfaces) 545 + * 546 + */ 547 + if (intf_count == 2) 548 + topology.num_lm = 2; 549 + else if (!dpu_kms->catalog->caps->has_3d_merge) 550 + topology.num_lm = 1; 551 + else 552 + topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1; 553 + 535 554 topology.num_enc = 0; 536 555 topology.num_intf = intf_count; 537 556 ··· 594 583 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 595 584 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 596 585 597 - if (phys && phys->ops.atomic_check) 586 + if (phys->ops.atomic_check) 598 587 ret = phys->ops.atomic_check(phys, crtc_state, 599 588 conn_state); 600 - else if (phys && phys->ops.mode_fixup) 589 + else if (phys->ops.mode_fixup) 601 590 if (!phys->ops.mode_fixup(phys, mode, adj_mode)) 602 591 ret = -EINVAL; 603 592 ··· 693 682 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 694 683 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 695 684 696 - if (phys && phys->ops.irq_control) 685 + if (phys->ops.irq_control) 697 686 phys->ops.irq_control(phys, enable); 698 687 } 699 688 ··· 1043 1032 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1044 1033 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1045 1034 1046 - if (phys) { 1047 - if (!dpu_enc->hw_pp[i]) { 1048 - DPU_ERROR_ENC(dpu_enc, "no pp block assigned" 1049 - "at idx: %d\n", i); 1050 - goto error; 1051 - } 1052 - 1053 - if (!hw_ctl[i]) { 1054 - DPU_ERROR_ENC(dpu_enc, "no ctl block assigned" 1055 - "at idx: %d\n", i); 1056 - goto error; 1057 - } 1058 - 1059 - phys->hw_pp = dpu_enc->hw_pp[i]; 1060 - phys->hw_ctl = hw_ctl[i]; 1061 - 1062 - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, 1063 - DPU_HW_BLK_INTF); 1064 - for (j = 0; j < MAX_CHANNELS_PER_ENC; 
j++) { 1065 - struct dpu_hw_intf *hw_intf; 1066 - 1067 - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) 1068 - break; 1069 - 1070 - hw_intf = (struct dpu_hw_intf *)hw_iter.hw; 1071 - if (hw_intf->idx == phys->intf_idx) 1072 - phys->hw_intf = hw_intf; 1073 - } 1074 - 1075 - if (!phys->hw_intf) { 1076 - DPU_ERROR_ENC(dpu_enc, 1077 - "no intf block assigned at idx: %d\n", 1078 - i); 1079 - goto error; 1080 - } 1081 - 1082 - phys->connector = conn->state->connector; 1083 - if (phys->ops.mode_set) 1084 - phys->ops.mode_set(phys, mode, adj_mode); 1035 + if (!dpu_enc->hw_pp[i]) { 1036 + DPU_ERROR_ENC(dpu_enc, 1037 + "no pp block assigned at idx: %d\n", i); 1038 + goto error; 1085 1039 } 1040 + 1041 + if (!hw_ctl[i]) { 1042 + DPU_ERROR_ENC(dpu_enc, 1043 + "no ctl block assigned at idx: %d\n", i); 1044 + goto error; 1045 + } 1046 + 1047 + phys->hw_pp = dpu_enc->hw_pp[i]; 1048 + phys->hw_ctl = hw_ctl[i]; 1049 + 1050 + dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, 1051 + DPU_HW_BLK_INTF); 1052 + for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) { 1053 + struct dpu_hw_intf *hw_intf; 1054 + 1055 + if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) 1056 + break; 1057 + 1058 + hw_intf = (struct dpu_hw_intf *)hw_iter.hw; 1059 + if (hw_intf->idx == phys->intf_idx) 1060 + phys->hw_intf = hw_intf; 1061 + } 1062 + 1063 + if (!phys->hw_intf) { 1064 + DPU_ERROR_ENC(dpu_enc, 1065 + "no intf block assigned at idx: %d\n", i); 1066 + goto error; 1067 + } 1068 + 1069 + phys->connector = conn->state->connector; 1070 + if (phys->ops.mode_set) 1071 + phys->ops.mode_set(phys, mode, adj_mode); 1086 1072 } 1087 1073 1088 1074 dpu_enc->mode_set_complete = true; ··· 1211 1203 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1212 1204 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1213 1205 1214 - if (phys && phys->ops.disable) 1206 + if (phys->ops.disable) 1215 1207 phys->ops.disable(phys); 1216 1208 } 1217 1209 ··· 1224 1216 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); 1225 1217 1226 1218 
for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1227 - if (dpu_enc->phys_encs[i]) 1228 - dpu_enc->phys_encs[i]->connector = NULL; 1219 + dpu_enc->phys_encs[i]->connector = NULL; 1229 1220 } 1230 1221 1231 1222 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); ··· 1314 1307 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1315 1308 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1316 1309 1317 - if (phys && phys->ops.control_vblank_irq) 1310 + if (phys->ops.control_vblank_irq) 1318 1311 phys->ops.control_vblank_irq(phys, enable); 1319 1312 } 1320 1313 } ··· 1426 1419 } 1427 1420 1428 1421 ctl = phys->hw_ctl; 1429 - if (!ctl || !ctl->ops.trigger_flush) { 1422 + if (!ctl->ops.trigger_flush) { 1430 1423 DPU_ERROR("missing trigger cb\n"); 1431 1424 return; 1432 1425 } ··· 1470 1463 { 1471 1464 struct dpu_hw_ctl *ctl; 1472 1465 1473 - if (!phys_enc) { 1474 - DPU_ERROR("invalid encoder\n"); 1475 - return; 1476 - } 1477 - 1478 1466 ctl = phys_enc->hw_ctl; 1479 - if (ctl && ctl->ops.trigger_start) { 1467 + if (ctl->ops.trigger_start) { 1480 1468 ctl->ops.trigger_start(ctl); 1481 1469 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx); 1482 1470 } ··· 1508 1506 struct dpu_hw_ctl *ctl; 1509 1507 int rc; 1510 1508 1511 - if (!phys_enc) { 1512 - DPU_ERROR("invalid encoder\n"); 1513 - return; 1514 - } 1515 1509 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 1516 1510 ctl = phys_enc->hw_ctl; 1517 1511 1518 - if (!ctl || !ctl->ops.reset) 1512 + if (!ctl->ops.reset) 1519 1513 return; 1520 1514 1521 1515 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent), ··· 1548 1550 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1549 1551 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1550 1552 1551 - if (!phys || phys->enable_state == DPU_ENC_DISABLED) 1553 + if (phys->enable_state == DPU_ENC_DISABLED) 1552 1554 continue; 1553 1555 1554 1556 ctl = phys->hw_ctl; 1555 - if (!ctl) 1556 - continue; 1557 1557 1558 1558 /* 1559 1559 * This is cleared in frame_done 
worker, which isn't invoked ··· 1599 1603 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1600 1604 phys = dpu_enc->phys_encs[i]; 1601 1605 1602 - if (phys && phys->hw_ctl) { 1603 - ctl = phys->hw_ctl; 1604 - if (ctl->ops.clear_pending_flush) 1605 - ctl->ops.clear_pending_flush(ctl); 1606 + ctl = phys->hw_ctl; 1607 + if (ctl->ops.clear_pending_flush) 1608 + ctl->ops.clear_pending_flush(ctl); 1606 1609 1607 - /* update only for command mode primary ctl */ 1608 - if ((phys == dpu_enc->cur_master) && 1609 - (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) 1610 - && ctl->ops.trigger_pending) 1611 - ctl->ops.trigger_pending(ctl); 1612 - } 1610 + /* update only for command mode primary ctl */ 1611 + if ((phys == dpu_enc->cur_master) && 1612 + (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) 1613 + && ctl->ops.trigger_pending) 1614 + ctl->ops.trigger_pending(ctl); 1613 1615 } 1614 1616 } 1615 1617 ··· 1767 1773 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); 1768 1774 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1769 1775 phys = dpu_enc->phys_encs[i]; 1770 - if (phys) { 1771 - if (phys->ops.prepare_for_kickoff) 1772 - phys->ops.prepare_for_kickoff(phys); 1773 - if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 1774 - needs_hw_reset = true; 1775 - } 1776 + if (phys->ops.prepare_for_kickoff) 1777 + phys->ops.prepare_for_kickoff(phys); 1778 + if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 1779 + needs_hw_reset = true; 1776 1780 } 1777 1781 DPU_ATRACE_END("enc_prepare_for_kickoff"); 1778 1782 ··· 1811 1819 /* allow phys encs to handle any post-kickoff business */ 1812 1820 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1813 1821 phys = dpu_enc->phys_encs[i]; 1814 - if (phys && phys->ops.handle_post_kickoff) 1822 + if (phys->ops.handle_post_kickoff) 1815 1823 phys->ops.handle_post_kickoff(phys); 1816 1824 } 1817 1825 ··· 1840 1848 1841 1849 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1842 1850 phys = dpu_enc->phys_encs[i]; 1843 - if (phys && 
phys->ops.prepare_commit) 1851 + if (phys->ops.prepare_commit) 1844 1852 phys->ops.prepare_commit(phys); 1845 1853 } 1846 1854 } ··· 1854 1862 mutex_lock(&dpu_enc->enc_lock); 1855 1863 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1856 1864 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1857 - 1858 - if (!phys) 1859 - continue; 1860 1865 1861 1866 seq_printf(s, "intf:%d vsync:%8d underrun:%8d ", 1862 1867 phys->intf_idx - INTF_0, ··· 1913 1924 dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops); 1914 1925 1915 1926 for (i = 0; i < dpu_enc->num_phys_encs; i++) 1916 - if (dpu_enc->phys_encs[i] && 1917 - dpu_enc->phys_encs[i]->ops.late_register) 1927 + if (dpu_enc->phys_encs[i]->ops.late_register) 1918 1928 dpu_enc->phys_encs[i]->ops.late_register( 1919 1929 dpu_enc->phys_encs[i], 1920 1930 dpu_enc->debugfs_root); ··· 2082 2094 2083 2095 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2084 2096 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2085 - 2086 - if (phys) { 2087 - atomic_set(&phys->vsync_cnt, 0); 2088 - atomic_set(&phys->underrun_cnt, 0); 2089 - } 2097 + atomic_set(&phys->vsync_cnt, 0); 2098 + atomic_set(&phys->underrun_cnt, 0); 2090 2099 } 2091 2100 mutex_unlock(&dpu_enc->enc_lock); 2092 2101 ··· 2225 2240 2226 2241 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2227 2242 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2228 - if (!phys) 2229 - continue; 2230 2243 2231 2244 switch (event) { 2232 2245 case MSM_ENC_COMMIT_DONE: ··· 2240 2257 DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", 2241 2258 event); 2242 2259 return -EINVAL; 2243 - }; 2260 + } 2244 2261 2245 2262 if (fn_wait) { 2246 2263 DPU_ATRACE_BEGIN("wait_for_completion_event"); ··· 2257 2274 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) 2258 2275 { 2259 2276 struct dpu_encoder_virt *dpu_enc = NULL; 2260 - int i; 2261 2277 2262 2278 if (!encoder) { 2263 2279 DPU_ERROR("invalid encoder\n"); ··· 2267 2285 if (dpu_enc->cur_master) 2268 2286 return 
dpu_enc->cur_master->intf_mode; 2269 2287 2270 - for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2271 - struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2272 - 2273 - if (phys) 2274 - return phys->intf_mode; 2275 - } 2288 + if (dpu_enc->num_phys_encs) 2289 + return dpu_enc->phys_encs[0]->intf_mode; 2276 2290 2277 2291 return INTF_MODE_NONE; 2278 2292 }
+15 -58
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
··· 45 45 const struct drm_display_mode *mode, 46 46 struct drm_display_mode *adj_mode) 47 47 { 48 - if (phys_enc) 49 - DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n"); 48 + DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n"); 50 49 return true; 51 50 } 52 51 ··· 57 58 struct dpu_hw_ctl *ctl; 58 59 struct dpu_hw_intf_cfg intf_cfg = { 0 }; 59 60 60 - if (!phys_enc) 61 - return; 62 - 63 61 ctl = phys_enc->hw_ctl; 64 - if (!ctl || !ctl->ops.setup_intf_cfg) 62 + if (!ctl->ops.setup_intf_cfg) 65 63 return; 66 64 67 65 intf_cfg.intf = phys_enc->intf_idx; ··· 75 79 int new_cnt; 76 80 u32 event = DPU_ENCODER_FRAME_EVENT_DONE; 77 81 78 - if (!phys_enc || !phys_enc->hw_pp) 82 + if (!phys_enc->hw_pp) 79 83 return; 80 84 81 85 DPU_ATRACE_BEGIN("pp_done_irq"); ··· 102 106 struct dpu_encoder_phys *phys_enc = arg; 103 107 struct dpu_encoder_phys_cmd *cmd_enc; 104 108 105 - if (!phys_enc || !phys_enc->hw_pp) 109 + if (!phys_enc->hw_pp) 106 110 return; 107 111 108 112 DPU_ATRACE_BEGIN("rd_ptr_irq"); ··· 121 125 { 122 126 struct dpu_encoder_phys *phys_enc = arg; 123 127 124 - if (!phys_enc || !phys_enc->hw_ctl) 125 - return; 126 - 127 128 DPU_ATRACE_BEGIN("ctl_start_irq"); 128 129 129 130 atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0); ··· 133 140 static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx) 134 141 { 135 142 struct dpu_encoder_phys *phys_enc = arg; 136 - 137 - if (!phys_enc) 138 - return; 139 143 140 144 if (phys_enc->parent_ops->handle_underrun_virt) 141 145 phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent, ··· 169 179 struct dpu_encoder_phys_cmd *cmd_enc = 170 180 to_dpu_encoder_phys_cmd(phys_enc); 171 181 172 - if (!phys_enc || !mode || !adj_mode) { 182 + if (!mode || !adj_mode) { 173 183 DPU_ERROR("invalid args\n"); 174 184 return; 175 185 } ··· 188 198 u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR; 189 199 bool do_log = false; 190 200 191 - if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl) 201 + if 
(!phys_enc->hw_pp) 192 202 return -EINVAL; 193 203 194 204 cmd_enc->pp_timeout_report_cnt++; ··· 237 247 struct dpu_encoder_wait_info wait_info; 238 248 int ret; 239 249 240 - if (!phys_enc) { 241 - DPU_ERROR("invalid encoder\n"); 242 - return -EINVAL; 243 - } 244 - 245 250 wait_info.wq = &phys_enc->pending_kickoff_wq; 246 251 wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt; 247 252 wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; ··· 258 273 int ret = 0; 259 274 int refcount; 260 275 261 - if (!phys_enc || !phys_enc->hw_pp) { 276 + if (!phys_enc->hw_pp) { 262 277 DPU_ERROR("invalid encoder\n"); 263 278 return -EINVAL; 264 279 } ··· 299 314 static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc, 300 315 bool enable) 301 316 { 302 - if (!phys_enc) 303 - return; 304 - 305 317 trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent), 306 318 phys_enc->hw_pp->idx - PINGPONG_0, 307 319 enable, atomic_read(&phys_enc->vblank_refcount)); ··· 333 351 u32 vsync_hz; 334 352 struct dpu_kms *dpu_kms; 335 353 336 - if (!phys_enc || !phys_enc->hw_pp) { 354 + if (!phys_enc->hw_pp) { 337 355 DPU_ERROR("invalid encoder\n"); 338 356 return; 339 357 } ··· 410 428 struct dpu_encoder_phys_cmd *cmd_enc = 411 429 to_dpu_encoder_phys_cmd(phys_enc); 412 430 413 - if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp 414 - || !phys_enc->hw_ctl->ops.setup_intf_cfg) { 431 + if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) { 415 432 DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0); 416 433 return; 417 434 } ··· 439 458 struct dpu_hw_ctl *ctl; 440 459 u32 flush_mask = 0; 441 460 442 - if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) { 461 + if (!phys_enc->hw_pp) { 443 462 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); 444 463 return; 445 464 } ··· 461 480 struct dpu_encoder_phys_cmd *cmd_enc = 462 481 to_dpu_encoder_phys_cmd(phys_enc); 463 482 464 - if (!phys_enc || !phys_enc->hw_pp) { 483 + if (!phys_enc->hw_pp) { 465 484 
DPU_ERROR("invalid phys encoder\n"); 466 485 return; 467 486 } ··· 480 499 static void _dpu_encoder_phys_cmd_connect_te( 481 500 struct dpu_encoder_phys *phys_enc, bool enable) 482 501 { 483 - if (!phys_enc || !phys_enc->hw_pp || 484 - !phys_enc->hw_pp->ops.connect_external_te) 502 + if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te) 485 503 return; 486 504 487 505 trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable); ··· 498 518 { 499 519 struct dpu_hw_pingpong *hw_pp; 500 520 501 - if (!phys_enc || !phys_enc->hw_pp) 521 + if (!phys_enc->hw_pp) 502 522 return -EINVAL; 503 523 504 524 if (!dpu_encoder_phys_cmd_is_master(phys_enc)) ··· 516 536 struct dpu_encoder_phys_cmd *cmd_enc = 517 537 to_dpu_encoder_phys_cmd(phys_enc); 518 538 519 - if (!phys_enc || !phys_enc->hw_pp) { 539 + if (!phys_enc->hw_pp) { 520 540 DPU_ERROR("invalid encoder\n"); 521 541 return; 522 542 } ··· 539 559 struct dpu_encoder_phys_cmd *cmd_enc = 540 560 to_dpu_encoder_phys_cmd(phys_enc); 541 561 542 - if (!phys_enc) { 543 - DPU_ERROR("invalid encoder\n"); 544 - return; 545 - } 546 562 kfree(cmd_enc); 547 563 } 548 564 ··· 556 580 to_dpu_encoder_phys_cmd(phys_enc); 557 581 int ret; 558 582 559 - if (!phys_enc || !phys_enc->hw_pp) { 583 + if (!phys_enc->hw_pp) { 560 584 DPU_ERROR("invalid encoder\n"); 561 585 return; 562 586 } ··· 590 614 struct dpu_encoder_wait_info wait_info; 591 615 int ret; 592 616 593 - if (!phys_enc || !phys_enc->hw_ctl) { 594 - DPU_ERROR("invalid argument(s)\n"); 595 - return -EINVAL; 596 - } 597 - 598 617 wait_info.wq = &phys_enc->pending_kickoff_wq; 599 618 wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt; 600 619 wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; ··· 610 639 { 611 640 int rc; 612 641 613 - if (!phys_enc) 614 - return -EINVAL; 615 - 616 642 rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc); 617 643 if (rc) { 618 644 DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n", ··· 625 657 { 626 658 int rc = 0; 627 659 struct 
dpu_encoder_phys_cmd *cmd_enc; 628 - 629 - if (!phys_enc) 630 - return -EINVAL; 631 660 632 661 cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); 633 662 ··· 645 680 int rc = 0; 646 681 struct dpu_encoder_phys_cmd *cmd_enc; 647 682 struct dpu_encoder_wait_info wait_info; 648 - 649 - if (!phys_enc) 650 - return -EINVAL; 651 683 652 684 cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); 653 685 ··· 677 715 static void dpu_encoder_phys_cmd_trigger_start( 678 716 struct dpu_encoder_phys *phys_enc) 679 717 { 680 - if (!phys_enc) 681 - return; 682 - 683 718 dpu_encoder_helper_trigger_start(phys_enc); 684 719 } 685 720 ··· 775 816 DPU_DEBUG_CMDENC(cmd_enc, "created\n"); 776 817 777 818 return phys_enc; 778 - 779 - return ERR_PTR(ret); 780 819 }
+25 -48
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
··· 220 220 const struct drm_display_mode *mode, 221 221 struct drm_display_mode *adj_mode) 222 222 { 223 - if (phys_enc) 224 - DPU_DEBUG_VIDENC(phys_enc, "\n"); 223 + DPU_DEBUG_VIDENC(phys_enc, "\n"); 225 224 226 225 /* 227 226 * Modifying mode has consequences when the mode comes back to us ··· 238 239 unsigned long lock_flags; 239 240 struct dpu_hw_intf_cfg intf_cfg = { 0 }; 240 241 241 - if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) { 242 + if (!phys_enc->hw_ctl->ops.setup_intf_cfg) { 242 243 DPU_ERROR("invalid encoder %d\n", phys_enc != 0); 243 244 return; 244 245 } ··· 279 280 phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, 280 281 &timing_params, fmt); 281 282 phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); 283 + 284 + /* setup which pp blk will connect to this intf */ 285 + if (phys_enc->hw_intf->ops.bind_pingpong_blk) 286 + phys_enc->hw_intf->ops.bind_pingpong_blk( 287 + phys_enc->hw_intf, 288 + true, 289 + phys_enc->hw_pp->idx); 290 + 282 291 spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); 283 292 284 293 programmable_fetch_config(phys_enc, &timing_params); ··· 300 293 u32 flush_register = 0; 301 294 int new_cnt = -1, old_cnt = -1; 302 295 303 - if (!phys_enc) 304 - return; 305 - 306 296 hw_ctl = phys_enc->hw_ctl; 307 - if (!hw_ctl) 308 - return; 309 297 310 298 DPU_ATRACE_BEGIN("vblank_irq"); 311 299 ··· 316 314 * so we need to double-check with hw that it accepted the flush bits 317 315 */ 318 316 spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); 319 - if (hw_ctl && hw_ctl->ops.get_flush_register) 317 + if (hw_ctl->ops.get_flush_register) 320 318 flush_register = hw_ctl->ops.get_flush_register(hw_ctl); 321 319 322 320 if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl))) ··· 336 334 static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx) 337 335 { 338 336 struct dpu_encoder_phys *phys_enc = arg; 339 - 340 - if (!phys_enc) 341 - return; 342 337 343 338 if 
(phys_enc->parent_ops->handle_underrun_virt) 344 339 phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent, ··· 373 374 struct drm_display_mode *mode, 374 375 struct drm_display_mode *adj_mode) 375 376 { 376 - if (!phys_enc) { 377 - DPU_ERROR("invalid encoder/kms\n"); 378 - return; 379 - } 380 - 381 377 if (adj_mode) { 382 378 phys_enc->cached_mode = *adj_mode; 383 379 drm_mode_debug_printmodeline(adj_mode); ··· 388 394 { 389 395 int ret = 0; 390 396 int refcount; 391 - 392 - if (!phys_enc) { 393 - DPU_ERROR("invalid encoder\n"); 394 - return -EINVAL; 395 - } 396 397 397 398 refcount = atomic_read(&phys_enc->vblank_refcount); 398 399 ··· 424 435 { 425 436 struct dpu_hw_ctl *ctl; 426 437 u32 flush_mask = 0; 438 + u32 intf_flush_mask = 0; 427 439 428 440 ctl = phys_enc->hw_ctl; 429 441 ··· 449 459 ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx); 450 460 ctl->ops.update_pending_flush(ctl, flush_mask); 451 461 462 + if (ctl->ops.get_bitmask_active_intf) 463 + ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask, 464 + phys_enc->hw_intf->idx); 465 + 466 + if (ctl->ops.update_pending_intf_flush) 467 + ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask); 468 + 452 469 skip_flush: 453 470 DPU_DEBUG_VIDENC(phys_enc, 454 - "update pending flush ctl %d flush_mask %x\n", 455 - ctl->idx - CTL_0, flush_mask); 471 + "update pending flush ctl %d flush_mask 0%x intf_mask 0x%x\n", 472 + ctl->idx - CTL_0, flush_mask, intf_flush_mask); 473 + 456 474 457 475 /* ctl_flush & timing engine enable will be triggered by framework */ 458 476 if (phys_enc->enable_state == DPU_ENC_DISABLED) ··· 469 471 470 472 static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc) 471 473 { 472 - if (!phys_enc) { 473 - DPU_ERROR("invalid encoder\n"); 474 - return; 475 - } 476 - 477 474 DPU_DEBUG_VIDENC(phys_enc, "\n"); 478 475 kfree(phys_enc); 479 476 } ··· 485 492 { 486 493 struct dpu_encoder_wait_info wait_info; 487 494 int ret; 488 - 489 - if (!phys_enc) 
{ 490 - pr_err("invalid encoder\n"); 491 - return -EINVAL; 492 - } 493 495 494 496 wait_info.wq = &phys_enc->pending_kickoff_wq; 495 497 wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt; ··· 531 543 struct dpu_hw_ctl *ctl; 532 544 int rc; 533 545 534 - if (!phys_enc) { 535 - DPU_ERROR("invalid encoder/parameters\n"); 536 - return; 537 - } 538 - 539 546 ctl = phys_enc->hw_ctl; 540 - if (!ctl || !ctl->ops.wait_reset_status) 547 + if (!ctl->ops.wait_reset_status) 541 548 return; 542 549 543 550 /* ··· 552 569 unsigned long lock_flags; 553 570 int ret; 554 571 555 - if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) { 572 + if (!phys_enc->parent || !phys_enc->parent->dev) { 556 573 DPU_ERROR("invalid encoder/device\n"); 557 574 return; 558 575 } 559 576 560 - if (!phys_enc->hw_intf || !phys_enc->hw_ctl) { 577 + if (!phys_enc->hw_intf) { 561 578 DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", 562 579 phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0); 563 580 return; ··· 622 639 { 623 640 int ret; 624 641 625 - if (!phys_enc) 626 - return; 627 - 628 642 trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent), 629 643 phys_enc->hw_intf->idx - INTF_0, 630 644 enable, ··· 642 662 static int dpu_encoder_phys_vid_get_line_count( 643 663 struct dpu_encoder_phys *phys_enc) 644 664 { 645 - if (!phys_enc) 646 - return -EINVAL; 647 - 648 665 if (!dpu_encoder_phys_vid_is_master(phys_enc)) 649 666 return -EINVAL; 650 667
+18
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
··· 489 489 true, 4, DPU_FORMAT_FLAG_COMPRESSED, 490 490 DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), 491 491 492 + /* ARGB8888 and ABGR8888 purposely have the same color 493 + * ordering. The hardware only supports ABGR8888 UBWC 494 + * natively. 495 + */ 496 + INTERLEAVED_RGB_FMT_TILED(ARGB8888, 497 + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 498 + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, 499 + true, 4, DPU_FORMAT_FLAG_COMPRESSED, 500 + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), 501 + 492 502 INTERLEAVED_RGB_FMT_TILED(XBGR8888, 503 + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 504 + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, 505 + false, 4, DPU_FORMAT_FLAG_COMPRESSED, 506 + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), 507 + 508 + INTERLEAVED_RGB_FMT_TILED(XRGB8888, 493 509 COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, 494 510 C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, 495 511 false, 4, DPU_FORMAT_FLAG_COMPRESSED, ··· 566 550 { 567 551 static const struct dpu_media_color_map dpu_media_ubwc_map[] = { 568 552 {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC}, 553 + {DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC}, 569 554 {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC}, 555 + {DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC}, 570 556 {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC}, 571 557 {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC}, 572 558 {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+209 -32
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
··· 11 11 #include "dpu_hw_catalog_format.h" 12 12 #include "dpu_kms.h" 13 13 14 - #define VIG_SDM845_MASK \ 15 - (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\ 14 + #define VIG_MASK \ 15 + (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\ 16 16 BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\ 17 17 BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT)) 18 + 19 + #define VIG_SDM845_MASK \ 20 + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3)) 21 + 22 + #define VIG_SC7180_MASK \ 23 + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4)) 18 24 19 25 #define DMA_SDM845_MASK \ 20 26 (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\ ··· 32 26 33 27 #define MIXER_SDM845_MASK \ 34 28 (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER)) 29 + 30 + #define MIXER_SC7180_MASK \ 31 + (BIT(DPU_DIM_LAYER)) 35 32 36 33 #define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER) 37 34 ··· 67 58 .has_src_split = true, 68 59 .has_dim_layer = true, 69 60 .has_idle_pc = true, 61 + .has_3d_merge = true, 70 62 }; 71 63 72 - static struct dpu_mdp_cfg sdm845_mdp[] = { 64 + static const struct dpu_caps sc7180_dpu_caps = { 65 + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 66 + .max_mixer_blendstages = 0x9, 67 + .qseed_type = DPU_SSPP_SCALER_QSEED4, 68 + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, 69 + .ubwc_version = DPU_HW_UBWC_VER_20, 70 + .has_dim_layer = true, 71 + .has_idle_pc = true, 72 + }; 73 + 74 + static const struct dpu_mdp_cfg sdm845_mdp[] = { 73 75 { 74 76 .name = "top_0", .id = MDP_TOP, 75 77 .base = 0x0, .len = 0x45C, ··· 105 85 }, 106 86 }; 107 87 88 + static const struct dpu_mdp_cfg sc7180_mdp[] = { 89 + { 90 + .name = "top_0", .id = MDP_TOP, 91 + .base = 0x0, .len = 0x494, 92 + .features = 0, 93 + .highest_bank_bit = 0x3, 94 + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { 95 + .reg_off = 0x2AC, .bit_off = 0}, 96 + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { 97 + .reg_off = 0x2AC, .bit_off = 8}, 98 + .clk_ctrls[DPU_CLK_CTRL_DMA1] = { 99 + .reg_off = 0x2B4, .bit_off = 8}, 
100 + .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { 101 + .reg_off = 0x2BC, .bit_off = 8}, 102 + }, 103 + }; 104 + 108 105 /************************************************************* 109 106 * CTL sub blocks config 110 107 *************************************************************/ 111 - static struct dpu_ctl_cfg sdm845_ctl[] = { 108 + static const struct dpu_ctl_cfg sdm845_ctl[] = { 112 109 { 113 110 .name = "ctl_0", .id = CTL_0, 114 111 .base = 0x1000, .len = 0xE4, ··· 153 116 }, 154 117 }; 155 118 119 + static const struct dpu_ctl_cfg sc7180_ctl[] = { 120 + { 121 + .name = "ctl_0", .id = CTL_0, 122 + .base = 0x1000, .len = 0xE4, 123 + .features = BIT(DPU_CTL_ACTIVE_CFG) 124 + }, 125 + { 126 + .name = "ctl_1", .id = CTL_1, 127 + .base = 0x1200, .len = 0xE4, 128 + .features = BIT(DPU_CTL_ACTIVE_CFG) 129 + }, 130 + { 131 + .name = "ctl_2", .id = CTL_2, 132 + .base = 0x1400, .len = 0xE4, 133 + .features = BIT(DPU_CTL_ACTIVE_CFG) 134 + }, 135 + }; 136 + 156 137 /************************************************************* 157 138 * SSPP sub blocks config 158 139 *************************************************************/ ··· 183 128 .maxvdeciexp = MAX_VERT_DECIMATION, 184 129 }; 185 130 186 - #define _VIG_SBLK(num, sdma_pri) \ 131 + #define _VIG_SBLK(num, sdma_pri, qseed_ver) \ 187 132 { \ 188 133 .common = &sdm845_sspp_common, \ 189 134 .maxdwnscale = MAX_DOWNSCALE_RATIO, \ ··· 192 137 .src_blk = {.name = STRCAT("sspp_src_", num), \ 193 138 .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \ 194 139 .scaler_blk = {.name = STRCAT("sspp_scaler", num), \ 195 - .id = DPU_SSPP_SCALER_QSEED3, \ 140 + .id = qseed_ver, \ 196 141 .base = 0xa00, .len = 0xa0,}, \ 197 142 .csc_blk = {.name = STRCAT("sspp_csc", num), \ 198 143 .id = DPU_SSPP_CSC_10BIT, \ ··· 217 162 .virt_num_formats = ARRAY_SIZE(plane_formats), \ 218 163 } 219 164 220 - static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5); 221 - static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = 
_VIG_SBLK("1", 6); 222 - static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7); 223 - static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8); 165 + static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = 166 + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3); 167 + static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = 168 + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3); 169 + static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = 170 + _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3); 171 + static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = 172 + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3); 224 173 225 174 static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1); 226 175 static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2); ··· 243 184 .clk_ctrl = _clkctrl \ 244 185 } 245 186 246 - static struct dpu_sspp_cfg sdm845_sspp[] = { 187 + static const struct dpu_sspp_cfg sdm845_sspp[] = { 247 188 SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK, 248 189 sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), 249 190 SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK, ··· 262 203 sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), 263 204 }; 264 205 206 + static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 = 207 + _VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4); 208 + 209 + static const struct dpu_sspp_cfg sc7180_sspp[] = { 210 + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, 211 + sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), 212 + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, 213 + sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), 214 + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, 215 + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), 216 + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, 217 + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), 218 + }; 219 + 265 220 
/************************************************************* 266 221 * MIXER sub blocks config 267 222 *************************************************************/ 223 + 224 + /* SDM845 */ 225 + 268 226 static const struct dpu_lm_sub_blks sdm845_lm_sblk = { 269 227 .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 270 228 .maxblendstages = 11, /* excluding base layer */ ··· 291 215 }, 292 216 }; 293 217 294 - #define LM_BLK(_name, _id, _base, _pp, _lmpair) \ 218 + #define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \ 295 219 { \ 296 220 .name = _name, .id = _id, \ 297 221 .base = _base, .len = 0x320, \ 298 - .features = MIXER_SDM845_MASK, \ 299 - .sblk = &sdm845_lm_sblk, \ 222 + .features = _fmask, \ 223 + .sblk = _sblk, \ 300 224 .pingpong = _pp, \ 301 225 .lm_pair_mask = (1 << _lmpair) \ 302 226 } 303 227 304 - static struct dpu_lm_cfg sdm845_lm[] = { 305 - LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1), 306 - LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0), 307 - LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5), 308 - LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0), 309 - LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0), 310 - LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2), 228 + static const struct dpu_lm_cfg sdm845_lm[] = { 229 + LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK, 230 + &sdm845_lm_sblk, PINGPONG_0, LM_1), 231 + LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK, 232 + &sdm845_lm_sblk, PINGPONG_1, LM_0), 233 + LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK, 234 + &sdm845_lm_sblk, PINGPONG_2, LM_5), 235 + LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK, 236 + &sdm845_lm_sblk, PINGPONG_MAX, 0), 237 + LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK, 238 + &sdm845_lm_sblk, PINGPONG_MAX, 0), 239 + LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK, 240 + &sdm845_lm_sblk, PINGPONG_3, LM_2), 241 + }; 242 + 243 + /* SC7180 */ 244 + 245 + static const struct dpu_lm_sub_blks sc7180_lm_sblk = { 246 + .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, 247 + .maxblendstages = 7, /* 
excluding base layer */ 248 + .blendstage_base = { /* offsets relative to mixer base */ 249 + 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0 250 + }, 251 + }; 252 + 253 + static const struct dpu_lm_cfg sc7180_lm[] = { 254 + LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK, 255 + &sc7180_lm_sblk, PINGPONG_0, LM_1), 256 + LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK, 257 + &sc7180_lm_sblk, PINGPONG_1, LM_0), 311 258 }; 312 259 313 260 /************************************************************* ··· 363 264 .sblk = &sdm845_pp_sblk \ 364 265 } 365 266 366 - static struct dpu_pingpong_cfg sdm845_pp[] = { 267 + static const struct dpu_pingpong_cfg sdm845_pp[] = { 367 268 PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000), 368 269 PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800), 369 270 PP_BLK("pingpong_2", PINGPONG_2, 0x71000), 370 271 PP_BLK("pingpong_3", PINGPONG_3, 0x71800), 272 + }; 273 + 274 + static struct dpu_pingpong_cfg sc7180_pp[] = { 275 + PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000), 276 + PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800), 371 277 }; 372 278 373 279 /************************************************************* ··· 382 278 {\ 383 279 .name = _name, .id = _id, \ 384 280 .base = _base, .len = 0x280, \ 281 + .features = BIT(DPU_CTL_ACTIVE_CFG), \ 385 282 .type = _type, \ 386 283 .controller_id = _ctrl_id, \ 387 284 .prog_fetch_lines_worst_case = 24 \ 388 285 } 389 286 390 - static struct dpu_intf_cfg sdm845_intf[] = { 287 + static const struct dpu_intf_cfg sdm845_intf[] = { 391 288 INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0), 392 289 INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0), 393 290 INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1), 394 291 INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1), 395 292 }; 396 293 294 + static const struct dpu_intf_cfg sc7180_intf[] = { 295 + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0), 296 + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0), 297 + }; 298 + 397 299 
/************************************************************* 398 300 * VBIF sub blocks config 399 301 *************************************************************/ 400 302 /* VBIF QOS remap */ 401 - static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6}; 402 - static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; 303 + static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6}; 304 + static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; 403 305 404 - static struct dpu_vbif_cfg sdm845_vbif[] = { 306 + static const struct dpu_vbif_cfg sdm845_vbif[] = { 405 307 { 406 308 .name = "vbif_0", .id = VBIF_0, 407 309 .base = 0, .len = 0x1040, ··· 426 316 }, 427 317 }; 428 318 429 - static struct dpu_reg_dma_cfg sdm845_regdma = { 319 + static const struct dpu_reg_dma_cfg sdm845_regdma = { 430 320 .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c 431 321 }; 432 322 ··· 435 325 *************************************************************/ 436 326 437 327 /* SSPP QOS LUTs */ 438 - static struct dpu_qos_lut_entry sdm845_qos_linear[] = { 328 + static const struct dpu_qos_lut_entry sdm845_qos_linear[] = { 439 329 {.fl = 4, .lut = 0x357}, 440 330 {.fl = 5, .lut = 0x3357}, 441 331 {.fl = 6, .lut = 0x23357}, ··· 450 340 {.fl = 0, .lut = 0x11222222223357} 451 341 }; 452 342 453 - static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = { 343 + static const struct dpu_qos_lut_entry sc7180_qos_linear[] = { 344 + {.fl = 0, .lut = 0x0011222222335777}, 345 + }; 346 + 347 + static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = { 454 348 {.fl = 10, .lut = 0x344556677}, 455 349 {.fl = 11, .lut = 0x3344556677}, 456 350 {.fl = 12, .lut = 0x23344556677}, ··· 463 349 {.fl = 0, .lut = 0x112233344556677}, 464 350 }; 465 351 466 - static struct dpu_qos_lut_entry sdm845_qos_nrt[] = { 352 + static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = { 353 + {.fl = 0, .lut = 0x0011223344556677}, 354 + }; 355 + 356 + static const struct dpu_qos_lut_entry 
sdm845_qos_nrt[] = { 467 357 {.fl = 0, .lut = 0x0}, 468 358 }; 469 359 470 - static struct dpu_perf_cfg sdm845_perf_data = { 360 + static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { 361 + {.fl = 0, .lut = 0x0}, 362 + }; 363 + 364 + static const struct dpu_perf_cfg sdm845_perf_data = { 471 365 .max_bw_low = 6800000, 472 366 .max_bw_high = 6800000, 473 367 .min_core_ib = 2400000, ··· 514 392 }, 515 393 }; 516 394 395 + static const struct dpu_perf_cfg sc7180_perf_data = { 396 + .max_bw_low = 3900000, 397 + .max_bw_high = 5500000, 398 + .min_core_ib = 2400000, 399 + .min_llcc_ib = 800000, 400 + .min_dram_ib = 800000, 401 + .danger_lut_tbl = {0xff, 0xffff, 0x0}, 402 + .qos_lut_tbl = { 403 + {.nentry = ARRAY_SIZE(sc7180_qos_linear), 404 + .entries = sc7180_qos_linear 405 + }, 406 + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), 407 + .entries = sc7180_qos_macrotile 408 + }, 409 + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), 410 + .entries = sc7180_qos_nrt 411 + }, 412 + }, 413 + .cdp_cfg = { 414 + {.rd_enable = 1, .wr_enable = 1}, 415 + {.rd_enable = 1, .wr_enable = 0} 416 + }, 417 + }; 418 + 517 419 /************************************************************* 518 420 * Hardware catalog init 519 421 *************************************************************/ ··· 567 421 .reg_dma_count = 1, 568 422 .dma_cfg = sdm845_regdma, 569 423 .perf = sdm845_perf_data, 424 + .mdss_irqs = 0x3ff, 570 425 }; 571 426 } 572 427 573 - static struct dpu_mdss_hw_cfg_handler cfg_handler[] = { 428 + /* 429 + * sc7180_cfg_init(): populate sc7180 dpu sub-blocks reg offsets 430 + * and instance counts. 
431 + */ 432 + static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg) 433 + { 434 + *dpu_cfg = (struct dpu_mdss_cfg){ 435 + .caps = &sc7180_dpu_caps, 436 + .mdp_count = ARRAY_SIZE(sc7180_mdp), 437 + .mdp = sc7180_mdp, 438 + .ctl_count = ARRAY_SIZE(sc7180_ctl), 439 + .ctl = sc7180_ctl, 440 + .sspp_count = ARRAY_SIZE(sc7180_sspp), 441 + .sspp = sc7180_sspp, 442 + .mixer_count = ARRAY_SIZE(sc7180_lm), 443 + .mixer = sc7180_lm, 444 + .pingpong_count = ARRAY_SIZE(sc7180_pp), 445 + .pingpong = sc7180_pp, 446 + .intf_count = ARRAY_SIZE(sc7180_intf), 447 + .intf = sc7180_intf, 448 + .vbif_count = ARRAY_SIZE(sdm845_vbif), 449 + .vbif = sdm845_vbif, 450 + .reg_dma_count = 1, 451 + .dma_cfg = sdm845_regdma, 452 + .perf = sc7180_perf_data, 453 + .mdss_irqs = 0x3f, 454 + }; 455 + } 456 + 457 + static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = { 574 458 { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init}, 575 459 { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init}, 460 + { .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init}, 576 461 }; 577 462 578 463 void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+25 -13
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
··· 38 38 #define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */ 39 39 #define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */ 40 40 #define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */ 41 + #define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */ 41 42 42 43 43 44 #define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170) ··· 46 45 #define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400) 47 46 #define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410) 48 47 #define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500) 48 + #define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620) 49 49 50 50 51 51 #define DPU_HW_BLK_NAME_LEN 16 ··· 94 92 * @DPU_SSPP_SRC Src and fetch part of the pipes, 95 93 * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support 96 94 * @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support 95 + * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support 97 96 * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes 98 97 * @DPU_SSPP_CSC, Support of Color space converion 99 98 * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion ··· 113 110 DPU_SSPP_SRC = 0x1, 114 111 DPU_SSPP_SCALER_QSEED2, 115 112 DPU_SSPP_SCALER_QSEED3, 113 + DPU_SSPP_SCALER_QSEED4, 116 114 DPU_SSPP_SCALER_RGB, 117 115 DPU_SSPP_CSC, 118 116 DPU_SSPP_CSC_10BIT, ··· 170 166 */ 171 167 enum { 172 168 DPU_CTL_SPLIT_DISPLAY = 0x1, 169 + DPU_CTL_ACTIVE_CFG, 173 170 DPU_CTL_MAX 174 171 }; 175 172 ··· 274 269 */ 275 270 struct dpu_qos_lut_tbl { 276 271 u32 nentry; 277 - struct dpu_qos_lut_entry *entries; 272 + const struct dpu_qos_lut_entry *entries; 278 273 }; 279 274 280 275 /** ··· 288 283 * @has_src_split source split feature status 289 284 * @has_dim_layer dim layer feature status 290 285 * @has_idle_pc indicate if idle power collapse feature is supported 286 + * @has_3d_merge indicate if 3D merge is supported 291 287 */ 292 288 struct dpu_caps { 293 289 u32 
max_mixer_width; ··· 299 293 bool has_src_split; 300 294 bool has_dim_layer; 301 295 bool has_idle_pc; 296 + bool has_3d_merge; 302 297 }; 303 298 304 299 /** ··· 327 320 * @maxupscale: maxupscale ratio supported 328 321 * @smart_dma_priority: hw priority of rect1 of multirect pipe 329 322 * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps 323 + * @qseed_ver: qseed version 330 324 * @src_blk: 331 325 * @scaler_blk: 332 326 * @csc_blk: ··· 348 340 u32 maxupscale; 349 341 u32 smart_dma_priority; 350 342 u32 max_per_pipe_bw; 343 + u32 qseed_ver; 351 344 struct dpu_src_blk src_blk; 352 345 struct dpu_scaler_blk scaler_blk; 353 346 struct dpu_pp_blk csc_blk; ··· 520 511 */ 521 512 struct dpu_vbif_dynamic_ot_tbl { 522 513 u32 count; 523 - struct dpu_vbif_dynamic_ot_cfg *cfg; 514 + const struct dpu_vbif_dynamic_ot_cfg *cfg; 524 515 }; 525 516 526 517 /** ··· 530 521 */ 531 522 struct dpu_vbif_qos_tbl { 532 523 u32 npriority_lvl; 533 - u32 *priority_lvl; 524 + const u32 *priority_lvl; 534 525 }; 535 526 536 527 /** ··· 655 646 * @dma_formats Supported formats for dma pipe 656 647 * @cursor_formats Supported formats for cursor pipe 657 648 * @vig_formats Supported formats for vig pipe 649 + * @mdss_irqs: Bitmap with the irqs supported by the target 658 650 */ 659 651 struct dpu_mdss_cfg { 660 652 u32 hwversion; ··· 663 653 const struct dpu_caps *caps; 664 654 665 655 u32 mdp_count; 666 - struct dpu_mdp_cfg *mdp; 656 + const struct dpu_mdp_cfg *mdp; 667 657 668 658 u32 ctl_count; 669 - struct dpu_ctl_cfg *ctl; 659 + const struct dpu_ctl_cfg *ctl; 670 660 671 661 u32 sspp_count; 672 - struct dpu_sspp_cfg *sspp; 662 + const struct dpu_sspp_cfg *sspp; 673 663 674 664 u32 mixer_count; 675 - struct dpu_lm_cfg *mixer; 665 + const struct dpu_lm_cfg *mixer; 676 666 677 667 u32 pingpong_count; 678 - struct dpu_pingpong_cfg *pingpong; 668 + const struct dpu_pingpong_cfg *pingpong; 679 669 680 670 u32 intf_count; 681 - struct dpu_intf_cfg *intf; 671 + const struct 
dpu_intf_cfg *intf; 682 672 683 673 u32 vbif_count; 684 - struct dpu_vbif_cfg *vbif; 674 + const struct dpu_vbif_cfg *vbif; 685 675 686 676 u32 reg_dma_count; 687 677 struct dpu_reg_dma_cfg dma_cfg; ··· 691 681 /* Add additional block data structures here */ 692 682 693 683 struct dpu_perf_cfg perf; 694 - struct dpu_format_extended *dma_formats; 695 - struct dpu_format_extended *cursor_formats; 696 - struct dpu_format_extended *vig_formats; 684 + const struct dpu_format_extended *dma_formats; 685 + const struct dpu_format_extended *cursor_formats; 686 + const struct dpu_format_extended *vig_formats; 687 + 688 + unsigned long mdss_irqs; 697 689 }; 698 690 699 691 struct dpu_mdss_hw_cfg_handler {
+4
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
··· 6 6 7 7 static const uint32_t qcom_compressed_supported_formats[] = { 8 8 DRM_FORMAT_ABGR8888, 9 + DRM_FORMAT_ARGB8888, 9 10 DRM_FORMAT_XBGR8888, 11 + DRM_FORMAT_XRGB8888, 10 12 DRM_FORMAT_BGR565, 13 + 14 + DRM_FORMAT_NV12, 11 15 }; 12 16 13 17 static const uint32_t plane_formats[] = {
+85 -7
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
··· 22 22 #define CTL_PREPARE 0x0d0 23 23 #define CTL_SW_RESET 0x030 24 24 #define CTL_LAYER_EXTN_OFFSET 0x40 25 + #define CTL_INTF_ACTIVE 0x0F4 26 + #define CTL_INTF_FLUSH 0x110 27 + #define CTL_INTF_MASTER 0x134 25 28 26 29 #define CTL_MIXER_BORDER_OUT BIT(24) 27 30 #define CTL_FLUSH_MASK_CTL BIT(17) 28 31 29 32 #define DPU_REG_RESET_TIMEOUT_US 2000 33 + #define INTF_IDX 31 30 34 31 - static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl, 32 - struct dpu_mdss_cfg *m, 35 + static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl, 36 + const struct dpu_mdss_cfg *m, 33 37 void __iomem *addr, 34 38 struct dpu_hw_blk_reg_map *b) 35 39 { ··· 104 100 ctx->pending_flush_mask |= flushbits; 105 101 } 106 102 103 + static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx, 104 + u32 flushbits) 105 + { 106 + ctx->pending_intf_flush_mask |= flushbits; 107 + } 108 + 107 109 static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx) 108 110 { 109 111 return ctx->pending_flush_mask; 112 + } 113 + 114 + static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx) 115 + { 116 + 117 + if (ctx->pending_flush_mask & BIT(INTF_IDX)) 118 + DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH, 119 + ctx->pending_intf_flush_mask); 120 + 121 + DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); 110 122 } 111 123 112 124 static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx) ··· 238 218 break; 239 219 default: 240 220 return -EINVAL; 221 + } 222 + return 0; 223 + } 224 + 225 + static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx, 226 + u32 *flushbits, enum dpu_intf intf) 227 + { 228 + switch (intf) { 229 + case INTF_0: 230 + case INTF_1: 231 + *flushbits |= BIT(31); 232 + break; 233 + default: 234 + return 0; 235 + } 236 + return 0; 237 + } 238 + 239 + static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx, 240 + u32 *flushbits, enum dpu_intf intf) 241 + { 242 + switch (intf) { 243 + case INTF_0: 244 + *flushbits 
|= BIT(0); 245 + break; 246 + case INTF_1: 247 + *flushbits |= BIT(1); 248 + break; 249 + default: 250 + return 0; 241 251 } 242 252 return 0; 243 253 } ··· 472 422 DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3); 473 423 } 474 424 425 + 426 + static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, 427 + struct dpu_hw_intf_cfg *cfg) 428 + { 429 + struct dpu_hw_blk_reg_map *c = &ctx->hw; 430 + u32 intf_active = 0; 431 + u32 mode_sel = 0; 432 + 433 + if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD) 434 + mode_sel |= BIT(17); 435 + 436 + intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE); 437 + intf_active |= BIT(cfg->intf - INTF_0); 438 + 439 + DPU_REG_WRITE(c, CTL_TOP, mode_sel); 440 + DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); 441 + } 442 + 475 443 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, 476 444 struct dpu_hw_intf_cfg *cfg) 477 445 { ··· 523 455 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, 524 456 unsigned long cap) 525 457 { 458 + if (cap & BIT(DPU_CTL_ACTIVE_CFG)) { 459 + ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1; 460 + ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1; 461 + ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1; 462 + ops->get_bitmask_active_intf = 463 + dpu_hw_ctl_active_get_bitmask_intf; 464 + ops->update_pending_intf_flush = 465 + dpu_hw_ctl_update_pending_intf_flush; 466 + } else { 467 + ops->trigger_flush = dpu_hw_ctl_trigger_flush; 468 + ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg; 469 + ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf; 470 + } 526 471 ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush; 527 472 ops->update_pending_flush = dpu_hw_ctl_update_pending_flush; 528 473 ops->get_pending_flush = dpu_hw_ctl_get_pending_flush; 529 - ops->trigger_flush = dpu_hw_ctl_trigger_flush; 530 474 ops->get_flush_register = dpu_hw_ctl_get_flush_register; 531 475 ops->trigger_start = dpu_hw_ctl_trigger_start; 532 476 ops->trigger_pending = dpu_hw_ctl_trigger_pending; 533 - ops->setup_intf_cfg = 
dpu_hw_ctl_intf_cfg; 534 477 ops->reset = dpu_hw_ctl_reset_control; 535 478 ops->wait_reset_status = dpu_hw_ctl_wait_reset_status; 536 479 ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages; 537 480 ops->setup_blendstage = dpu_hw_ctl_setup_blendstage; 538 481 ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp; 539 482 ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer; 540 - ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf; 541 483 }; 542 484 543 485 static struct dpu_hw_blk_ops dpu_hw_ops; 544 486 545 487 struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx, 546 488 void __iomem *addr, 547 - struct dpu_mdss_cfg *m) 489 + const struct dpu_mdss_cfg *m) 548 490 { 549 491 struct dpu_hw_ctl *c; 550 - struct dpu_ctl_cfg *cfg; 492 + const struct dpu_ctl_cfg *cfg; 551 493 552 494 c = kzalloc(sizeof(*c), GFP_KERNEL); 553 495 if (!c)
+25 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
··· 91 91 u32 flushbits); 92 92 93 93 /** 94 + * OR in the given flushbits to the cached pending_intf_flush_mask 95 + * No effect on hardware 96 + * @ctx : ctl path ctx pointer 97 + * @flushbits : module flushmask 98 + */ 99 + void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx, 100 + u32 flushbits); 101 + 102 + /** 94 103 * Write the value of the pending_flush_mask to hardware 95 104 * @ctx : ctl path ctx pointer 96 105 */ ··· 139 130 uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx, 140 131 enum dpu_lm blk); 141 132 133 + /** 134 + * Query the value of the intf flush mask 135 + * No effect on hardware 136 + * @ctx : ctl path ctx pointer 137 + */ 142 138 int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx, 143 139 u32 *flushbits, 144 140 enum dpu_intf blk); 141 + 142 + /** 143 + * Query the value of the intf active flush mask 144 + * No effect on hardware 145 + * @ctx : ctl path ctx pointer 146 + */ 147 + int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx, 148 + u32 *flushbits, enum dpu_intf blk); 145 149 146 150 /** 147 151 * Set all blend stages to disabled ··· 181 159 * @mixer_count: number of mixers 182 160 * @mixer_hw_caps: mixer hardware capabilities 183 161 * @pending_flush_mask: storage for pending ctl_flush managed via ops 162 + * @pending_intf_flush_mask: pending INTF flush 184 163 * @ops: operation list 185 164 */ 186 165 struct dpu_hw_ctl { ··· 194 171 int mixer_count; 195 172 const struct dpu_lm_cfg *mixer_hw_caps; 196 173 u32 pending_flush_mask; 174 + u32 pending_intf_flush_mask; 197 175 198 176 /* ops */ 199 177 struct dpu_hw_ctl_ops ops; ··· 219 195 */ 220 196 struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx, 221 197 void __iomem *addr, 222 - struct dpu_mdss_cfg *m); 198 + const struct dpu_mdss_cfg *m); 223 199 224 200 /** 225 201 * dpu_hw_ctl_destroy(): Destroys ctl driver context
+16 -6
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
··· 800 800 start_idx = reg_idx * 32; 801 801 end_idx = start_idx + 32; 802 802 803 - if (start_idx >= ARRAY_SIZE(dpu_irq_map) || 804 - end_idx > ARRAY_SIZE(dpu_irq_map)) 803 + if (!test_bit(reg_idx, &intr->irq_mask) || 804 + start_idx >= ARRAY_SIZE(dpu_irq_map)) 805 805 continue; 806 806 807 807 /* ··· 955 955 if (!intr) 956 956 return -EINVAL; 957 957 958 - for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) 959 - DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff); 958 + for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { 959 + if (test_bit(i, &intr->irq_mask)) 960 + DPU_REG_WRITE(&intr->hw, 961 + dpu_intr_set[i].clr_off, 0xffffffff); 962 + } 960 963 961 964 /* ensure register writes go through */ 962 965 wmb(); ··· 974 971 if (!intr) 975 972 return -EINVAL; 976 973 977 - for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) 978 - DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000); 974 + for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { 975 + if (test_bit(i, &intr->irq_mask)) 976 + DPU_REG_WRITE(&intr->hw, 977 + dpu_intr_set[i].en_off, 0x00000000); 978 + } 979 979 980 980 /* ensure register writes go through */ 981 981 wmb(); ··· 997 991 998 992 spin_lock_irqsave(&intr->irq_lock, irq_flags); 999 993 for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { 994 + if (!test_bit(i, &intr->irq_mask)) 995 + continue; 996 + 1000 997 /* Read interrupt status */ 1001 998 intr->save_irq_status[i] = DPU_REG_READ(&intr->hw, 1002 999 dpu_intr_set[i].status_off); ··· 1124 1115 return ERR_PTR(-ENOMEM); 1125 1116 } 1126 1117 1118 + intr->irq_mask = m->mdss_irqs; 1127 1119 spin_lock_init(&intr->irq_lock); 1128 1120 1129 1121 return intr;
+1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
··· 187 187 u32 *save_irq_status; 188 188 u32 irq_idx_tbl_size; 189 189 spinlock_t irq_lock; 190 + unsigned long irq_mask; 190 191 }; 191 192 192 193 /**
+32 -4
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
··· 56 56 #define INTF_FRAME_COUNT 0x0AC 57 57 #define INTF_LINE_COUNT 0x0B0 58 58 59 - static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf, 60 - struct dpu_mdss_cfg *m, 59 + #define INTF_MUX 0x25C 60 + 61 + static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf, 62 + const struct dpu_mdss_cfg *m, 61 63 void __iomem *addr, 62 64 struct dpu_hw_blk_reg_map *b) 63 65 { ··· 220 218 DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable); 221 219 } 222 220 221 + static void dpu_hw_intf_bind_pingpong_blk( 222 + struct dpu_hw_intf *intf, 223 + bool enable, 224 + const enum dpu_pingpong pp) 225 + { 226 + struct dpu_hw_blk_reg_map *c; 227 + u32 mux_cfg; 228 + 229 + if (!intf) 230 + return; 231 + 232 + c = &intf->hw; 233 + 234 + mux_cfg = DPU_REG_READ(c, INTF_MUX); 235 + mux_cfg &= ~0xf; 236 + 237 + if (enable) 238 + mux_cfg |= (pp - PINGPONG_0) & 0x7; 239 + else 240 + mux_cfg |= 0xf; 241 + 242 + DPU_REG_WRITE(c, INTF_MUX, mux_cfg); 243 + } 244 + 223 245 static void dpu_hw_intf_get_status( 224 246 struct dpu_hw_intf *intf, 225 247 struct intf_status *s) ··· 280 254 ops->get_status = dpu_hw_intf_get_status; 281 255 ops->enable_timing = dpu_hw_intf_enable_timing_engine; 282 256 ops->get_line_count = dpu_hw_intf_get_line_count; 257 + if (cap & BIT(DPU_CTL_ACTIVE_CFG)) 258 + ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk; 283 259 } 284 260 285 261 static struct dpu_hw_blk_ops dpu_hw_ops; 286 262 287 263 struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx, 288 264 void __iomem *addr, 289 - struct dpu_mdss_cfg *m) 265 + const struct dpu_mdss_cfg *m) 290 266 { 291 267 struct dpu_hw_intf *c; 292 - struct dpu_intf_cfg *cfg; 268 + const struct dpu_intf_cfg *cfg; 293 269 294 270 c = kzalloc(sizeof(*c), GFP_KERNEL); 295 271 if (!c)
+7 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
··· 52 52 * @ enable_timing: enable/disable timing engine 53 53 * @ get_status: returns if timing engine is enabled or not 54 54 * @ get_line_count: reads current vertical line counter 55 + * @bind_pingpong_blk: enable/disable the connection with pingpong which will 56 + * feed pixels to this interface 55 57 */ 56 58 struct dpu_hw_intf_ops { 57 59 void (*setup_timing_gen)(struct dpu_hw_intf *intf, ··· 70 68 struct intf_status *status); 71 69 72 70 u32 (*get_line_count)(struct dpu_hw_intf *intf); 71 + 72 + void (*bind_pingpong_blk)(struct dpu_hw_intf *intf, 73 + bool enable, 74 + const enum dpu_pingpong pp); 73 75 }; 74 76 75 77 struct dpu_hw_intf { ··· 98 92 */ 99 93 struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx, 100 94 void __iomem *addr, 101 - struct dpu_mdss_cfg *m); 95 + const struct dpu_mdss_cfg *m); 102 96 103 97 /** 104 98 * dpu_hw_intf_destroy(): Destroys INTF driver context
+7 -6
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
··· 24 24 #define LM_BLEND0_FG_ALPHA 0x04 25 25 #define LM_BLEND0_BG_ALPHA 0x08 26 26 27 - static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, 28 - struct dpu_mdss_cfg *m, 27 + static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, 28 + const struct dpu_mdss_cfg *m, 29 29 void __iomem *addr, 30 30 struct dpu_hw_blk_reg_map *b) 31 31 { ··· 147 147 DPU_REG_WRITE(c, LM_OP_MODE, op_mode); 148 148 } 149 149 150 - static void _setup_mixer_ops(struct dpu_mdss_cfg *m, 150 + static void _setup_mixer_ops(const struct dpu_mdss_cfg *m, 151 151 struct dpu_hw_lm_ops *ops, 152 152 unsigned long features) 153 153 { 154 154 ops->setup_mixer_out = dpu_hw_lm_setup_out; 155 - if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion)) 155 + if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) 156 + || IS_SC7180_TARGET(m->hwversion)) 156 157 ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845; 157 158 else 158 159 ops->setup_blend_config = dpu_hw_lm_setup_blend_config; ··· 165 164 166 165 struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx, 167 166 void __iomem *addr, 168 - struct dpu_mdss_cfg *m) 167 + const struct dpu_mdss_cfg *m) 169 168 { 170 169 struct dpu_hw_mixer *c; 171 - struct dpu_lm_cfg *cfg; 170 + const struct dpu_lm_cfg *cfg; 172 171 173 172 c = kzalloc(sizeof(*c), GFP_KERNEL); 174 173 if (!c)
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
··· 91 91 */ 92 92 struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx, 93 93 void __iomem *addr, 94 - struct dpu_mdss_cfg *m); 94 + const struct dpu_mdss_cfg *m); 95 95 96 96 /** 97 97 * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+4 -4
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
··· 28 28 #define PP_FBC_BUDGET_CTL 0x038 29 29 #define PP_FBC_LOSSY_MODE 0x03C 30 30 31 - static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp, 32 - struct dpu_mdss_cfg *m, 31 + static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp, 32 + const struct dpu_mdss_cfg *m, 33 33 void __iomem *addr, 34 34 struct dpu_hw_blk_reg_map *b) 35 35 { ··· 195 195 196 196 struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx, 197 197 void __iomem *addr, 198 - struct dpu_mdss_cfg *m) 198 + const struct dpu_mdss_cfg *m) 199 199 { 200 200 struct dpu_hw_pingpong *c; 201 - struct dpu_pingpong_cfg *cfg; 201 + const struct dpu_pingpong_cfg *cfg; 202 202 203 203 c = kzalloc(sizeof(*c), GFP_KERNEL); 204 204 if (!c)
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
··· 106 106 */ 107 107 struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx, 108 108 void __iomem *addr, 109 - struct dpu_mdss_cfg *m); 109 + const struct dpu_mdss_cfg *m); 110 110 111 111 /** 112 112 * dpu_hw_pingpong_destroy - destroys pingpong driver context
+5 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
··· 132 132 /* traffic shaper clock in Hz */ 133 133 #define TS_CLK 19200000 134 134 135 + 135 136 static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx, 136 137 int s_id, 137 138 u32 *idx) ··· 658 657 test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features)) 659 658 c->ops.setup_multirect = dpu_hw_sspp_setup_multirect; 660 659 661 - if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) { 660 + if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) || 661 + test_bit(DPU_SSPP_SCALER_QSEED4, &features)) { 662 662 c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3; 663 663 c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver; 664 664 } ··· 668 666 c->ops.setup_cdp = dpu_hw_sspp_setup_cdp; 669 667 } 670 668 671 - static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp, 669 + static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp, 672 670 void __iomem *addr, 673 671 struct dpu_mdss_cfg *catalog, 674 672 struct dpu_hw_blk_reg_map *b) ··· 698 696 bool is_virtual_pipe) 699 697 { 700 698 struct dpu_hw_pipe *hw_pipe; 701 - struct dpu_sspp_cfg *cfg; 699 + const struct dpu_sspp_cfg *cfg; 702 700 703 701 if (!addr || !catalog) 704 702 return ERR_PTR(-EINVAL);
+3 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
··· 27 27 */ 28 28 #define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \ 29 29 (1UL << DPU_SSPP_SCALER_QSEED2) | \ 30 - (1UL << DPU_SSPP_SCALER_QSEED3)) 30 + (1UL << DPU_SSPP_SCALER_QSEED3) | \ 31 + (1UL << DPU_SSPP_SCALER_QSEED4)) 31 32 32 33 /** 33 34 * Component indices ··· 374 373 struct dpu_hw_blk base; 375 374 struct dpu_hw_blk_reg_map hw; 376 375 struct dpu_mdss_cfg *catalog; 377 - struct dpu_mdp_cfg *mdp; 376 + const struct dpu_mdp_cfg *mdp; 378 377 379 378 /* Pipe */ 380 379 enum dpu_sspp idx;
+8 -19
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
··· 93 93 DEV_DBG("%pS->%s: enable '%s'\n", 94 94 __builtin_return_address(0), __func__, 95 95 clk_arry[i].clk_name); 96 - if (clk_arry[i].clk) { 97 - rc = clk_prepare_enable(clk_arry[i].clk); 98 - if (rc) 99 - DEV_ERR("%pS->%s: %s en fail. rc=%d\n", 100 - __builtin_return_address(0), 101 - __func__, 102 - clk_arry[i].clk_name, rc); 103 - } else { 104 - DEV_ERR("%pS->%s: '%s' is not available\n", 105 - __builtin_return_address(0), __func__, 106 - clk_arry[i].clk_name); 107 - rc = -EPERM; 108 - } 96 + rc = clk_prepare_enable(clk_arry[i].clk); 97 + if (rc) 98 + DEV_ERR("%pS->%s: %s en fail. rc=%d\n", 99 + __builtin_return_address(0), 100 + __func__, 101 + clk_arry[i].clk_name, rc); 109 102 110 103 if (rc && i) { 111 104 msm_dss_enable_clk(&clk_arry[i - 1], ··· 112 119 __builtin_return_address(0), __func__, 113 120 clk_arry[i].clk_name); 114 121 115 - if (clk_arry[i].clk) 116 - clk_disable_unprepare(clk_arry[i].clk); 117 - else 118 - DEV_ERR("%pS->%s: '%s' is not available\n", 119 - __builtin_return_address(0), __func__, 120 - clk_arry[i].clk_name); 122 + clk_disable_unprepare(clk_arry[i].clk); 121 123 } 122 124 } 123 125 ··· 175 187 continue; 176 188 mp->clk_config[i].rate = rate; 177 189 mp->clk_config[i].type = DSS_CLK_PCLK; 190 + mp->clk_config[i].max_rate = rate; 178 191 } 179 192 180 193 mp->num_clk = num_clk;
+1
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 1059 1059 1060 1060 static const struct of_device_id dpu_dt_match[] = { 1061 1061 { .compatible = "qcom,sdm845-dpu", }, 1062 + { .compatible = "qcom,sc7180-dpu", }, 1062 1063 {} 1063 1064 }; 1064 1065 MODULE_DEVICE_TABLE(of, dpu_dt_match);
+24 -10
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 53 53 R_MAX 54 54 }; 55 55 56 + /* 57 + * Default Preload Values 58 + */ 56 59 #define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4 57 60 #define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3 61 + #define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2 62 + #define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4 58 63 59 64 #define DEFAULT_REFRESH_RATE 60 60 65 ··· 482 477 scale_cfg->src_width[i] /= chroma_subsmpl_h; 483 478 scale_cfg->src_height[i] /= chroma_subsmpl_v; 484 479 } 485 - scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H; 486 - scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V; 480 + 481 + if (pdpu->pipe_hw->cap->features & 482 + BIT(DPU_SSPP_SCALER_QSEED4)) { 483 + scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H; 484 + scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V; 485 + } else { 486 + scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H; 487 + scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V; 488 + } 489 + 487 490 pstate->pixel_ext.num_ext_pxls_top[i] = 488 491 scale_cfg->src_height[i]; 489 492 pstate->pixel_ext.num_ext_pxls_left[i] = ··· 751 738 } else { 752 739 pstate[R0]->multirect_index = DPU_SSPP_RECT_0; 753 740 pstate[R1]->multirect_index = DPU_SSPP_RECT_1; 754 - }; 741 + } 755 742 756 743 DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n", 757 744 pstate[R0]->multirect_mode, pstate[R0]->multirect_index); ··· 871 858 pdpu->pipe_sblk->maxupscale << 16, 872 859 true, true); 873 860 if (ret) { 874 - DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret); 861 + DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret); 875 862 return ret; 876 863 } 877 864 if (!state->visible) ··· 897 884 (!(pdpu->features & DPU_SSPP_SCALER) || 898 885 !(pdpu->features & (BIT(DPU_SSPP_CSC) 899 886 | BIT(DPU_SSPP_CSC_10BIT))))) { 900 - DPU_ERROR_PLANE(pdpu, 887 + DPU_DEBUG_PLANE(pdpu, 901 888 "plane doesn't have scaler/csc for yuv\n"); 902 889 return -EINVAL; 903 890 904 891 /* check src bounds */ 905 892 } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) { 
906 - DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n", 893 + DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n", 907 894 DRM_RECT_ARG(&src)); 908 895 return -E2BIG; 909 896 ··· 912 899 (src.x1 & 0x1 || src.y1 & 0x1 || 913 900 drm_rect_width(&src) & 0x1 || 914 901 drm_rect_height(&src) & 0x1)) { 915 - DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n", 902 + DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n", 916 903 DRM_RECT_ARG(&src)); 917 904 return -EINVAL; 918 905 919 906 /* min dst support */ 920 907 } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) { 921 - DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n", 908 + DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n", 922 909 DRM_RECT_ARG(&dst)); 923 910 return -EINVAL; 924 911 925 912 /* check decimated source width */ 926 913 } else if (drm_rect_width(&src) > max_linewidth) { 927 - DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", 914 + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", 928 915 DRM_RECT_ARG(&src), max_linewidth); 929 916 return -E2BIG; 930 917 } ··· 1350 1337 pdpu->debugfs_root, &pdpu->debugfs_src); 1351 1338 1352 1339 if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || 1353 - cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) { 1340 + cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || 1341 + cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) { 1354 1342 dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler, 1355 1343 sblk->scaler_blk.base + cfg->base, 1356 1344 sblk->scaler_blk.len,
+3 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
··· 141 141 142 142 static int _dpu_rm_hw_blk_create( 143 143 struct dpu_rm *rm, 144 - struct dpu_mdss_cfg *cat, 144 + const struct dpu_mdss_cfg *cat, 145 145 void __iomem *mmio, 146 146 enum dpu_hw_blk_type type, 147 147 uint32_t id, 148 - void *hw_catalog_info) 148 + const void *hw_catalog_info) 149 149 { 150 150 struct dpu_rm_hw_blk *blk; 151 151 void *hw; ··· 215 215 216 216 /* Interrogate HW catalog and create tracking items for hw blocks */ 217 217 for (i = 0; i < cat->mixer_count; i++) { 218 - struct dpu_lm_cfg *lm = &cat->mixer[i]; 218 + const struct dpu_lm_cfg *lm = &cat->mixer[i]; 219 219 220 220 if (lm->pingpong == PINGPONG_MAX) { 221 221 DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+3 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
··· 299 299 entry = debugfs_create_dir("vbif", debugfs_root); 300 300 301 301 for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { 302 - struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; 302 + const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; 303 303 304 304 snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id); 305 305 ··· 318 318 (u32 *)&vbif->default_ot_wr_limit); 319 319 320 320 for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) { 321 - struct dpu_vbif_dynamic_ot_cfg *cfg = 321 + const struct dpu_vbif_dynamic_ot_cfg *cfg = 322 322 &vbif->dynamic_ot_rd_tbl.cfg[j]; 323 323 324 324 snprintf(vbif_name, sizeof(vbif_name), ··· 332 332 } 333 333 334 334 for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) { 335 - struct dpu_vbif_dynamic_ot_cfg *cfg = 335 + const struct dpu_vbif_dynamic_ot_cfg *cfg = 336 336 &vbif->dynamic_ot_wr_tbl.cfg[j]; 337 337 338 338 snprintf(vbif_name, sizeof(vbif_name),
+1 -1
drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
··· 121 121 if (mdp4_dsi_encoder->enabled) 122 122 return; 123 123 124 - mdp4_crtc_set_config(encoder->crtc, 124 + mdp4_crtc_set_config(encoder->crtc, 125 125 MDP4_DMA_CONFIG_PACK_ALIGN_MSB | 126 126 MDP4_DMA_CONFIG_DEFLKR_EN | 127 127 MDP4_DMA_CONFIG_DITHER_EN |
+1 -1
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
··· 902 902 major, minor); 903 903 ret = -ENXIO; 904 904 goto fail; 905 - }; 905 + } 906 906 907 907 /* only after mdp5_cfg global pointer's init can we access the hw */ 908 908 for (i = 0; i < num_handlers; i++) {
+2
drivers/gpu/drm/msm/dsi/dsi.h
··· 178 178 int msm_dsi_host_init(struct msm_dsi *msm_dsi); 179 179 int msm_dsi_runtime_suspend(struct device *dev); 180 180 int msm_dsi_runtime_resume(struct device *dev); 181 + int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host); 182 + int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host); 181 183 int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host); 182 184 int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host); 183 185 void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+24
drivers/gpu/drm/msm/dsi/dsi_cfg.c
··· 153 153 "iface", "bus", 154 154 }; 155 155 156 + static const char * const dsi_sc7180_bus_clk_names[] = { 157 + "iface", "bus", 158 + }; 159 + 156 160 static const struct msm_dsi_config sdm845_dsi_cfg = { 157 161 .io_offset = DSI_6G_REG_SHIFT, 158 162 .reg_cfg = { ··· 171 167 .num_dsi = 2, 172 168 }; 173 169 170 + static const struct msm_dsi_config sc7180_dsi_cfg = { 171 + .io_offset = DSI_6G_REG_SHIFT, 172 + .reg_cfg = { 173 + .num = 1, 174 + .regs = { 175 + {"vdda", 21800, 4 }, /* 1.2 V */ 176 + }, 177 + }, 178 + .bus_clk_names = dsi_sc7180_bus_clk_names, 179 + .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names), 180 + .io_start = { 0xae94000 }, 181 + .num_dsi = 1, 182 + }; 183 + 174 184 static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { 185 + .link_clk_set_rate = dsi_link_clk_set_rate_v2, 175 186 .link_clk_enable = dsi_link_clk_enable_v2, 176 187 .link_clk_disable = dsi_link_clk_disable_v2, 177 188 .clk_init_ver = dsi_clk_init_v2, ··· 198 179 }; 199 180 200 181 static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { 182 + .link_clk_set_rate = dsi_link_clk_set_rate_6g, 201 183 .link_clk_enable = dsi_link_clk_enable_6g, 202 184 .link_clk_disable = dsi_link_clk_disable_6g, 203 185 .clk_init_ver = NULL, ··· 210 190 }; 211 191 212 192 static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { 193 + .link_clk_set_rate = dsi_link_clk_set_rate_6g, 213 194 .link_clk_enable = dsi_link_clk_enable_6g, 214 195 .link_clk_disable = dsi_link_clk_disable_6g, 215 196 .clk_init_ver = dsi_clk_init_6g_v2, ··· 244 223 &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 245 224 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, 246 225 &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 226 + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, 227 + &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops}, 228 + 247 229 }; 248 230 249 231 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+2
drivers/gpu/drm/msm/dsi/dsi_cfg.h
··· 20 20 #define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002 21 21 #define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 22 22 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 23 + #define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 23 24 24 25 #define MSM_DSI_V2_VER_MINOR_8064 0x0 25 26 ··· 36 35 }; 37 36 38 37 struct msm_dsi_host_cfg_ops { 38 + int (*link_clk_set_rate)(struct msm_dsi_host *msm_host); 39 39 int (*link_clk_enable)(struct msm_dsi_host *msm_host); 40 40 void (*link_clk_disable)(struct msm_dsi_host *msm_host); 41 41 int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+33 -13
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 505 505 return dsi_bus_clk_enable(msm_host); 506 506 } 507 507 508 - int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) 508 + int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) 509 509 { 510 510 int ret; 511 511 ··· 515 515 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); 516 516 if (ret) { 517 517 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); 518 - goto error; 518 + return ret; 519 519 } 520 520 521 521 ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate); 522 522 if (ret) { 523 523 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); 524 - goto error; 524 + return ret; 525 525 } 526 526 527 527 if (msm_host->byte_intf_clk) { ··· 530 530 if (ret) { 531 531 pr_err("%s: Failed to set rate byte intf clk, %d\n", 532 532 __func__, ret); 533 - goto error; 533 + return ret; 534 534 } 535 535 } 536 + 537 + return 0; 538 + } 539 + 540 + 541 + int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) 542 + { 543 + int ret; 536 544 537 545 ret = clk_prepare_enable(msm_host->esc_clk); 538 546 if (ret) { ··· 581 573 return ret; 582 574 } 583 575 584 - int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host) 576 + int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host) 585 577 { 586 578 int ret; 587 579 ··· 592 584 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); 593 585 if (ret) { 594 586 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); 595 - goto error; 587 + return ret; 596 588 } 597 589 598 590 ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate); 599 591 if (ret) { 600 592 pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret); 601 - goto error; 593 + return ret; 602 594 } 603 595 604 596 ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate); 605 597 if (ret) { 606 598 pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret); 607 - goto error; 599 + return ret; 608 600 } 609 601 610 602 ret = clk_set_rate(msm_host->pixel_clk, 
msm_host->pixel_clk_rate); 611 603 if (ret) { 612 604 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); 613 - goto error; 605 + return ret; 614 606 } 607 + 608 + return 0; 609 + } 610 + 611 + int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host) 612 + { 613 + int ret; 615 614 616 615 ret = clk_prepare_enable(msm_host->byte_clk); 617 616 if (ret) { ··· 833 818 u32 flags = msm_host->mode_flags; 834 819 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; 835 820 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 836 - u32 data = 0; 821 + u32 data = 0, lane_ctrl = 0; 837 822 838 823 if (!enable) { 839 824 dsi_write(msm_host, REG_DSI_CTRL, 0); ··· 921 906 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, 922 907 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap)); 923 908 924 - if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) 909 + if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) { 910 + lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL); 925 911 dsi_write(msm_host, REG_DSI_LANE_CTRL, 926 - DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); 912 + lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); 913 + } 927 914 928 915 data |= DSI_CTRL_ENABLE; 929 916 ··· 2013 1996 * mdp clock need to be enabled to receive dsi interrupt 2014 1997 */ 2015 1998 pm_runtime_get_sync(&msm_host->pdev->dev); 1999 + cfg_hnd->ops->link_clk_set_rate(msm_host); 2016 2000 cfg_hnd->ops->link_clk_enable(msm_host); 2017 2001 2018 2002 /* TODO: vote for bus bandwidth */ ··· 2362 2344 } 2363 2345 2364 2346 pm_runtime_get_sync(&msm_host->pdev->dev); 2365 - ret = cfg_hnd->ops->link_clk_enable(msm_host); 2347 + ret = cfg_hnd->ops->link_clk_set_rate(msm_host); 2348 + if (!ret) 2349 + ret = cfg_hnd->ops->link_clk_enable(msm_host); 2366 2350 if (ret) { 2367 2351 pr_err("%s: failed to enable link clocks. ret=%d\n", 2368 2352 __func__, ret);
+41 -21
drivers/gpu/drm/msm/dsi/dsi_manager.c
··· 432 432 } 433 433 } 434 434 435 - if (panel) { 436 - ret = drm_panel_enable(panel); 437 - if (ret) { 438 - pr_err("%s: enable panel %d failed, %d\n", __func__, id, 439 - ret); 440 - goto panel_en_fail; 441 - } 442 - } 443 - 444 435 return; 445 436 446 - panel_en_fail: 447 - if (is_dual_dsi && msm_dsi1) 448 - msm_dsi_host_disable(msm_dsi1->host); 449 437 host1_en_fail: 450 438 msm_dsi_host_disable(host); 451 439 host_en_fail: ··· 452 464 453 465 static void dsi_mgr_bridge_enable(struct drm_bridge *bridge) 454 466 { 455 - DBG(""); 467 + int id = dsi_mgr_bridge_get_id(bridge); 468 + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 469 + struct drm_panel *panel = msm_dsi->panel; 470 + bool is_dual_dsi = IS_DUAL_DSI(); 471 + int ret; 472 + 473 + DBG("id=%d", id); 474 + if (!msm_dsi_device_connected(msm_dsi)) 475 + return; 476 + 477 + /* Do nothing with the host if it is slave-DSI in case of dual DSI */ 478 + if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) 479 + return; 480 + 481 + if (panel) { 482 + ret = drm_panel_enable(panel); 483 + if (ret) { 484 + pr_err("%s: enable panel %d failed, %d\n", __func__, id, 485 + ret); 486 + } 487 + } 456 488 } 457 489 458 490 static void dsi_mgr_bridge_disable(struct drm_bridge *bridge) 459 491 { 460 - DBG(""); 492 + int id = dsi_mgr_bridge_get_id(bridge); 493 + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 494 + struct drm_panel *panel = msm_dsi->panel; 495 + bool is_dual_dsi = IS_DUAL_DSI(); 496 + int ret; 497 + 498 + DBG("id=%d", id); 499 + if (!msm_dsi_device_connected(msm_dsi)) 500 + return; 501 + 502 + /* Do nothing with the host if it is slave-DSI in case of dual DSI */ 503 + if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) 504 + return; 505 + 506 + if (panel) { 507 + ret = drm_panel_disable(panel); 508 + if (ret) 509 + pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, 510 + ret); 511 + } 461 512 } 462 513 463 514 static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) ··· 521 494 */ 522 495 if (is_dual_dsi && 
!IS_MASTER_DSI_LINK(id)) 523 496 goto disable_phy; 524 - 525 - if (panel) { 526 - ret = drm_panel_disable(panel); 527 - if (ret) 528 - pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, 529 - ret); 530 - } 531 497 532 498 ret = msm_dsi_host_disable(host); 533 499 if (ret)
+1 -1
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
··· 101 101 102 102 gpiod_set_value_cansleep(gpio.gpiod, value); 103 103 } 104 - }; 104 + } 105 105 106 106 DBG("gpio off"); 107 107 }
+3 -1
drivers/gpu/drm/msm/msm_drv.c
··· 1192 1192 * the interfaces to our components list. 1193 1193 */ 1194 1194 if (of_device_is_compatible(dev->of_node, "qcom,mdss") || 1195 - of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) { 1195 + of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") || 1196 + of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) { 1196 1197 ret = of_platform_populate(dev->of_node, NULL, NULL, dev); 1197 1198 if (ret) { 1198 1199 DRM_DEV_ERROR(dev, "failed to populate children devices\n"); ··· 1318 1317 { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, 1319 1318 { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, 1320 1319 { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU }, 1320 + { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU }, 1321 1321 {} 1322 1322 }; 1323 1323 MODULE_DEVICE_TABLE(of, dt_match);
+7
drivers/gpu/drm/msm/msm_gpu.h
··· 111 111 struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk; 112 112 uint32_t fast_rate; 113 113 114 + /* The gfx-mem interconnect path that's used by all GPU types. */ 114 115 struct icc_path *icc_path; 116 + 117 + /* 118 + * Second interconnect path for some A3xx and all A4xx GPUs to the 119 + * On Chip MEMory (OCMEM). 120 + */ 121 + struct icc_path *ocmem_icc_path; 115 122 116 123 /* Hang and Inactivity Detection: 117 124 */