Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2025-09-04' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for v6.18:

Cross-subsystem Changes:

- Update a number of DT bindings for STM32MP25 Arm SoC

Core Changes:

gem:
- Simplify locking for GPUVM

panel-backlight-quirks:
- Add additional quirks for EDID, DMI, brightness

sched:
- Fix race condition in trace code
- Clean up

sysfb:
- Clean up

Driver Changes:

amdgpu:
- Give kernel jobs a unique id for better tracing

amdxdna:
- Improve error reporting

bridge:
- Improve ref counting on bridge management
- adv7511: Provide SPD and HDMI infoframes
- it6505: Replace crypto_shash with sha1()
- synopsys: Add support for DW DPTX Controller plus DT bindings

gud:
- Replace simple-KMS pipe with regular atomic helpers

imagination:
- Improve power management
- Add support for TH1520 GPU
- Support RISC-V architectures

ivpu:
- Clean up

nouveau:
- Improve error reporting

panthor:
- Fail VM bind if BO has offset
- Clean up

rcar-du:
- Make number of lanes configurable

rockchip:
- Add support for RK3588 DPTX output

rocket:
- Use kfree() and sizeof() correctly
- Test DMA status
- Clean up

sitronix:
- st7571-i2c: Add support for inverted displays and 2-bit grayscale
- Clean up

stm:
- ltdc: Add support for STM32MP257F-EV1 plus DT bindings

tidss:
- Convert to kernel's FIELD_ macros

v3d:
- Improve job management and locking

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://lore.kernel.org/r/20250904090932.GA193997@linux.fritz.box

+3877 -706
+21 -10
Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
··· 36 36 clocks: 37 37 maxItems: 1 38 38 39 + "#clock-cells": 40 + const: 0 41 + 39 42 required: 40 43 - compatible 41 44 - reg 42 45 43 - if: 44 - properties: 45 - compatible: 46 - contains: 47 - enum: 48 - - st,stm32mp157-syscfg 49 - - st,stm32f4-gcan 50 - then: 51 - required: 52 - - clocks 46 + allOf: 47 + - if: 48 + properties: 49 + compatible: 50 + contains: 51 + enum: 52 + - st,stm32mp157-syscfg 53 + - st,stm32f4-gcan 54 + then: 55 + required: 56 + - clocks 57 + - if: 58 + properties: 59 + compatible: 60 + const: st,stm32mp25-syscfg 61 + then: 62 + required: 63 + - "#clock-cells" 53 64 54 65 additionalProperties: false 55 66
+150
Documentation/devicetree/bindings/display/rockchip/rockchip,dw-dp.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/rockchip/rockchip,dw-dp.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Rockchip DW DisplayPort Transmitter 8 + 9 + maintainers: 10 + - Andy Yan <andy.yan@rock-chips.com> 11 + 12 + description: | 13 + The Rockchip RK3588 SoC integrates the Synopsys DesignWare DPTX controller 14 + which is compliant with the DisplayPort Specification Version 1.4 with the 15 + following features: 16 + 17 + * DisplayPort 1.4a 18 + * Main Link: 1/2/4 lanes 19 + * Main Link Support 1.62Gbps, 2.7Gbps, 5.4Gbps and 8.1Gbps 20 + * AUX channel 1Mbps 21 + * Single Stream Transport(SST) 22 + * Multistream Transport (MST) 23 + * Type-C support (alternate mode) 24 + * HDCP 2.2, HDCP 1.3 25 + * Supports up to 8/10 bits per color component 26 + * Supports RBG, YCbCr4:4:4, YCbCr4:2:2, YCbCr4:2:0 27 + * Pixel clock up to 594MHz 28 + * I2S, SPDIF audio interface 29 + 30 + allOf: 31 + - $ref: /schemas/sound/dai-common.yaml# 32 + 33 + properties: 34 + compatible: 35 + enum: 36 + - rockchip,rk3588-dp 37 + 38 + reg: 39 + maxItems: 1 40 + 41 + interrupts: 42 + maxItems: 1 43 + 44 + clocks: 45 + items: 46 + - description: Peripheral/APB bus clock 47 + - description: DisplayPort AUX clock 48 + - description: HDCP clock 49 + - description: I2S interface clock 50 + - description: SPDIF interfce clock 51 + 52 + clock-names: 53 + items: 54 + - const: apb 55 + - const: aux 56 + - const: hdcp 57 + - const: i2s 58 + - const: spdif 59 + 60 + phys: 61 + maxItems: 1 62 + 63 + ports: 64 + $ref: /schemas/graph.yaml#/properties/ports 65 + 66 + properties: 67 + port@0: 68 + $ref: /schemas/graph.yaml#/properties/port 69 + description: Video port for RGB/YUV input. 70 + 71 + port@1: 72 + $ref: /schemas/graph.yaml#/properties/port 73 + description: Video port for DP output. 
74 + 75 + required: 76 + - port@0 77 + - port@1 78 + 79 + power-domains: 80 + maxItems: 1 81 + 82 + resets: 83 + maxItems: 1 84 + 85 + "#sound-dai-cells": 86 + const: 0 87 + 88 + required: 89 + - compatible 90 + - reg 91 + - clocks 92 + - clock-names 93 + - interrupts 94 + - phys 95 + - ports 96 + - resets 97 + 98 + unevaluatedProperties: false 99 + 100 + examples: 101 + - | 102 + #include <dt-bindings/clock/rockchip,rk3588-cru.h> 103 + #include <dt-bindings/phy/phy.h> 104 + #include <dt-bindings/interrupt-controller/arm-gic.h> 105 + #include <dt-bindings/interrupt-controller/irq.h> 106 + #include <dt-bindings/power/rk3588-power.h> 107 + #include <dt-bindings/reset/rockchip,rk3588-cru.h> 108 + 109 + soc { 110 + #address-cells = <2>; 111 + #size-cells = <2>; 112 + 113 + dp@fde50000 { 114 + compatible = "rockchip,rk3588-dp"; 115 + reg = <0x0 0xfde50000 0x0 0x4000>; 116 + interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH 0>; 117 + clocks = <&cru PCLK_DP0>, <&cru CLK_AUX16M_0>, 118 + <&cru CLK_DP0>, <&cru MCLK_I2S4_8CH_TX>, 119 + <&cru MCLK_SPDIF2_DP0>; 120 + clock-names = "apb", "aux", "hdcp", "i2s", "spdif"; 121 + assigned-clocks = <&cru CLK_AUX16M_0>; 122 + assigned-clock-rates = <16000000>; 123 + resets = <&cru SRST_DP0>; 124 + phys = <&usbdp_phy0 PHY_TYPE_DP>; 125 + power-domains = <&power RK3588_PD_VO0>; 126 + #sound-dai-cells = <0>; 127 + 128 + 129 + ports { 130 + #address-cells = <1>; 131 + #size-cells = <0>; 132 + 133 + port@0 { 134 + reg = <0>; 135 + 136 + dp0_in_vp2: endpoint { 137 + remote-endpoint = <&vp2_out_dp0>; 138 + }; 139 + }; 140 + 141 + port@1 { 142 + reg = <1>; 143 + 144 + dp0_out_con0: endpoint { 145 + remote-endpoint = <&dp_con0_in>; 146 + }; 147 + }; 148 + }; 149 + }; 150 + };
+5
Documentation/devicetree/bindings/display/sitronix,st7567.yaml
··· 23 23 reg: 24 24 maxItems: 1 25 25 26 + sitronix,inverted: 27 + type: boolean 28 + description: 29 + Display pixels are inverted, i.e. 0 is white and 1 is black. 30 + 26 31 width-mm: true 27 32 height-mm: true 28 33 panel-timing: true
+5
Documentation/devicetree/bindings/display/sitronix,st7571.yaml
··· 28 28 description: 29 29 Display supports 4-level grayscale. 30 30 31 + sitronix,inverted: 32 + type: boolean 33 + description: 34 + Display pixels are inverted, i.e. 0 is white and 1 is black. 35 + 31 36 reset-gpios: true 32 37 width-mm: true 33 38 height-mm: true
+53 -2
Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
··· 12 12 13 13 properties: 14 14 compatible: 15 - const: st,stm32-ltdc 15 + enum: 16 + - st,stm32-ltdc 17 + - st,stm32mp251-ltdc 18 + - st,stm32mp255-ltdc 16 19 17 20 reg: 18 21 maxItems: 1 ··· 27 24 minItems: 1 28 25 29 26 clocks: 30 - maxItems: 1 27 + minItems: 1 28 + maxItems: 4 31 29 32 30 clock-names: 33 31 items: 34 32 - const: lcd 33 + - const: bus 34 + - const: ref 35 + - const: lvds 36 + minItems: 1 35 37 36 38 resets: 39 + maxItems: 1 40 + 41 + access-controllers: 37 42 maxItems: 1 38 43 39 44 port: ··· 61 50 - clock-names 62 51 - resets 63 52 - port 53 + 54 + allOf: 55 + - if: 56 + properties: 57 + compatible: 58 + contains: 59 + enum: 60 + - st,stm32-ltdc 61 + then: 62 + properties: 63 + clocks: 64 + maxItems: 1 65 + clock-names: 66 + maxItems: 1 67 + - if: 68 + properties: 69 + compatible: 70 + contains: 71 + enum: 72 + - st,stm32mp251-ltdc 73 + then: 74 + properties: 75 + clocks: 76 + minItems: 2 77 + maxItems: 2 78 + clock-names: 79 + minItems: 2 80 + maxItems: 2 81 + - if: 82 + properties: 83 + compatible: 84 + contains: 85 + enum: 86 + - st,stm32mp255-ltdc 87 + then: 88 + properties: 89 + clocks: 90 + minItems: 4 91 + clock-names: 92 + minItems: 4 64 93 65 94 additionalProperties: false 66 95
+12 -1
Documentation/devicetree/bindings/display/st,stm32mp25-lvds.yaml
··· 31 31 32 32 properties: 33 33 compatible: 34 - const: st,stm32mp25-lvds 34 + oneOf: 35 + - items: 36 + - enum: 37 + - st,stm32mp255-lvds 38 + - const: st,stm32mp25-lvds 39 + - const: st,stm32mp25-lvds 35 40 36 41 "#clock-cells": 37 42 const: 0 ··· 57 52 - const: ref 58 53 59 54 resets: 55 + maxItems: 1 56 + 57 + access-controllers: 58 + maxItems: 1 59 + 60 + power-domains: 60 61 maxItems: 1 61 62 62 63 ports:
+35 -14
Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
··· 23 23 - const: img,img-rogue 24 24 - items: 25 25 - enum: 26 + - thead,th1520-gpu 27 + - const: img,img-bxm-4-64 28 + - const: img,img-rogue 29 + - items: 30 + - enum: 26 31 - ti,j721s2-gpu 27 32 - const: img,img-bxs-4-64 28 33 - const: img,img-rogue ··· 82 77 additionalProperties: false 83 78 84 79 allOf: 85 - # Constraints added alongside the new compatible strings that would otherwise 86 - # create an ABI break. 87 - - if: 88 - properties: 89 - compatible: 90 - contains: 91 - const: img,img-rogue 92 - then: 93 - required: 94 - - power-domains 95 - - power-domain-names 96 - 97 80 - if: 98 81 properties: 99 82 compatible: ··· 90 97 then: 91 98 properties: 92 99 power-domains: 93 - maxItems: 1 100 + items: 101 + - description: Power domain A 94 102 power-domain-names: 95 103 maxItems: 1 104 + required: 105 + - power-domains 106 + - power-domain-names 107 + 108 + - if: 109 + properties: 110 + compatible: 111 + contains: 112 + const: thead,th1520-gpu 113 + then: 114 + properties: 115 + clocks: 116 + minItems: 3 117 + clock-names: 118 + minItems: 3 119 + power-domains: 120 + items: 121 + - description: The single, unified power domain for the GPU on the 122 + TH1520 SoC, integrating all internal IP power domains. 123 + power-domain-names: false 124 + required: 125 + - power-domains 96 126 97 127 - if: 98 128 properties: ··· 125 109 then: 126 110 properties: 127 111 power-domains: 128 - minItems: 2 112 + items: 113 + - description: Power domain A 114 + - description: Power domain B 129 115 power-domain-names: 130 116 minItems: 2 117 + required: 118 + - power-domains 119 + - power-domain-names 131 120 132 121 - if: 133 122 properties:
+9 -1
MAINTAINERS
··· 7505 7505 S: Supported 7506 7506 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 7507 7507 F: Documentation/accel/rocket/ 7508 - F: Documentation/devicetree/bindings/npu/rockchip,rknn-core.yaml 7508 + F: Documentation/devicetree/bindings/npu/rockchip,rk3588-rknn-core.yaml 7509 7509 F: drivers/accel/rocket/ 7510 7510 F: include/uapi/drm/rocket_accel.h 7511 7511 ··· 7998 7998 S: Maintained 7999 7999 F: Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml 8000 8000 F: drivers/gpu/drm/panel/panel-synaptics-r63353.c 8001 + 8002 + DRM DRIVER FOR SYNOPSYS DESIGNWARE DISPLAYPORT BRIDGE 8003 + M: Andy Yan <andy.yan@rock-chips.com> 8004 + S: Maintained 8005 + T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 8006 + F: Documentation/devicetree/bindings/display/rockchip/rockchip,dw-dp.yaml 8007 + F: drivers/gpu/drm/bridge/synopsys/dw-dp.c 8008 + F: include/drm/bridge/dw_dp.h 8001 8009 8002 8010 DRM DRIVER FOR TI DLPC3433 MIPI DSI TO DMD BRIDGE 8003 8011 M: Jagan Teki <jagan@amarulasolutions.com>
+3 -3
drivers/accel/amdxdna/aie2_ctx.c
··· 199 199 { 200 200 struct amdxdna_sched_job *job = handle; 201 201 struct amdxdna_gem_obj *cmd_abo; 202 - u32 ret = 0; 202 + int ret = 0; 203 203 u32 status; 204 204 205 205 cmd_abo = job->cmd_bo; ··· 229 229 aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size) 230 230 { 231 231 struct amdxdna_sched_job *job = handle; 232 - u32 ret = 0; 232 + int ret = 0; 233 233 u32 status; 234 234 235 235 if (unlikely(!data)) ··· 257 257 u32 fail_cmd_status; 258 258 u32 fail_cmd_idx; 259 259 u32 cmd_status; 260 - u32 ret = 0; 260 + int ret = 0; 261 261 262 262 cmd_abo = job->cmd_bo; 263 263 if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
+1 -1
drivers/accel/ivpu/ivpu_fw.h
··· 45 45 int ivpu_fw_init(struct ivpu_device *vdev); 46 46 void ivpu_fw_fini(struct ivpu_device *vdev); 47 47 void ivpu_fw_load(struct ivpu_device *vdev); 48 - void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp); 48 + void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params); 49 49 50 50 static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev) 51 51 {
+1 -2
drivers/accel/ivpu/ivpu_hw_btrs.c
··· 33 33 34 34 #define PLL_CDYN_DEFAULT 0x80 35 35 #define PLL_EPP_DEFAULT 0x80 36 - #define PLL_CONFIG_DEFAULT 0x0 37 36 #define PLL_REF_CLK_FREQ 50000000ull 38 37 #define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) 39 38 ··· 302 303 wp->epp = 0; 303 304 } else { 304 305 wp->target = hw->pll.pn_ratio; 305 - wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0; 306 + wp->cfg = 0; 306 307 wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0; 307 308 wp->epp = enable ? PLL_EPP_DEFAULT : 0; 308 309 }
+1 -1
drivers/accel/ivpu/ivpu_hw_btrs.h
··· 36 36 bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq); 37 37 bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq); 38 38 int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable); 39 - void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent); 39 + void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent); 40 40 u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev); 41 41 u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev); 42 42 u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
+8 -8
drivers/accel/rocket/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 3 3 config DRM_ACCEL_ROCKET 4 - tristate "Rocket (support for Rockchip NPUs)" 5 - depends on DRM 6 - depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST 7 - depends on ROCKCHIP_IOMMU || COMPILE_TEST 8 - depends on MMU 9 - select DRM_SCHED 10 - select DRM_GEM_SHMEM_HELPER 11 - help 4 + tristate "Rocket (support for Rockchip NPUs)" 5 + depends on DRM_ACCEL 6 + depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST 7 + depends on ROCKCHIP_IOMMU || COMPILE_TEST 8 + depends on MMU 9 + select DRM_SCHED 10 + select DRM_GEM_SHMEM_HELPER 11 + help 12 12 Choose this option if you have a Rockchip SoC that contains a 13 13 compatible Neural Processing Unit (NPU), such as the RK3588. Called by 14 14 Rockchip either RKNN or RKNPU, it accelerates inference of neural
+1 -1
drivers/accel/rocket/rocket_core.c
··· 74 74 75 75 pm_runtime_enable(dev); 76 76 77 - err = pm_runtime_get_sync(dev); 77 + err = pm_runtime_resume_and_get(dev); 78 78 if (err) { 79 79 rocket_job_fini(core); 80 80 return err;
+5 -4
drivers/accel/rocket/rocket_job.c
··· 222 222 err_unlock: 223 223 drm_gem_unlock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx); 224 224 err: 225 - kfree(bos); 225 + kvfree(bos); 226 226 227 227 return ret; 228 228 } ··· 422 422 u32 raw_status = rocket_pc_readl(core, INTERRUPT_RAW_STATUS); 423 423 424 424 WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR); 425 - WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR); 425 + WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR); 426 426 427 427 if (!(raw_status & PC_INTERRUPT_RAW_STATUS_DPU_0 || 428 428 raw_status & PC_INTERRUPT_RAW_STATUS_DPU_1)) ··· 496 496 int rocket_job_open(struct rocket_file_priv *rocket_priv) 497 497 { 498 498 struct rocket_device *rdev = rocket_priv->rdev; 499 - struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores, sizeof(scheds), 499 + struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores, 500 + sizeof(*scheds), 500 501 GFP_KERNEL); 501 502 unsigned int core; 502 503 int ret; ··· 631 630 rocket_ioctl_submit_job(dev, file, &jobs[i]); 632 631 633 632 exit: 634 - kfree(jobs); 633 + kvfree(jobs); 635 634 636 635 return ret; 637 636 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 1474 1474 owner = (void *)(unsigned long)atomic_inc_return(&counter); 1475 1475 1476 1476 r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner, 1477 - 64, 0, &job); 1477 + 64, 0, &job, 1478 + AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER); 1478 1479 if (r) 1479 1480 goto err; 1480 1481
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 690 690 r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr, 691 691 AMDGPU_FENCE_OWNER_UNDEFINED, 692 692 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, 693 - &job); 693 + &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB); 694 694 if (r) 695 695 goto error_alloc; 696 696
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 209 209 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, 210 210 struct drm_sched_entity *entity, void *owner, 211 211 size_t size, enum amdgpu_ib_pool_type pool_type, 212 - struct amdgpu_job **job) 212 + struct amdgpu_job **job, u64 k_job_id) 213 213 { 214 214 int r; 215 215 216 - r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0); 216 + r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 217 + k_job_id); 217 218 if (r) 218 219 return r; 219 220
+18 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
··· 44 44 struct amdgpu_fence; 45 45 enum amdgpu_ib_pool_type; 46 46 47 + /* Internal kernel job ids. (decreasing values, starting from U64_MAX). */ 48 + #define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL) 49 + #define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL) 50 + #define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL) 51 + #define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL) 52 + #define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL) 53 + #define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL) 54 + #define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL) 55 + #define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL) 56 + #define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL) 57 + #define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL) 58 + #define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL) 59 + #define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL) 60 + #define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL) 61 + #define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL) 62 + 47 63 struct amdgpu_job { 48 64 struct drm_sched_job base; 49 65 struct amdgpu_vm *vm; ··· 112 96 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, 113 97 struct drm_sched_entity *entity, void *owner, 114 98 size_t size, enum amdgpu_ib_pool_type pool_type, 115 - struct amdgpu_job **job); 99 + struct amdgpu_job **job, 100 + u64 k_job_id); 116 101 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, 117 102 struct amdgpu_bo *gws, struct amdgpu_bo *oa); 118 103 void amdgpu_job_free_resources(struct amdgpu_job *job);
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
··· 196 196 int i, r; 197 197 198 198 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 199 - AMDGPU_IB_POOL_DIRECT, &job); 199 + AMDGPU_IB_POOL_DIRECT, &job, 200 + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 200 201 if (r) 201 202 return r; 202 203
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 1313 1313 if (r) 1314 1314 goto out; 1315 1315 1316 - r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true); 1316 + r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true, 1317 + AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE); 1317 1318 if (WARN_ON(r)) 1318 1319 goto out; 1319 1320
+17 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 226 226 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr, 227 227 AMDGPU_FENCE_OWNER_UNDEFINED, 228 228 num_dw * 4 + num_bytes, 229 - AMDGPU_IB_POOL_DELAYED, &job); 229 + AMDGPU_IB_POOL_DELAYED, &job, 230 + AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER); 230 231 if (r) 231 232 return r; 232 233 ··· 407 406 struct dma_fence *wipe_fence = NULL; 408 407 409 408 r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence, 410 - false); 409 + false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT); 411 410 if (r) { 412 411 goto error; 413 412 } else if (wipe_fence) { ··· 1511 1510 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr, 1512 1511 AMDGPU_FENCE_OWNER_UNDEFINED, 1513 1512 num_dw * 4, AMDGPU_IB_POOL_DELAYED, 1514 - &job); 1513 + &job, 1514 + AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA); 1515 1515 if (r) 1516 1516 goto out; 1517 1517 ··· 2169 2167 struct dma_resv *resv, 2170 2168 bool vm_needs_flush, 2171 2169 struct amdgpu_job **job, 2172 - bool delayed) 2170 + bool delayed, u64 k_job_id) 2173 2171 { 2174 2172 enum amdgpu_ib_pool_type pool = direct_submit ? 
2175 2173 AMDGPU_IB_POOL_DIRECT : ··· 2179 2177 &adev->mman.high_pr; 2180 2178 r = amdgpu_job_alloc_with_ib(adev, entity, 2181 2179 AMDGPU_FENCE_OWNER_UNDEFINED, 2182 - num_dw * 4, pool, job); 2180 + num_dw * 4, pool, job, k_job_id); 2183 2181 if (r) 2184 2182 return r; 2185 2183 ··· 2219 2217 num_loops = DIV_ROUND_UP(byte_count, max_bytes); 2220 2218 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); 2221 2219 r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw, 2222 - resv, vm_needs_flush, &job, false); 2220 + resv, vm_needs_flush, &job, false, 2221 + AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER); 2223 2222 if (r) 2224 2223 return r; 2225 2224 ··· 2255 2252 uint64_t dst_addr, uint32_t byte_count, 2256 2253 struct dma_resv *resv, 2257 2254 struct dma_fence **fence, 2258 - bool vm_needs_flush, bool delayed) 2255 + bool vm_needs_flush, bool delayed, 2256 + u64 k_job_id) 2259 2257 { 2260 2258 struct amdgpu_device *adev = ring->adev; 2261 2259 unsigned int num_loops, num_dw; ··· 2269 2265 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes); 2270 2266 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8); 2271 2267 r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush, 2272 - &job, delayed); 2268 + &job, delayed, k_job_id); 2273 2269 if (r) 2274 2270 return r; 2275 2271 ··· 2339 2335 goto err; 2340 2336 2341 2337 r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv, 2342 - &next, true, true); 2338 + &next, true, true, 2339 + AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER); 2343 2340 if (r) 2344 2341 goto err; 2345 2342 ··· 2359 2354 uint32_t src_data, 2360 2355 struct dma_resv *resv, 2361 2356 struct dma_fence **f, 2362 - bool delayed) 2357 + bool delayed, 2358 + u64 k_job_id) 2363 2359 { 2364 2360 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 2365 2361 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; ··· 2390 2384 goto error; 2391 2385 2392 2386 r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv, 2393 - 
&next, true, delayed); 2387 + &next, true, delayed, k_job_id); 2394 2388 if (r) 2395 2389 goto error; 2396 2390
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 182 182 uint32_t src_data, 183 183 struct dma_resv *resv, 184 184 struct dma_fence **fence, 185 - bool delayed); 185 + bool delayed, 186 + u64 k_job_id); 186 187 187 188 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); 188 189 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 1136 1136 r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity, 1137 1137 AMDGPU_FENCE_OWNER_UNDEFINED, 1138 1138 64, direct ? AMDGPU_IB_POOL_DIRECT : 1139 - AMDGPU_IB_POOL_DELAYED, &job); 1139 + AMDGPU_IB_POOL_DELAYED, &job, 1140 + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 1140 1141 if (r) 1141 1142 return r; 1142 1143
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 449 449 r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, 450 450 AMDGPU_FENCE_OWNER_UNDEFINED, 451 451 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, 452 - &job); 452 + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 453 453 if (r) 454 454 return r; 455 455 ··· 540 540 AMDGPU_FENCE_OWNER_UNDEFINED, 541 541 ib_size_dw * 4, 542 542 direct ? AMDGPU_IB_POOL_DIRECT : 543 - AMDGPU_IB_POOL_DELAYED, &job); 543 + AMDGPU_IB_POOL_DELAYED, &job, 544 + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 544 545 if (r) 545 546 return r; 546 547
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 628 628 629 629 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 630 630 64, AMDGPU_IB_POOL_DIRECT, 631 - &job); 631 + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 632 632 if (r) 633 633 goto err; 634 634 ··· 808 808 809 809 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 810 810 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, 811 - &job); 811 + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 812 812 if (r) 813 813 goto err; 814 814 ··· 938 938 939 939 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 940 940 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, 941 - &job); 941 + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 942 942 if (r) 943 943 return r; 944 944 ··· 1005 1005 1006 1006 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 1007 1007 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, 1008 - &job); 1008 + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 1009 1009 if (r) 1010 1010 return r; 1011 1011
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 977 977 params.vm = vm; 978 978 params.immediate = immediate; 979 979 980 - r = vm->update_funcs->prepare(&params, NULL); 980 + r = vm->update_funcs->prepare(&params, NULL, 981 + AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES); 981 982 if (r) 982 983 goto error; 983 984 ··· 1147 1146 dma_fence_put(tmp); 1148 1147 } 1149 1148 1150 - r = vm->update_funcs->prepare(&params, sync); 1149 + r = vm->update_funcs->prepare(&params, sync, 1150 + AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE); 1151 1151 if (r) 1152 1152 goto error_free; 1153 1153
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 308 308 struct amdgpu_vm_update_funcs { 309 309 int (*map_table)(struct amdgpu_bo_vm *bo); 310 310 int (*prepare)(struct amdgpu_vm_update_params *p, 311 - struct amdgpu_sync *sync); 311 + struct amdgpu_sync *sync, u64 k_job_id); 312 312 int (*update)(struct amdgpu_vm_update_params *p, 313 313 struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr, 314 314 unsigned count, uint32_t incr, uint64_t flags);
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
··· 40 40 * 41 41 * @p: see amdgpu_vm_update_params definition 42 42 * @sync: sync obj with fences to wait on 43 + * @k_job_id: the id for tracing/debug purposes 43 44 * 44 45 * Returns: 45 46 * Negativ errno, 0 for success. 46 47 */ 47 48 static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, 48 - struct amdgpu_sync *sync) 49 + struct amdgpu_sync *sync, 50 + u64 k_job_id) 49 51 { 50 52 if (!sync) 51 53 return 0;
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
··· 26 26 #include "amdgpu.h" 27 27 #include "amdgpu_trace.h" 28 28 #include "amdgpu_vm.h" 29 + #include "amdgpu_job.h" 29 30 30 31 /* 31 32 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt ··· 396 395 params.vm = vm; 397 396 params.immediate = immediate; 398 397 399 - r = vm->update_funcs->prepare(&params, NULL); 398 + r = vm->update_funcs->prepare(&params, NULL, 399 + AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR); 400 400 if (r) 401 401 goto exit; 402 402
+7 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
··· 40 40 41 41 /* Allocate a new job for @count PTE updates */ 42 42 static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, 43 - unsigned int count) 43 + unsigned int count, u64 k_job_id) 44 44 { 45 45 enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE 46 46 : AMDGPU_IB_POOL_DELAYED; ··· 56 56 ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); 57 57 58 58 r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM, 59 - ndw * 4, pool, &p->job); 59 + ndw * 4, pool, &p->job, k_job_id); 60 60 if (r) 61 61 return r; 62 62 ··· 69 69 * 70 70 * @p: see amdgpu_vm_update_params definition 71 71 * @sync: amdgpu_sync object with fences to wait for 72 + * @k_job_id: identifier of the job, for tracing purpose 72 73 * 73 74 * Returns: 74 75 * Negativ errno, 0 for success. 75 76 */ 76 77 static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, 77 - struct amdgpu_sync *sync) 78 + struct amdgpu_sync *sync, u64 k_job_id) 78 79 { 79 80 int r; 80 81 81 - r = amdgpu_vm_sdma_alloc_job(p, 0); 82 + r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id); 82 83 if (r) 83 84 return r; 84 85 ··· 250 249 if (r) 251 250 return r; 252 251 253 - r = amdgpu_vm_sdma_alloc_job(p, count); 252 + r = amdgpu_vm_sdma_alloc_job(p, count, 253 + AMDGPU_KERNEL_JOB_ID_VM_UPDATE); 254 254 if (r) 255 255 return r; 256 256 }
+4 -2
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 217 217 int i, r; 218 218 219 219 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 220 - AMDGPU_IB_POOL_DIRECT, &job); 220 + AMDGPU_IB_POOL_DIRECT, &job, 221 + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 221 222 if (r) 222 223 return r; 223 224 ··· 282 281 int i, r; 283 282 284 283 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 285 - AMDGPU_IB_POOL_DIRECT, &job); 284 + AMDGPU_IB_POOL_DIRECT, &job, 285 + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 286 286 if (r) 287 287 return r; 288 288
+4 -2
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
··· 225 225 int i, r; 226 226 227 227 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 228 - AMDGPU_IB_POOL_DIRECT, &job); 228 + AMDGPU_IB_POOL_DIRECT, &job, 229 + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 229 230 if (r) 230 231 return r; 231 232 ··· 289 288 int i, r; 290 289 291 290 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 292 - AMDGPU_IB_POOL_DIRECT, &job); 291 + AMDGPU_IB_POOL_DIRECT, &job, 292 + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); 293 293 if (r) 294 294 return r; 295 295
+2 -1
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 68 68 AMDGPU_FENCE_OWNER_UNDEFINED, 69 69 num_dw * 4 + num_bytes, 70 70 AMDGPU_IB_POOL_DELAYED, 71 - &job); 71 + &job, 72 + AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP); 72 73 if (r) 73 74 return r; 74 75
+26 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 3656 3656 3657 3657 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3658 3658 { 3659 + const struct drm_panel_backlight_quirk *panel_backlight_quirk; 3659 3660 struct amdgpu_dm_backlight_caps *caps; 3660 3661 struct drm_connector *conn_base; 3661 3662 struct amdgpu_device *adev; 3662 3663 struct drm_luminance_range_info *luminance_range; 3663 - int min_input_signal_override; 3664 + struct drm_device *drm; 3664 3665 3665 3666 if (aconnector->bl_idx == -1 || 3666 3667 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3667 3668 return; 3668 3669 3669 3670 conn_base = &aconnector->base; 3670 - adev = drm_to_adev(conn_base->dev); 3671 + drm = conn_base->dev; 3672 + adev = drm_to_adev(drm); 3671 3673 3672 3674 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3673 3675 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; ··· 3702 3700 else 3703 3701 caps->aux_min_input_signal = 1; 3704 3702 3705 - min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid); 3706 - if (min_input_signal_override >= 0) 3707 - caps->min_input_signal = min_input_signal_override; 3703 + panel_backlight_quirk = 3704 + drm_get_panel_backlight_quirk(aconnector->drm_edid); 3705 + if (!IS_ERR_OR_NULL(panel_backlight_quirk)) { 3706 + if (panel_backlight_quirk->min_brightness) { 3707 + caps->min_input_signal = 3708 + panel_backlight_quirk->min_brightness - 1; 3709 + drm_info(drm, 3710 + "Applying panel backlight quirk, min_brightness: %d\n", 3711 + caps->min_input_signal); 3712 + } 3713 + if (panel_backlight_quirk->brightness_mask) { 3714 + drm_info(drm, 3715 + "Applying panel backlight quirk, brightness_mask: 0x%X\n", 3716 + panel_backlight_quirk->brightness_mask); 3717 + caps->brightness_mask = 3718 + panel_backlight_quirk->brightness_mask; 3719 + } 3720 + } 3708 3721 } 3709 3722 3710 3723 DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T)) ··· 4930 4913 
amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 4931 4914 brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]); 4932 4915 link = (struct dc_link *)dm->backlight_link[bl_idx]; 4916 + 4917 + /* Apply brightness quirk */ 4918 + if (caps->brightness_mask) 4919 + brightness |= caps->brightness_mask; 4933 4920 4934 4921 /* Change brightness based on AUX property */ 4935 4922 mutex_lock(&dm->dc_lock);
+5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 216 216 */ 217 217 bool aux_support; 218 218 /** 219 + * @brightness_mask: After deriving brightness, OR it with this mask. 220 + * Workaround for panels with issues with certain brightness values. 221 + */ 222 + u32 brightness_mask; 223 + /** 219 224 * @ac_level: the default brightness if booted on AC 220 225 */ 221 226 u8 ac_level;
+1 -2
drivers/gpu/drm/bridge/Kconfig
··· 120 120 select DRM_DISPLAY_DP_AUX_BUS 121 121 select DRM_KMS_HELPER 122 122 select EXTCON 123 - select CRYPTO 124 - select CRYPTO_HASH 123 + select CRYPTO_LIB_SHA1 125 124 select REGMAP_I2C 126 125 help 127 126 ITE IT6505 DisplayPort bridge chip driver.
+4 -2
drivers/gpu/drm/bridge/adv7511/adv7511.h
··· 195 195 #define ADV7511_I2S_IEC958_DIRECT 3 196 196 197 197 #define ADV7511_PACKET(p, x) ((p) * 0x20 + (x)) 198 - #define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x) 198 + #define ADV7511_PACKET_SPD(x) ADV7511_PACKET(0, x) 199 199 #define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x) 200 200 #define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x) 201 201 #define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x) 202 202 #define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x) 203 203 #define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x) 204 - #define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x) 204 + #define ADV7511_PACKET_SPARE1(x) ADV7511_PACKET(6, x) 205 + #define ADV7511_PACKET_SPARE2(x) ADV7511_PACKET(7, x) 205 206 206 207 #define ADV7511_REG_CEC_TX_FRAME_HDR 0x00 207 208 #define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01 ··· 349 348 struct i2c_client *i2c_cec; 350 349 351 350 struct regmap *regmap; 351 + struct regmap *regmap_packet; 352 352 struct regmap *regmap_cec; 353 353 enum drm_connector_status status; 354 354 bool powered;
+30
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 132 132 .volatile_reg = adv7511_register_volatile, 133 133 }; 134 134 135 + static const struct regmap_config adv7511_packet_config = { 136 + .reg_bits = 8, 137 + .val_bits = 8, 138 + 139 + .max_register = 0xff, 140 + }; 141 + 135 142 /* ----------------------------------------------------------------------------- 136 143 * Hardware configuration 137 144 */ ··· 896 889 case HDMI_INFOFRAME_TYPE_AVI: 897 890 adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME); 898 891 break; 892 + case HDMI_INFOFRAME_TYPE_SPD: 893 + adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD); 894 + break; 895 + case HDMI_INFOFRAME_TYPE_VENDOR: 896 + adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1); 897 + break; 899 898 default: 900 899 drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); 901 900 break; ··· 925 912 buffer + 1, len - 1); 926 913 927 914 adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME); 915 + break; 916 + case HDMI_INFOFRAME_TYPE_SPD: 917 + regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPD(0), 918 + buffer, len); 919 + adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPD); 920 + break; 921 + case HDMI_INFOFRAME_TYPE_VENDOR: 922 + regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPARE1(0), 923 + buffer, len); 924 + adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPARE1); 928 925 break; 929 926 default: 930 927 drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); ··· 1263 1240 if (IS_ERR(adv7511->i2c_packet)) { 1264 1241 ret = PTR_ERR(adv7511->i2c_packet); 1265 1242 goto err_i2c_unregister_edid; 1243 + } 1244 + 1245 + adv7511->regmap_packet = devm_regmap_init_i2c(adv7511->i2c_packet, 1246 + &adv7511_packet_config); 1247 + if (IS_ERR(adv7511->regmap_packet)) { 1248 + ret = PTR_ERR(adv7511->regmap_packet); 1249 + goto err_i2c_unregister_packet; 1266 1250 } 1267 1251 1268 1252 regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+5 -7
drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
··· 492 492 static enum drm_mode_status 493 493 imx93_dsi_validate_mode(struct imx93_dsi *dsi, const struct drm_display_mode *mode) 494 494 { 495 - struct drm_bridge *bridge = dw_mipi_dsi_get_bridge(dsi->dmd); 495 + struct drm_bridge *dmd_bridge = dw_mipi_dsi_get_bridge(dsi->dmd); 496 + struct drm_bridge *last_bridge __free(drm_bridge_put) = 497 + drm_bridge_chain_get_last_bridge(dmd_bridge->encoder); 496 498 497 - /* Get the last bridge */ 498 - while (drm_bridge_get_next_bridge(bridge)) 499 - bridge = drm_bridge_get_next_bridge(bridge); 500 - 501 - if ((bridge->ops & DRM_BRIDGE_OP_DETECT) && 502 - (bridge->ops & DRM_BRIDGE_OP_EDID)) { 499 + if ((last_bridge->ops & DRM_BRIDGE_OP_DETECT) && 500 + (last_bridge->ops & DRM_BRIDGE_OP_EDID)) { 503 501 unsigned long pixel_clock_rate = mode->clock * 1000; 504 502 unsigned long rounded_rate; 505 503
+2 -31
drivers/gpu/drm/bridge/ite-it6505.c
··· 21 21 #include <linux/wait.h> 22 22 #include <linux/bitfield.h> 23 23 24 - #include <crypto/hash.h> 24 + #include <crypto/sha1.h> 25 25 26 26 #include <drm/display/drm_dp_helper.h> 27 27 #include <drm/display/drm_hdcp_helper.h> ··· 2107 2107 it6505->hdcp_status = HDCP_AUTH_GOING; 2108 2108 } 2109 2109 2110 - static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input, 2111 - unsigned int size, u8 *output_av) 2112 - { 2113 - struct shash_desc *desc; 2114 - struct crypto_shash *tfm; 2115 - int err; 2116 - struct device *dev = it6505->dev; 2117 - 2118 - tfm = crypto_alloc_shash("sha1", 0, 0); 2119 - if (IS_ERR(tfm)) { 2120 - dev_err(dev, "crypto_alloc_shash sha1 failed"); 2121 - return PTR_ERR(tfm); 2122 - } 2123 - desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); 2124 - if (!desc) { 2125 - crypto_free_shash(tfm); 2126 - return -ENOMEM; 2127 - } 2128 - 2129 - desc->tfm = tfm; 2130 - err = crypto_shash_digest(desc, sha1_input, size, output_av); 2131 - if (err) 2132 - dev_err(dev, "crypto_shash_digest sha1 failed"); 2133 - 2134 - crypto_free_shash(tfm); 2135 - kfree(desc); 2136 - return err; 2137 - } 2138 - 2139 2110 static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input) 2140 2111 { 2141 2112 struct device *dev = it6505->dev; ··· 2176 2205 return false; 2177 2206 } 2178 2207 2179 - it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av); 2208 + sha1(it6505->sha1_input, i, (u8 *)av); 2180 2209 /*1B-05 V' must retry 3 times */ 2181 2210 for (retry = 0; retry < 3; retry++) { 2182 2211 err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
+7
drivers/gpu/drm/bridge/synopsys/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 + config DRM_DW_DP 3 + tristate 4 + select DRM_DISPLAY_HELPER 5 + select DRM_DISPLAY_DP_HELPER 6 + select DRM_KMS_HELPER 7 + select REGMAP_MMIO 8 + 2 9 config DRM_DW_HDMI 3 10 tristate 4 11 select DRM_DISPLAY_HDMI_HELPER
+1
drivers/gpu/drm/bridge/synopsys/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 + obj-$(CONFIG_DRM_DW_DP) += dw-dp.o 2 3 obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o 3 4 obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o 4 5 obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
+2095
drivers/gpu/drm/bridge/synopsys/dw-dp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Synopsys DesignWare Cores DisplayPort Transmitter Controller 4 + * 5 + * Copyright (c) 2025 Rockchip Electronics Co., Ltd. 6 + * 7 + * Author: Andy Yan <andy.yan@rock-chips.com> 8 + */ 9 + #include <linux/bitfield.h> 10 + #include <linux/clk.h> 11 + #include <linux/iopoll.h> 12 + #include <linux/irq.h> 13 + #include <linux/media-bus-format.h> 14 + #include <linux/of_device.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/regmap.h> 17 + #include <linux/reset.h> 18 + #include <linux/phy/phy.h> 19 + #include <linux/unaligned.h> 20 + 21 + #include <drm/bridge/dw_dp.h> 22 + #include <drm/drm_atomic_helper.h> 23 + #include <drm/drm_bridge.h> 24 + #include <drm/drm_bridge_connector.h> 25 + #include <drm/display/drm_dp_helper.h> 26 + #include <drm/drm_edid.h> 27 + #include <drm/drm_of.h> 28 + #include <drm/drm_print.h> 29 + #include <drm/drm_probe_helper.h> 30 + #include <drm/drm_simple_kms_helper.h> 31 + 32 + #define DW_DP_VERSION_NUMBER 0x0000 33 + #define DW_DP_VERSION_TYPE 0x0004 34 + #define DW_DP_ID 0x0008 35 + 36 + #define DW_DP_CONFIG_REG1 0x0100 37 + #define DW_DP_CONFIG_REG2 0x0104 38 + #define DW_DP_CONFIG_REG3 0x0108 39 + 40 + #define DW_DP_CCTL 0x0200 41 + #define FORCE_HPD BIT(4) 42 + #define DEFAULT_FAST_LINK_TRAIN_EN BIT(2) 43 + #define ENHANCE_FRAMING_EN BIT(1) 44 + #define SCRAMBLE_DIS BIT(0) 45 + #define DW_DP_SOFT_RESET_CTRL 0x0204 46 + #define VIDEO_RESET BIT(5) 47 + #define AUX_RESET BIT(4) 48 + #define AUDIO_SAMPLER_RESET BIT(3) 49 + #define HDCP_MODULE_RESET BIT(2) 50 + #define PHY_SOFT_RESET BIT(1) 51 + #define CONTROLLER_RESET BIT(0) 52 + 53 + #define DW_DP_VSAMPLE_CTRL 0x0300 54 + #define PIXEL_MODE_SELECT GENMASK(22, 21) 55 + #define VIDEO_MAPPING GENMASK(20, 16) 56 + #define VIDEO_STREAM_ENABLE BIT(5) 57 + 58 + #define DW_DP_VSAMPLE_STUFF_CTRL1 0x0304 59 + 60 + #define DW_DP_VSAMPLE_STUFF_CTRL2 0x0308 61 + 62 + #define DW_DP_VINPUT_POLARITY_CTRL 0x030c 63 + #define 
DE_IN_POLARITY BIT(2)
#define HSYNC_IN_POLARITY BIT(1)
#define VSYNC_IN_POLARITY BIT(0)

/* Main video stream timing registers, programmed from struct drm_display_mode */
#define DW_DP_VIDEO_CONFIG1 0x0310
#define HACTIVE GENMASK(31, 16)
#define HBLANK GENMASK(15, 2)
#define I_P BIT(1)
#define R_V_BLANK_IN_OSC BIT(0)

#define DW_DP_VIDEO_CONFIG2 0x0314
#define VBLANK GENMASK(31, 16)
#define VACTIVE GENMASK(15, 0)

#define DW_DP_VIDEO_CONFIG3 0x0318
#define H_SYNC_WIDTH GENMASK(31, 16)
#define H_FRONT_PORCH GENMASK(15, 0)

#define DW_DP_VIDEO_CONFIG4 0x031c
#define V_SYNC_WIDTH GENMASK(31, 16)
#define V_FRONT_PORCH GENMASK(15, 0)

/* Transfer-unit and FIFO threshold configuration, see dw_dp_video_enable() */
#define DW_DP_VIDEO_CONFIG5 0x0320
#define INIT_THRESHOLD_HI GENMASK(22, 21)
#define AVERAGE_BYTES_PER_TU_FRAC GENMASK(19, 16)
#define INIT_THRESHOLD GENMASK(13, 7)
#define AVERAGE_BYTES_PER_TU GENMASK(6, 0)

/* Main Stream Attribute (MSA) registers */
#define DW_DP_VIDEO_MSA1 0x0324
#define VSTART GENMASK(31, 16)
#define HSTART GENMASK(15, 0)

#define DW_DP_VIDEO_MSA2 0x0328
#define MISC0 GENMASK(31, 24)

#define DW_DP_VIDEO_MSA3 0x032c
#define MISC1 GENMASK(31, 24)

#define DW_DP_VIDEO_HBLANK_INTERVAL 0x0330
#define HBLANK_INTERVAL_EN BIT(16)
#define HBLANK_INTERVAL GENMASK(15, 0)

/* Audio configuration */
#define DW_DP_AUD_CONFIG1 0x0400
#define AUDIO_TIMESTAMP_VERSION_NUM GENMASK(29, 24)
#define AUDIO_PACKET_ID GENMASK(23, 16)
#define AUDIO_MUTE BIT(15)
#define NUM_CHANNELS GENMASK(14, 12)
#define HBR_MODE_ENABLE BIT(10)
#define AUDIO_DATA_WIDTH GENMASK(9, 5)
#define AUDIO_DATA_IN_EN GENMASK(4, 1)
#define AUDIO_INF_SELECT BIT(0)

/* Secondary Data Packet (SDP) control; per-bank enable bits are shifted by bank index */
#define DW_DP_SDP_VERTICAL_CTRL 0x0500
#define EN_VERTICAL_SDP BIT(2)
#define EN_AUDIO_STREAM_SDP BIT(1)
#define EN_AUDIO_TIMESTAMP_SDP BIT(0)
#define DW_DP_SDP_HORIZONTAL_CTRL 0x0504
#define EN_HORIZONTAL_SDP BIT(2)
#define DW_DP_SDP_STATUS_REGISTER 0x0508
#define DW_DP_SDP_MANUAL_CTRL 0x050c
#define DW_DP_SDP_STATUS_EN 0x0510

/* SDP payload bank: SDP_REG_BANK_SIZE entries of 9 words each */
#define DW_DP_SDP_REGISTER_BANK 0x0600
#define SDP_REGS GENMASK(31, 0)

/* PHY interface control: power state, pattern select, lane/rate programming */
#define DW_DP_PHYIF_CTRL 0x0a00
#define PHY_WIDTH BIT(25)
#define PHY_POWERDOWN GENMASK(20, 17)
#define PHY_BUSY GENMASK(15, 12)
#define SSC_DIS BIT(16)
#define XMIT_ENABLE GENMASK(11, 8)
#define PHY_LANES GENMASK(7, 6)
#define PHY_RATE GENMASK(5, 4)
#define TPS_SEL GENMASK(3, 0)

#define DW_DP_PHY_TX_EQ 0x0a04
#define DW_DP_CUSTOMPAT0 0x0a08
#define DW_DP_CUSTOMPAT1 0x0a0c
#define DW_DP_CUSTOMPAT2 0x0a10
#define DW_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET 0x0a14
#define DW_DP_PHYIF_PWRDOWN_CTRL 0x0a18

/* AUX channel command/status/data */
#define DW_DP_AUX_CMD 0x0b00
#define AUX_CMD_TYPE GENMASK(31, 28)
#define AUX_ADDR GENMASK(27, 8)
#define I2C_ADDR_ONLY BIT(4)
#define AUX_LEN_REQ GENMASK(3, 0)

#define DW_DP_AUX_STATUS 0x0b04
#define AUX_TIMEOUT BIT(17)
#define AUX_BYTES_READ GENMASK(23, 19)
#define AUX_STATUS GENMASK(7, 4)

#define DW_DP_AUX_DATA0 0x0b08
#define DW_DP_AUX_DATA1 0x0b0c
#define DW_DP_AUX_DATA2 0x0b10
#define DW_DP_AUX_DATA3 0x0b14

/* Top-level interrupt status/enable */
#define DW_DP_GENERAL_INTERRUPT 0x0d00
#define VIDEO_FIFO_OVERFLOW_STREAM0 BIT(6)
#define AUDIO_FIFO_OVERFLOW_STREAM0 BIT(5)
#define SDP_EVENT_STREAM0 BIT(4)
#define AUX_CMD_INVALID BIT(3)
#define HDCP_EVENT BIT(2)
#define AUX_REPLY_EVENT BIT(1)
#define HPD_EVENT BIT(0)

#define DW_DP_GENERAL_INTERRUPT_ENABLE 0x0d04
#define HDCP_EVENT_EN BIT(2)
#define AUX_REPLY_EVENT_EN BIT(1)
#define HPD_EVENT_EN BIT(0)

/* Hot-plug detect status; HPD_STATE values are enumerated below (DW_DP_HPD_STATE_*) */
#define DW_DP_HPD_STATUS 0x0d08
#define HPD_STATE GENMASK(11, 9)
#define HPD_STATUS BIT(8)
#define HPD_HOT_UNPLUG BIT(2)
#define HPD_HOT_PLUG BIT(1)
#define HPD_IRQ BIT(0)

#define DW_DP_HPD_INTERRUPT_ENABLE 0x0d0c
#define HPD_UNPLUG_ERR_EN BIT(3)
#define HPD_UNPLUG_EN BIT(2)
#define HPD_PLUG_EN BIT(1)
#define HPD_IRQ_EN BIT(0)

/* HDCP configuration and observation */
#define DW_DP_HDCP_CFG 0x0e00
#define DPCD12PLUS BIT(7)
#define CP_IRQ BIT(6)
#define BYPENCRYPTION BIT(5)
#define HDCP_LOCK BIT(4)
#define ENCRYPTIONDISABLE BIT(3)
#define ENABLE_HDCP_13 BIT(2)
#define ENABLE_HDCP BIT(1)

#define DW_DP_HDCP_OBS 0x0e04
#define HDCP22_RE_AUTHENTICATION_REQ BIT(31)
#define HDCP22_AUTHENTICATION_FAILED BIT(30)
#define HDCP22_AUTHENTICATION_SUCCESS BIT(29)
#define HDCP22_CAPABLE_SINK BIT(28)
#define HDCP22_SINK_CAP_CHECK_COMPLETE BIT(27)
#define HDCP22_STATE GENMASK(26, 24)
#define HDCP22_BOOTED BIT(23)
#define HDCP13_BSTATUS GENMASK(22, 19)
#define REPEATER BIT(18)
#define HDCP_CAPABLE BIT(17)
#define STATEE GENMASK(16, 14)
#define STATEOEG GENMASK(13, 11)
#define STATER GENMASK(10, 8)
#define STATEA GENMASK(7, 4)
#define SUBSTATEA GENMASK(3, 1)
#define HDCPENGAGED BIT(0)

#define DW_DP_HDCP_APIINTCLR 0x0e08
#define DW_DP_HDCP_APIINTSTAT 0x0e0c
#define DW_DP_HDCP_APIINTMSK 0x0e10
#define HDCP22_GPIOINT BIT(8)
#define HDCP_ENGAGED BIT(7)
#define HDCP_FAILED BIT(6)
#define KSVSHA1CALCDONEINT BIT(5)
#define AUXRESPNACK7TIMES BIT(4)
#define AUXRESPTIMEOUT BIT(3)
#define AUXRESPDEFER7TIMES BIT(2)
#define KSVACCESSINT BIT(0)

#define DW_DP_HDCP_KSVMEMCTRL 0x0e18
#define KSVSHA1STATUS BIT(4)
#define KSVMEMACCESS BIT(1)
#define KSVMEMREQUEST BIT(0)

/* HDCP key/BKSV register file */
#define DW_DP_HDCP_REG_BKSV0 0x3600
#define DW_DP_HDCP_REG_BKSV1 0x3604
#define DW_DP_HDCP_REG_ANCONF 0x3608
#define AN_BYPASS BIT(0)

#define DW_DP_HDCP_REG_AN0 0x360c
#define DW_DP_HDCP_REG_AN1 0x3610
#define \
DW_DP_HDCP_REG_RMLCTL 0x3614
#define ODPK_DECRYPT_ENABLE BIT(0)

#define DW_DP_HDCP_REG_RMLSTS 0x3618
#define IDPK_WR_OK_STS BIT(6)
#define IDPK_DATA_INDEX GENMASK(5, 0)
#define DW_DP_HDCP_REG_SEED 0x361c
#define DW_DP_HDCP_REG_DPK0 0x3620
#define DW_DP_HDCP_REG_DPK1 0x3624
#define DW_DP_HDCP22_GPIOSTS 0x3628
#define DW_DP_HDCP22_GPIOCHNGSTS 0x362c
#define DW_DP_HDCP_REG_DPK_CRC 0x3630

/* Highest register address, used for the regmap max_register bound */
#define DW_DP_MAX_REGISTER DW_DP_HDCP_REG_DPK_CRC

/* Number of hardware SDP payload banks, tracked in dw_dp::sdp_reg_bank */
#define SDP_REG_BANK_SIZE 16

/* Sink capabilities parsed from the DPCD in dw_dp_link_parse() */
struct dw_dp_link_caps {
	bool enhanced_framing;
	bool tps3_supported;
	bool tps4_supported;
	bool fast_training;
	bool channel_coding;
	bool ssc;
};

/*
 * Per-lane drive settings requested by the sink during link training.
 * Arrays are sized for the 4-lane maximum; only ::lanes entries are used.
 */
struct dw_dp_link_train_set {
	unsigned int voltage_swing[4];
	unsigned int pre_emphasis[4];
	bool voltage_max_reached[4];
	bool pre_max_reached[4];
};

/* Link-training progress; both flags set means training succeeded */
struct dw_dp_link_train {
	struct dw_dp_link_train_set adjust;
	bool clock_recovered;
	bool channel_equalized;
};

/* Negotiated link state, (re)filled by dw_dp_link_parse() */
struct dw_dp_link {
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	unsigned char revision;	/* DP_DPCD_REV */
	unsigned int rate;	/* link rate in 10 kHz units */
	unsigned int lanes;
	u8 sink_count;
	u8 vsc_sdp_supported;
	struct dw_dp_link_caps caps;
	struct dw_dp_link_train train;
	struct drm_dp_desc desc;
};

/* Driver-private bridge state: selected mode and output format */
struct dw_dp_bridge_state {
	struct drm_bridge_state base;
	struct drm_display_mode mode;
	u8 video_mapping;
	u8 color_format;
	u8 bpc;
	u8 bpp;
};

/* An SDP payload plus DW_DP_SDP_*_INTERVAL flags selecting when to send it */
struct dw_dp_sdp {
	struct dp_sdp base;
	unsigned long flags;
};

struct dw_dp_hotplug {
	bool long_hpd;
};

/* Main driver instance */
struct dw_dp {
	struct drm_bridge bridge;
	struct device *dev;
	struct regmap *regmap;
	struct phy *phy;
	struct clk *apb_clk;
	struct clk *aux_clk;
	struct clk *i2s_clk;
	struct clk *spdif_clk;
	struct clk *hdcp_clk;
	struct reset_control *rstc;
	struct completion complete;
	int irq;
	struct work_struct hpd_work;
	struct dw_dp_hotplug hotplug;
	/* Serialize hpd status access */
	struct mutex irq_lock;

	struct drm_dp_aux aux;

	struct dw_dp_link link;
	struct dw_dp_plat_data plat_data;
	u8 pixel_mode;

	/* Allocation bitmap for the hardware SDP banks, see dw_dp_send_sdp() */
	DECLARE_BITMAP(sdp_reg_bank, SDP_REG_BANK_SIZE);
};

/* VIDEO_MAPPING field values: input bus format as seen by the controller */
enum {
	DW_DP_RGB_6BIT,
	DW_DP_RGB_8BIT,
	DW_DP_RGB_10BIT,
	DW_DP_RGB_12BIT,
	DW_DP_RGB_16BIT,
	DW_DP_YCBCR444_8BIT,
	DW_DP_YCBCR444_10BIT,
	DW_DP_YCBCR444_12BIT,
	DW_DP_YCBCR444_16BIT,
	DW_DP_YCBCR422_8BIT,
	DW_DP_YCBCR422_10BIT,
	DW_DP_YCBCR422_12BIT,
	DW_DP_YCBCR422_16BIT,
	DW_DP_YCBCR420_8BIT,
	DW_DP_YCBCR420_10BIT,
	DW_DP_YCBCR420_12BIT,
	DW_DP_YCBCR420_16BIT,
};

/* PIXEL_MODE_SELECT field values: pixels transferred per clock */
enum {
	DW_DP_MP_SINGLE_PIXEL,
	DW_DP_MP_DUAL_PIXEL,
	DW_DP_MP_QUAD_PIXEL,
};

/* dw_dp_sdp::flags: send the SDP in the vertical and/or horizontal blanking */
enum {
	DW_DP_SDP_VERTICAL_INTERVAL = BIT(0),
	DW_DP_SDP_HORIZONTAL_INTERVAL = BIT(1),
};

/*
 * HPD_STATE field values (hardware HPD state machine).
 * NOTE(review): "DP_DP_HPD_STATE_TIMEOUT" looks like a typo for the DW_DP_
 * prefix used by its siblings — confirm before renaming, the constant may be
 * referenced elsewhere in the file.
 */
enum {
	DW_DP_HPD_STATE_IDLE,
	DW_DP_HPD_STATE_UNPLUG,
	DP_DP_HPD_STATE_TIMEOUT = 4,
	DW_DP_HPD_STATE_PLUG = 7
};

/*
 * TPS_SEL field values: test/training pattern generated by the PHY interface.
 * NOTE(review): "PBRS7" presumably means the PRBS7 pattern — verify against
 * the databook before renaming.
 */
enum {
	DW_DP_PHY_PATTERN_NONE,
	DW_DP_PHY_PATTERN_TPS_1,
	DW_DP_PHY_PATTERN_TPS_2,
	DW_DP_PHY_PATTERN_TPS_3,
	DW_DP_PHY_PATTERN_TPS_4,
	DW_DP_PHY_PATTERN_SERM,
	DW_DP_PHY_PATTERN_PBRS7,
	DW_DP_PHY_PATTERN_CUSTOM_80BIT,
	DW_DP_PHY_PATTERN_CP2520_1,
	DW_DP_PHY_PATTERN_CP2520_2,
};

/* Maps a media bus format to DRM color format, VIDEO_MAPPING value, bpc, bpp */
struct dw_dp_output_format {
	u32 bus_format;
	u32 color_format;
	u8 video_mapping;
	u8 bpc;
	u8 bpp;
};

#define to_dw_dp_bridge_state(s) container_of(s, struct dw_dp_bridge_state, base)

/* Supported output formats, ordered by preference (highest depth first) */
static const struct dw_dp_output_format dw_dp_output_formats[] = {
	{
MEDIA_BUS_FMT_RGB101010_1X30, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_10BIT, 10, 30 },
	{ MEDIA_BUS_FMT_RGB888_1X24, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_8BIT, 8, 24 },
	{ MEDIA_BUS_FMT_YUV10_1X30, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_10BIT, 10, 30 },
	{ MEDIA_BUS_FMT_YUV8_1X24, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_8BIT, 8, 24},
	{ MEDIA_BUS_FMT_YUYV10_1X20, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_10BIT, 10, 20 },
	{ MEDIA_BUS_FMT_YUYV8_1X16, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_8BIT, 8, 16 },
	{ MEDIA_BUS_FMT_UYYVYY10_0_5X30, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_10BIT, 10, 15 },
	{ MEDIA_BUS_FMT_UYYVYY8_0_5X24, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_8BIT, 8, 12 },
	{ MEDIA_BUS_FMT_RGB666_1X24_CPADHI, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_6BIT, 6, 18 },
};

/* Look up the output format entry for @bus_format; NULL if unsupported. */
static const struct dw_dp_output_format *dw_dp_get_output_format(u32 bus_format)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++)
		if (dw_dp_output_formats[i].bus_format == bus_format)
			return &dw_dp_output_formats[i];

	return NULL;
}

static inline struct dw_dp *bridge_to_dp(struct drm_bridge *b)
{
	return container_of(b, struct dw_dp, bridge);
}

/*
 * Return the driver-private bridge state derived from the bridge's current
 * atomic state, or NULL if no state has been attached yet.
 */
static struct dw_dp_bridge_state *dw_dp_get_bridge_state(struct dw_dp *dp)
{
	struct dw_dp_bridge_state *dw_bridge_state;
	struct drm_bridge_state *state;

	state = drm_priv_to_bridge_state(dp->bridge.base.state);
	if (!state)
		return NULL;

	dw_bridge_state = to_dw_dp_bridge_state(state);
	if (!dw_bridge_state)
		return NULL;

	return dw_bridge_state;
}

/* Select the PHY test/training pattern (one of DW_DP_PHY_PATTERN_*). */
static inline void dw_dp_phy_set_pattern(struct dw_dp *dp, u32 pattern)
{
	regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, TPS_SEL,
			   FIELD_PREP(TPS_SEL, pattern));
}

/* Enable transmission on the first @lanes lanes (0 disables all lanes). */
static void dw_dp_phy_xmit_enable(struct dw_dp *dp, u32 lanes)
{
	u32 xmit_enable;

	switch (lanes) {
	case 4:
	case 2:
	case 1:
		xmit_enable = GENMASK(lanes - 1, 0);
		break;
	case 0:
	default:
		xmit_enable = 0;
		break;
	}

	regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, XMIT_ENABLE,
			   FIELD_PREP(XMIT_ENABLE, xmit_enable));
}

/*
 * Check that @mode at @bpp bits per pixel fits in the bandwidth of a link
 * with @lanes lanes at @rate (10 kHz units); mode->clock is in kHz, so both
 * sides of the comparison end up in kilobytes per second.
 */
static bool dw_dp_bandwidth_ok(struct dw_dp *dp,
			       const struct drm_display_mode *mode, u32 bpp,
			       unsigned int lanes, unsigned int rate)
{
	u32 max_bw, req_bw;

	req_bw = mode->clock * bpp / 8;
	max_bw = lanes * rate;
	if (req_bw > max_bw)
		return false;

	return true;
}

/* Return true if the HPD state machine reports a plugged sink. */
static bool dw_dp_hpd_detect(struct dw_dp *dp)
{
	u32 value;

	regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);

	return FIELD_GET(HPD_STATE, value) == DW_DP_HPD_STATE_PLUG;
}

/* Clear parsed sink capabilities (note: ->ssc is not reset here). */
static void dw_dp_link_caps_reset(struct dw_dp_link_caps *caps)
{
	caps->enhanced_framing = false;
	caps->tps3_supported = false;
	caps->tps4_supported = false;
	caps->fast_training = false;
	caps->channel_coding = false;
}

/* Reset all negotiated link state before re-reading the DPCD. */
static void dw_dp_link_reset(struct dw_dp_link *link)
{
	link->vsc_sdp_supported = 0;
	link->sink_count = 0;
	link->revision = 0;
	link->rate = 0;
	link->lanes = 0;

	dw_dp_link_caps_reset(&link->caps);
	memset(link->dpcd, 0, sizeof(link->dpcd));
}

/*
 * Read the sink's DPCD and fill dp->link: negotiated rate/lane count is the
 * minimum of platform, PHY and sink limits.  Returns 0 on success, -ENODEV
 * if a branch device reports no downstream sinks, or a negative AUX error.
 */
static int dw_dp_link_parse(struct dw_dp *dp, struct drm_connector *connector)
{
	struct dw_dp_link *link = &dp->link;
	int ret;

	dw_dp_link_reset(link);

	ret = drm_dp_read_dpcd_caps(&dp->aux, link->dpcd);
	if (ret < 0)
		return ret;

	drm_dp_read_desc(&dp->aux, &link->desc, drm_dp_is_branch(link->dpcd));

	if (drm_dp_read_sink_count_cap(connector, link->dpcd, &link->desc)) {
		ret =
drm_dp_read_sink_count(&dp->aux);
		if (ret < 0)
			return ret;

		link->sink_count = ret;

		/* Dongle connected, but no display */
		if (!link->sink_count)
			return -ENODEV;
	}

	link->vsc_sdp_supported = drm_dp_vsc_sdp_supported(&dp->aux, link->dpcd);

	link->revision = link->dpcd[DP_DPCD_REV];
	/* Link rate: min of platform limit, PHY limit (Mbps * 100) and sink cap */
	link->rate = min_t(u32, min(dp->plat_data.max_link_rate,
				    dp->phy->attrs.max_link_rate * 100),
			   drm_dp_max_link_rate(link->dpcd));
	link->lanes = min_t(u8, phy_get_bus_width(dp->phy),
			    drm_dp_max_lane_count(link->dpcd));

	link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(link->dpcd);
	link->caps.tps3_supported = drm_dp_tps3_supported(link->dpcd);
	link->caps.tps4_supported = drm_dp_tps4_supported(link->dpcd);
	link->caps.fast_training = drm_dp_fast_training_cap(link->dpcd);
	link->caps.channel_coding = drm_dp_channel_coding_supported(link->dpcd);
	link->caps.ssc = !!(link->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);

	return 0;
}

/*
 * Apply the per-lane voltage-swing/pre-emphasis adjustments requested during
 * link training: first to the local PHY, then mirror them to the sink's
 * DP_TRAINING_LANEx_SET registers.  Returns 0 or a negative error.
 */
static int dw_dp_link_train_update_vs_emph(struct dw_dp *dp)
{
	struct dw_dp_link *link = &dp->link;
	struct dw_dp_link_train_set *train_set = &link->train.adjust;
	unsigned int lanes = dp->link.lanes;
	union phy_configure_opts phy_cfg;
	unsigned int *vs, *pe;
	int i, ret;
	u8 buf[4];

	vs = train_set->voltage_swing;
	pe = train_set->pre_emphasis;

	for (i = 0; i < lanes; i++) {
		phy_cfg.dp.voltage[i] = vs[i];
		phy_cfg.dp.pre[i] = pe[i];
	}

	phy_cfg.dp.set_lanes = false;
	phy_cfg.dp.set_rate = false;
	phy_cfg.dp.set_voltages = true;

	ret = phy_configure(dp->phy, &phy_cfg);
	if (ret)
		return ret;

	for (i = 0; i < lanes; i++) {
		buf[i] = (vs[i] << DP_TRAIN_VOLTAGE_SWING_SHIFT) |
			 (pe[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT);
		if (train_set->voltage_max_reached[i])
			buf[i] |= DP_TRAIN_MAX_SWING_REACHED;
		if (train_set->pre_max_reached[i])
			buf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	}

	ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf, lanes);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Reconfigure the PHY for @rate (10 kHz units), @lanes and @ssc.  The PHY is
 * parked in power state P3 while the new rate/lane configuration is applied,
 * then returned to P0 and lane transmission is re-enabled; the order of
 * these register writes is mandated by the controller.
 */
static int dw_dp_phy_configure(struct dw_dp *dp, unsigned int rate,
			       unsigned int lanes, bool ssc)
{
	union phy_configure_opts phy_cfg;
	int ret;

	/* Move PHY to P3 */
	regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
			   FIELD_PREP(PHY_POWERDOWN, 0x3));

	phy_cfg.dp.lanes = lanes;
	phy_cfg.dp.link_rate = rate / 100;
	phy_cfg.dp.ssc = ssc;
	phy_cfg.dp.set_lanes = true;
	phy_cfg.dp.set_rate = true;
	phy_cfg.dp.set_voltages = false;
	ret = phy_configure(dp->phy, &phy_cfg);
	if (ret)
		return ret;

	/* PHY_LANES encodes 1/2/4 lanes as 0/1/2 */
	regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_LANES,
			   FIELD_PREP(PHY_LANES, lanes / 2));

	/* Move PHY to P0 */
	regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
			   FIELD_PREP(PHY_POWERDOWN, 0x0));

	dw_dp_phy_xmit_enable(dp, lanes);

	return 0;
}

/*
 * Program the negotiated link configuration (rate, lane count, enhanced
 * framing, downspread, channel coding) into both the controller and the
 * sink's DPCD.  Returns 0 or a negative error.
 */
static int dw_dp_link_configure(struct dw_dp *dp)
{
	struct dw_dp_link *link = &dp->link;
	u8 buf[2];
	int ret;

	ret = dw_dp_phy_configure(dp, link->rate, link->lanes, link->caps.ssc);
	if (ret)
		return ret;

	buf[0] = drm_dp_link_rate_to_bw_code(link->rate);
	buf[1] = link->lanes;

	if (link->caps.enhanced_framing) {
		buf[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
		regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
				   FIELD_PREP(ENHANCE_FRAMING_EN, 1));
	} else {
		regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
				   FIELD_PREP(ENHANCE_FRAMING_EN, 0));
	}

	ret =
drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	buf[0] = link->caps.ssc ? DP_SPREAD_AMP_0_5 : 0;
	buf[1] = link->caps.channel_coding ? DP_SET_ANSI_8B10B : 0;

	ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	return 0;
}

/* Reset all per-lane adjustments and progress flags before (re)training. */
static void dw_dp_link_train_init(struct dw_dp_link_train *train)
{
	struct dw_dp_link_train_set *adj = &train->adjust;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		adj->voltage_swing[i] = 0;
		adj->pre_emphasis[i] = 0;
		adj->voltage_max_reached[i] = false;
		adj->pre_max_reached[i] = false;
	}

	train->clock_recovered = false;
	train->channel_equalized = false;
}

/* True if a previous training run fully succeeded (fast-training precondition). */
static bool dw_dp_link_train_valid(const struct dw_dp_link_train *train)
{
	return train->clock_recovered && train->channel_equalized;
}

/*
 * Select training pattern @pattern (a DP_TRAINING_PATTERN_* value) on both
 * the source PHY and the sink.  Scrambling is disabled for all training
 * patterns except TPS4, per the DisplayPort specification.
 */
static int dw_dp_link_train_set_pattern(struct dw_dp *dp, u32 pattern)
{
	u8 buf = 0;
	int ret;

	if (pattern && pattern != DP_TRAINING_PATTERN_4) {
		buf |= DP_LINK_SCRAMBLING_DISABLE;

		regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
				   FIELD_PREP(SCRAMBLE_DIS, 1));
	} else {
		regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
				   FIELD_PREP(SCRAMBLE_DIS, 0));
	}

	switch (pattern) {
	case DP_TRAINING_PATTERN_DISABLE:
		dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_NONE);
		break;
	case DP_TRAINING_PATTERN_1:
		dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_1);
		break;
	case DP_TRAINING_PATTERN_2:
		dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_2);
		break;
	case DP_TRAINING_PATTERN_3:
		dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_3);
		break;
	case DP_TRAINING_PATTERN_4:
		dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_4);
		break;
	default:
		return -EINVAL;
	}

	ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
				 buf | pattern);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Maximum voltage swing usable with @preemph: swing + pre-emphasis combined
 * drive strength is capped, so a higher pre-emphasis level allows less swing.
 */
static u8 dw_dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}

/*
 * Fold the sink's per-lane adjustment requests from @status into
 * link->train.adjust, clamping swing against the pre-emphasis level and
 * latching the "max reached" flags.  Returns true if any requested value
 * differs from the previous one.
 */
static bool dw_dp_link_get_adjustments(struct dw_dp_link *link,
				       u8 status[DP_LINK_STATUS_SIZE])
{
	struct dw_dp_link_train_set *adj = &link->train.adjust;
	unsigned int i;
	bool changed = false;
	u8 v = 0;
	u8 p = 0;

	for (i = 0; i < link->lanes; i++) {
		v = drm_dp_get_adjust_request_voltage(status, i);
		v >>= DP_TRAIN_VOLTAGE_SWING_SHIFT;
		p = drm_dp_get_adjust_request_pre_emphasis(status, i);
		p >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;

		if (v != adj->voltage_swing[i] || p != adj->pre_emphasis[i])
			changed = true;

		if (p >= (DP_TRAIN_PRE_EMPH_LEVEL_3 >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) {
			adj->pre_emphasis[i] = DP_TRAIN_PRE_EMPH_LEVEL_3 >>
					       DP_TRAIN_PRE_EMPHASIS_SHIFT;
			adj->pre_max_reached[i] = true;
		} else {
			adj->pre_emphasis[i] = p;
			adj->pre_max_reached[i] = false;
		}

		v = min(v, dw_dp_voltage_max(p));
		if (v >= (DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >> DP_TRAIN_VOLTAGE_SWING_SHIFT)) {
			adj->voltage_swing[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >>
						DP_TRAIN_VOLTAGE_SWING_SHIFT;
			adj->voltage_max_reached[i] = true;
		} else {
			adj->voltage_swing[i] = v;
			adj->voltage_max_reached[i] = false;
		}
	}
	return changed;
}

/*
 * Clock-recovery phase of link training (TPS1).  Per DP 1.4, retry with the
 * same drive settings at most 5 times before giving up; success is recorded
 * in link->train.clock_recovered rather than the return value, which only
 * reports AUX/PHY errors.
 */
static int dw_dp_link_clock_recovery(struct dw_dp *dp)
{
	struct dw_dp_link *link = &dp->link;
	u8 status[DP_LINK_STATUS_SIZE];
	unsigned int tries = 0;
	int ret;
	bool adj_changed;

	ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
	if (ret)
		return ret;

	for (;;) {
		ret = dw_dp_link_train_update_vs_emph(dp);
		if (ret)
			return ret;

		drm_dp_link_train_clock_recovery_delay(&dp->aux, link->dpcd);

		ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
		if (ret < 0) {
			dev_err(dp->dev, "failed to read link status: %d\n", ret);
			return ret;
		}

		if (drm_dp_clock_recovery_ok(status, link->lanes)) {
			link->train.clock_recovered = true;
			break;
		}

		/*
		 * According to DP spec 1.4, if current ADJ is the same
		 * with previous REQ, we need to retry 5 times.
		 */
		adj_changed = dw_dp_link_get_adjustments(link, status);
		if (!adj_changed)
			tries++;
		else
			tries = 0;

		if (tries == 5)
			break;
	}

	return 0;
}

/*
 * Channel-equalization phase of link training, using the best pattern the
 * sink supports (TPS4 > TPS3 > TPS2), with at most 4 adjustment iterations.
 * Success/failure is recorded in link->train.channel_equalized (and
 * clock_recovered is cleared if lock is lost); the return value only
 * reports AUX/PHY errors.
 */
static int dw_dp_link_channel_equalization(struct dw_dp *dp)
{
	struct dw_dp_link *link = &dp->link;
	u8 status[DP_LINK_STATUS_SIZE], pattern;
	unsigned int tries;
	int ret;

	if (link->caps.tps4_supported)
		pattern = DP_TRAINING_PATTERN_4;
	else if (link->caps.tps3_supported)
		pattern = DP_TRAINING_PATTERN_3;
	else
		pattern = DP_TRAINING_PATTERN_2;
	ret = dw_dp_link_train_set_pattern(dp, pattern);
	if (ret)
		return ret;

	for (tries = 1; tries < 5; tries++) {
		ret = dw_dp_link_train_update_vs_emph(dp);
		if (ret)
			return ret;

		drm_dp_link_train_channel_eq_delay(&dp->aux, link->dpcd);

		ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
		if (ret < 0)
			return ret;

		if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
			dev_err(dp->dev, "clock recovery lost while equalizing channel\n");
			link->train.clock_recovered = false;
			break;
		}

		if (drm_dp_channel_eq_ok(status, link->lanes)) {
			link->train.channel_equalized = true;
			break;
		}

		dw_dp_link_get_adjustments(link, status);
	}

	return 0;
}

/*
 * Drop to the next lower link rate after a training failure.  Returns
 * -EINVAL when already at the minimum rate and -E2BIG when the current mode
 * no longer fits in the reduced bandwidth.
 * NOTE(review): the bridge state from dw_dp_get_bridge_state() is
 * dereferenced without a NULL check here, unlike other callers — confirm
 * training can only run with a committed state.
 */
static int dw_dp_link_downgrade(struct dw_dp *dp)
{
	struct dw_dp_link *link = &dp->link;
	struct dw_dp_bridge_state *state;

	state = dw_dp_get_bridge_state(dp);

	switch (link->rate) {
	case 162000:
		return -EINVAL;
	case 270000:
		link->rate = 162000;
		break;
	case 540000:
		link->rate = 270000;
		break;
	case 810000:
		link->rate = 540000;
		break;
	}

	if (!dw_dp_bandwidth_ok(dp, &state->mode, state->bpp, link->lanes,
				link->rate))
		return -E2BIG;

	return 0;
}

/*
 * Full link-training sequence: configure the link, run clock recovery and
 * channel equalization, downgrading the link rate and retrying on failure.
 * The training pattern is always cleared before returning.
 */
static int dw_dp_link_train_full(struct dw_dp *dp)
{
	struct dw_dp_link *link = &dp->link;
	int ret;

retry:
	dw_dp_link_train_init(&link->train);

	dev_dbg(dp->dev, "full-training link: %u lane%s at %u MHz\n",
		link->lanes, (link->lanes > 1) ?
"s" : "", link->rate / 100); 914 + 915 + ret = dw_dp_link_configure(dp); 916 + if (ret < 0) { 917 + dev_err(dp->dev, "failed to configure DP link: %d\n", ret); 918 + return ret; 919 + } 920 + 921 + ret = dw_dp_link_clock_recovery(dp); 922 + if (ret < 0) { 923 + dev_err(dp->dev, "clock recovery failed: %d\n", ret); 924 + goto out; 925 + } 926 + 927 + if (!link->train.clock_recovered) { 928 + dev_err(dp->dev, "clock recovery failed, downgrading link\n"); 929 + 930 + ret = dw_dp_link_downgrade(dp); 931 + if (ret < 0) 932 + goto out; 933 + else 934 + goto retry; 935 + } 936 + 937 + dev_dbg(dp->dev, "clock recovery succeeded\n"); 938 + 939 + ret = dw_dp_link_channel_equalization(dp); 940 + if (ret < 0) { 941 + dev_err(dp->dev, "channel equalization failed: %d\n", ret); 942 + goto out; 943 + } 944 + 945 + if (!link->train.channel_equalized) { 946 + dev_err(dp->dev, "channel equalization failed, downgrading link\n"); 947 + 948 + ret = dw_dp_link_downgrade(dp); 949 + if (ret < 0) 950 + goto out; 951 + else 952 + goto retry; 953 + } 954 + 955 + dev_dbg(dp->dev, "channel equalization succeeded\n"); 956 + 957 + out: 958 + dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE); 959 + return ret; 960 + } 961 + 962 + static int dw_dp_link_train_fast(struct dw_dp *dp) 963 + { 964 + struct dw_dp_link *link = &dp->link; 965 + int ret; 966 + u8 status[DP_LINK_STATUS_SIZE]; 967 + u8 pattern; 968 + 969 + dw_dp_link_train_init(&link->train); 970 + 971 + dev_dbg(dp->dev, "fast-training link: %u lane%s at %u MHz\n", 972 + link->lanes, (link->lanes > 1) ? 
"s" : "", link->rate / 100); 973 + 974 + ret = dw_dp_link_configure(dp); 975 + if (ret < 0) { 976 + dev_err(dp->dev, "failed to configure DP link: %d\n", ret); 977 + return ret; 978 + } 979 + 980 + ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1); 981 + if (ret) 982 + goto out; 983 + 984 + usleep_range(500, 1000); 985 + 986 + if (link->caps.tps4_supported) 987 + pattern = DP_TRAINING_PATTERN_4; 988 + else if (link->caps.tps3_supported) 989 + pattern = DP_TRAINING_PATTERN_3; 990 + else 991 + pattern = DP_TRAINING_PATTERN_2; 992 + ret = dw_dp_link_train_set_pattern(dp, pattern); 993 + if (ret) 994 + goto out; 995 + 996 + usleep_range(500, 1000); 997 + 998 + ret = drm_dp_dpcd_read_link_status(&dp->aux, status); 999 + if (ret < 0) { 1000 + dev_err(dp->dev, "failed to read link status: %d\n", ret); 1001 + goto out; 1002 + } 1003 + 1004 + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { 1005 + dev_err(dp->dev, "clock recovery failed\n"); 1006 + ret = -EIO; 1007 + goto out; 1008 + } 1009 + 1010 + if (!drm_dp_channel_eq_ok(status, link->lanes)) { 1011 + dev_err(dp->dev, "channel equalization failed\n"); 1012 + ret = -EIO; 1013 + goto out; 1014 + } 1015 + 1016 + out: 1017 + dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE); 1018 + return ret; 1019 + } 1020 + 1021 + static int dw_dp_link_train(struct dw_dp *dp) 1022 + { 1023 + struct dw_dp_link *link = &dp->link; 1024 + int ret; 1025 + 1026 + if (link->caps.fast_training) { 1027 + if (dw_dp_link_train_valid(&link->train)) { 1028 + ret = dw_dp_link_train_fast(dp); 1029 + if (ret < 0) 1030 + dev_err(dp->dev, "fast link training failed: %d\n", ret); 1031 + else 1032 + return 0; 1033 + } 1034 + } 1035 + 1036 + ret = dw_dp_link_train_full(dp); 1037 + if (ret < 0) { 1038 + dev_err(dp->dev, "full link training failed: %d\n", ret); 1039 + return ret; 1040 + } 1041 + 1042 + return 0; 1043 + } 1044 + 1045 + static int dw_dp_send_sdp(struct dw_dp *dp, struct dw_dp_sdp *sdp) 1046 + { 1047 + const u8 
*payload = sdp->base.db; 1048 + u32 reg; 1049 + int i, nr; 1050 + 1051 + nr = find_first_zero_bit(dp->sdp_reg_bank, SDP_REG_BANK_SIZE); 1052 + if (nr < SDP_REG_BANK_SIZE) 1053 + set_bit(nr, dp->sdp_reg_bank); 1054 + else 1055 + return -EBUSY; 1056 + 1057 + reg = DW_DP_SDP_REGISTER_BANK + nr * 9 * 4; 1058 + 1059 + /* SDP header */ 1060 + regmap_write(dp->regmap, reg, get_unaligned_le32(&sdp->base.sdp_header)); 1061 + 1062 + /* SDP data payload */ 1063 + for (i = 1; i < 9; i++, payload += 4) 1064 + regmap_write(dp->regmap, reg + i * 4, 1065 + FIELD_PREP(SDP_REGS, get_unaligned_le32(payload))); 1066 + 1067 + if (sdp->flags & DW_DP_SDP_VERTICAL_INTERVAL) 1068 + regmap_update_bits(dp->regmap, DW_DP_SDP_VERTICAL_CTRL, 1069 + EN_VERTICAL_SDP << nr, 1070 + EN_VERTICAL_SDP << nr); 1071 + 1072 + if (sdp->flags & DW_DP_SDP_HORIZONTAL_INTERVAL) 1073 + regmap_update_bits(dp->regmap, DW_DP_SDP_HORIZONTAL_CTRL, 1074 + EN_HORIZONTAL_SDP << nr, 1075 + EN_HORIZONTAL_SDP << nr); 1076 + 1077 + return 0; 1078 + } 1079 + 1080 + static int dw_dp_send_vsc_sdp(struct dw_dp *dp) 1081 + { 1082 + struct dw_dp_bridge_state *state; 1083 + struct dw_dp_sdp sdp = {}; 1084 + struct drm_dp_vsc_sdp vsc = {}; 1085 + 1086 + state = dw_dp_get_bridge_state(dp); 1087 + if (!state) 1088 + return -EINVAL; 1089 + 1090 + vsc.bpc = state->bpc; 1091 + 1092 + vsc.sdp_type = DP_SDP_VSC; 1093 + vsc.revision = 0x5; 1094 + vsc.length = 0x13; 1095 + vsc.content_type = DP_CONTENT_TYPE_NOT_DEFINED; 1096 + 1097 + sdp.flags = DW_DP_SDP_VERTICAL_INTERVAL; 1098 + 1099 + switch (state->color_format) { 1100 + case DRM_COLOR_FORMAT_YCBCR444: 1101 + vsc.pixelformat = DP_PIXELFORMAT_YUV444; 1102 + break; 1103 + case DRM_COLOR_FORMAT_YCBCR420: 1104 + vsc.pixelformat = DP_PIXELFORMAT_YUV420; 1105 + break; 1106 + case DRM_COLOR_FORMAT_YCBCR422: 1107 + vsc.pixelformat = DP_PIXELFORMAT_YUV422; 1108 + break; 1109 + case DRM_COLOR_FORMAT_RGB444: 1110 + default: 1111 + vsc.pixelformat = DP_PIXELFORMAT_RGB; 1112 + break; 1113 + } 1114 
+ 1115 + if (state->color_format == DRM_COLOR_FORMAT_RGB444) { 1116 + vsc.colorimetry = DP_COLORIMETRY_DEFAULT; 1117 + vsc.dynamic_range = DP_DYNAMIC_RANGE_VESA; 1118 + } else { 1119 + vsc.colorimetry = DP_COLORIMETRY_BT709_YCC; 1120 + vsc.dynamic_range = DP_DYNAMIC_RANGE_CTA; 1121 + } 1122 + 1123 + drm_dp_vsc_sdp_pack(&vsc, &sdp.base); 1124 + 1125 + return dw_dp_send_sdp(dp, &sdp); 1126 + } 1127 + 1128 + static int dw_dp_video_set_pixel_mode(struct dw_dp *dp) 1129 + { 1130 + switch (dp->pixel_mode) { 1131 + case DW_DP_MP_SINGLE_PIXEL: 1132 + case DW_DP_MP_DUAL_PIXEL: 1133 + case DW_DP_MP_QUAD_PIXEL: 1134 + break; 1135 + default: 1136 + return -EINVAL; 1137 + } 1138 + 1139 + regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, PIXEL_MODE_SELECT, 1140 + FIELD_PREP(PIXEL_MODE_SELECT, dp->pixel_mode)); 1141 + 1142 + return 0; 1143 + } 1144 + 1145 + static bool dw_dp_video_need_vsc_sdp(struct dw_dp *dp) 1146 + { 1147 + struct dw_dp_link *link = &dp->link; 1148 + struct dw_dp_bridge_state *state; 1149 + 1150 + state = dw_dp_get_bridge_state(dp); 1151 + if (!state) 1152 + return -EINVAL; 1153 + 1154 + if (!link->vsc_sdp_supported) 1155 + return false; 1156 + 1157 + if (state->color_format == DRM_COLOR_FORMAT_YCBCR420) 1158 + return true; 1159 + 1160 + return false; 1161 + } 1162 + 1163 + static int dw_dp_video_set_msa(struct dw_dp *dp, u8 color_format, u8 bpc, 1164 + u16 vstart, u16 hstart) 1165 + { 1166 + u16 misc = 0; 1167 + 1168 + if (dw_dp_video_need_vsc_sdp(dp)) 1169 + misc |= DP_MSA_MISC_COLOR_VSC_SDP; 1170 + 1171 + switch (color_format) { 1172 + case DRM_COLOR_FORMAT_RGB444: 1173 + misc |= DP_MSA_MISC_COLOR_RGB; 1174 + break; 1175 + case DRM_COLOR_FORMAT_YCBCR444: 1176 + misc |= DP_MSA_MISC_COLOR_YCBCR_444_BT709; 1177 + break; 1178 + case DRM_COLOR_FORMAT_YCBCR422: 1179 + misc |= DP_MSA_MISC_COLOR_YCBCR_422_BT709; 1180 + break; 1181 + case DRM_COLOR_FORMAT_YCBCR420: 1182 + break; 1183 + default: 1184 + return -EINVAL; 1185 + } 1186 + 1187 + switch (bpc) { 1188 + 
case 6: 1189 + misc |= DP_MSA_MISC_6_BPC; 1190 + break; 1191 + case 8: 1192 + misc |= DP_MSA_MISC_8_BPC; 1193 + break; 1194 + case 10: 1195 + misc |= DP_MSA_MISC_10_BPC; 1196 + break; 1197 + case 12: 1198 + misc |= DP_MSA_MISC_12_BPC; 1199 + break; 1200 + case 16: 1201 + misc |= DP_MSA_MISC_16_BPC; 1202 + break; 1203 + default: 1204 + return -EINVAL; 1205 + } 1206 + 1207 + regmap_write(dp->regmap, DW_DP_VIDEO_MSA1, 1208 + FIELD_PREP(VSTART, vstart) | FIELD_PREP(HSTART, hstart)); 1209 + regmap_write(dp->regmap, DW_DP_VIDEO_MSA2, FIELD_PREP(MISC0, misc)); 1210 + regmap_write(dp->regmap, DW_DP_VIDEO_MSA3, FIELD_PREP(MISC1, misc >> 8)); 1211 + 1212 + return 0; 1213 + } 1214 + 1215 + static void dw_dp_video_disable(struct dw_dp *dp) 1216 + { 1217 + regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE, 1218 + FIELD_PREP(VIDEO_STREAM_ENABLE, 0)); 1219 + } 1220 + 1221 + static int dw_dp_video_enable(struct dw_dp *dp) 1222 + { 1223 + struct dw_dp_link *link = &dp->link; 1224 + struct dw_dp_bridge_state *state; 1225 + struct drm_display_mode *mode; 1226 + u8 color_format, bpc, bpp; 1227 + u8 init_threshold, vic; 1228 + u32 hstart, hactive, hblank, h_sync_width, h_front_porch; 1229 + u32 vstart, vactive, vblank, v_sync_width, v_front_porch; 1230 + u32 peak_stream_bandwidth, link_bandwidth; 1231 + u32 average_bytes_per_tu, average_bytes_per_tu_frac; 1232 + u32 ts, hblank_interval; 1233 + u32 value; 1234 + int ret; 1235 + 1236 + state = dw_dp_get_bridge_state(dp); 1237 + if (!state) 1238 + return -EINVAL; 1239 + 1240 + bpc = state->bpc; 1241 + bpp = state->bpp; 1242 + color_format = state->color_format; 1243 + mode = &state->mode; 1244 + 1245 + vstart = mode->vtotal - mode->vsync_start; 1246 + hstart = mode->htotal - mode->hsync_start; 1247 + 1248 + ret = dw_dp_video_set_pixel_mode(dp); 1249 + if (ret) 1250 + return ret; 1251 + 1252 + ret = dw_dp_video_set_msa(dp, color_format, bpc, vstart, hstart); 1253 + if (ret) 1254 + return ret; 1255 + 1256 + 
regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_MAPPING, 1257 + FIELD_PREP(VIDEO_MAPPING, state->video_mapping)); 1258 + 1259 + /* Configure DW_DP_VINPUT_POLARITY_CTRL register */ 1260 + value = 0; 1261 + if (mode->flags & DRM_MODE_FLAG_PHSYNC) 1262 + value |= FIELD_PREP(HSYNC_IN_POLARITY, 1); 1263 + if (mode->flags & DRM_MODE_FLAG_PVSYNC) 1264 + value |= FIELD_PREP(VSYNC_IN_POLARITY, 1); 1265 + regmap_write(dp->regmap, DW_DP_VINPUT_POLARITY_CTRL, value); 1266 + 1267 + /* Configure DW_DP_VIDEO_CONFIG1 register */ 1268 + hactive = mode->hdisplay; 1269 + hblank = mode->htotal - mode->hdisplay; 1270 + value = FIELD_PREP(HACTIVE, hactive) | FIELD_PREP(HBLANK, hblank); 1271 + if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1272 + value |= FIELD_PREP(I_P, 1); 1273 + vic = drm_match_cea_mode(mode); 1274 + if (vic == 5 || vic == 6 || vic == 7 || 1275 + vic == 10 || vic == 11 || vic == 20 || 1276 + vic == 21 || vic == 22 || vic == 39 || 1277 + vic == 25 || vic == 26 || vic == 40 || 1278 + vic == 44 || vic == 45 || vic == 46 || 1279 + vic == 50 || vic == 51 || vic == 54 || 1280 + vic == 55 || vic == 58 || vic == 59) 1281 + value |= R_V_BLANK_IN_OSC; 1282 + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG1, value); 1283 + 1284 + /* Configure DW_DP_VIDEO_CONFIG2 register */ 1285 + vblank = mode->vtotal - mode->vdisplay; 1286 + vactive = mode->vdisplay; 1287 + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG2, 1288 + FIELD_PREP(VBLANK, vblank) | FIELD_PREP(VACTIVE, vactive)); 1289 + 1290 + /* Configure DW_DP_VIDEO_CONFIG3 register */ 1291 + h_sync_width = mode->hsync_end - mode->hsync_start; 1292 + h_front_porch = mode->hsync_start - mode->hdisplay; 1293 + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG3, 1294 + FIELD_PREP(H_SYNC_WIDTH, h_sync_width) | 1295 + FIELD_PREP(H_FRONT_PORCH, h_front_porch)); 1296 + 1297 + /* Configure DW_DP_VIDEO_CONFIG4 register */ 1298 + v_sync_width = mode->vsync_end - mode->vsync_start; 1299 + v_front_porch = mode->vsync_start - mode->vdisplay; 1300 + 
regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG4, 1301 + FIELD_PREP(V_SYNC_WIDTH, v_sync_width) | 1302 + FIELD_PREP(V_FRONT_PORCH, v_front_porch)); 1303 + 1304 + /* Configure DW_DP_VIDEO_CONFIG5 register */ 1305 + peak_stream_bandwidth = mode->clock * bpp / 8; 1306 + link_bandwidth = (link->rate / 1000) * link->lanes; 1307 + ts = peak_stream_bandwidth * 64 / link_bandwidth; 1308 + average_bytes_per_tu = ts / 1000; 1309 + average_bytes_per_tu_frac = ts / 100 - average_bytes_per_tu * 10; 1310 + if (dp->pixel_mode == DW_DP_MP_SINGLE_PIXEL) { 1311 + if (average_bytes_per_tu < 6) 1312 + init_threshold = 32; 1313 + else if (hblank <= 80 && color_format != DRM_COLOR_FORMAT_YCBCR420) 1314 + init_threshold = 12; 1315 + else if (hblank <= 40 && color_format == DRM_COLOR_FORMAT_YCBCR420) 1316 + init_threshold = 3; 1317 + else 1318 + init_threshold = 16; 1319 + } else { 1320 + u32 t1 = 0, t2 = 0, t3 = 0; 1321 + 1322 + switch (bpc) { 1323 + case 6: 1324 + t1 = (4 * 1000 / 9) * link->lanes; 1325 + break; 1326 + case 8: 1327 + if (color_format == DRM_COLOR_FORMAT_YCBCR422) { 1328 + t1 = (1000 / 2) * link->lanes; 1329 + } else { 1330 + if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL) 1331 + t1 = (1000 / 3) * link->lanes; 1332 + else 1333 + t1 = (3000 / 16) * link->lanes; 1334 + } 1335 + break; 1336 + case 10: 1337 + if (color_format == DRM_COLOR_FORMAT_YCBCR422) 1338 + t1 = (2000 / 5) * link->lanes; 1339 + else 1340 + t1 = (4000 / 15) * link->lanes; 1341 + break; 1342 + case 12: 1343 + if (color_format == DRM_COLOR_FORMAT_YCBCR422) { 1344 + if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL) 1345 + t1 = (1000 / 6) * link->lanes; 1346 + else 1347 + t1 = (1000 / 3) * link->lanes; 1348 + } else { 1349 + t1 = (2000 / 9) * link->lanes; 1350 + } 1351 + break; 1352 + case 16: 1353 + if (color_format != DRM_COLOR_FORMAT_YCBCR422 && 1354 + dp->pixel_mode == DW_DP_MP_DUAL_PIXEL) 1355 + t1 = (1000 / 6) * link->lanes; 1356 + else 1357 + t1 = (1000 / 4) * link->lanes; 1358 + break; 1359 + default: 1360 + return 
-EINVAL; 1361 + } 1362 + 1363 + if (color_format == DRM_COLOR_FORMAT_YCBCR420) 1364 + t2 = (link->rate / 4) * 1000 / (mode->clock / 2); 1365 + else 1366 + t2 = (link->rate / 4) * 1000 / mode->clock; 1367 + 1368 + if (average_bytes_per_tu_frac) 1369 + t3 = average_bytes_per_tu + 1; 1370 + else 1371 + t3 = average_bytes_per_tu; 1372 + init_threshold = t1 * t2 * t3 / (1000 * 1000); 1373 + if (init_threshold <= 16 || average_bytes_per_tu < 10) 1374 + init_threshold = 40; 1375 + } 1376 + 1377 + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG5, 1378 + FIELD_PREP(INIT_THRESHOLD_HI, init_threshold >> 6) | 1379 + FIELD_PREP(AVERAGE_BYTES_PER_TU_FRAC, average_bytes_per_tu_frac) | 1380 + FIELD_PREP(INIT_THRESHOLD, init_threshold) | 1381 + FIELD_PREP(AVERAGE_BYTES_PER_TU, average_bytes_per_tu)); 1382 + 1383 + /* Configure DW_DP_VIDEO_HBLANK_INTERVAL register */ 1384 + hblank_interval = hblank * (link->rate / 4) / mode->clock; 1385 + regmap_write(dp->regmap, DW_DP_VIDEO_HBLANK_INTERVAL, 1386 + FIELD_PREP(HBLANK_INTERVAL_EN, 1) | 1387 + FIELD_PREP(HBLANK_INTERVAL, hblank_interval)); 1388 + 1389 + /* Video stream enable */ 1390 + regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE, 1391 + FIELD_PREP(VIDEO_STREAM_ENABLE, 1)); 1392 + 1393 + if (dw_dp_video_need_vsc_sdp(dp)) 1394 + dw_dp_send_vsc_sdp(dp); 1395 + 1396 + return 0; 1397 + } 1398 + 1399 + static void dw_dp_hpd_init(struct dw_dp *dp) 1400 + { 1401 + /* Enable all HPD interrupts */ 1402 + regmap_update_bits(dp->regmap, DW_DP_HPD_INTERRUPT_ENABLE, 1403 + HPD_UNPLUG_EN | HPD_PLUG_EN | HPD_IRQ_EN, 1404 + FIELD_PREP(HPD_UNPLUG_EN, 1) | 1405 + FIELD_PREP(HPD_PLUG_EN, 1) | 1406 + FIELD_PREP(HPD_IRQ_EN, 1)); 1407 + 1408 + /* Enable all top-level interrupts */ 1409 + regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE, 1410 + HPD_EVENT_EN, FIELD_PREP(HPD_EVENT_EN, 1)); 1411 + } 1412 + 1413 + static void dw_dp_aux_init(struct dw_dp *dp) 1414 + { 1415 + regmap_update_bits(dp->regmap, 
DW_DP_GENERAL_INTERRUPT_ENABLE, 1416 + AUX_REPLY_EVENT_EN, FIELD_PREP(AUX_REPLY_EVENT_EN, 1)); 1417 + } 1418 + 1419 + static void dw_dp_init_hw(struct dw_dp *dp) 1420 + { 1421 + regmap_update_bits(dp->regmap, DW_DP_CCTL, DEFAULT_FAST_LINK_TRAIN_EN, 1422 + FIELD_PREP(DEFAULT_FAST_LINK_TRAIN_EN, 0)); 1423 + 1424 + dw_dp_hpd_init(dp); 1425 + dw_dp_aux_init(dp); 1426 + } 1427 + 1428 + static int dw_dp_aux_write_data(struct dw_dp *dp, const u8 *buffer, size_t size) 1429 + { 1430 + size_t i, j; 1431 + 1432 + for (i = 0; i < DIV_ROUND_UP(size, 4); i++) { 1433 + size_t num = min_t(size_t, size - i * 4, 4); 1434 + u32 value = 0; 1435 + 1436 + for (j = 0; j < num; j++) 1437 + value |= buffer[i * 4 + j] << (j * 8); 1438 + 1439 + regmap_write(dp->regmap, DW_DP_AUX_DATA0 + i * 4, value); 1440 + } 1441 + 1442 + return size; 1443 + } 1444 + 1445 + static int dw_dp_aux_read_data(struct dw_dp *dp, u8 *buffer, size_t size) 1446 + { 1447 + size_t i, j; 1448 + 1449 + for (i = 0; i < DIV_ROUND_UP(size, 4); i++) { 1450 + size_t num = min_t(size_t, size - i * 4, 4); 1451 + u32 value; 1452 + 1453 + regmap_read(dp->regmap, DW_DP_AUX_DATA0 + i * 4, &value); 1454 + 1455 + for (j = 0; j < num; j++) 1456 + buffer[i * 4 + j] = value >> (j * 8); 1457 + } 1458 + 1459 + return size; 1460 + } 1461 + 1462 + static ssize_t dw_dp_aux_transfer(struct drm_dp_aux *aux, 1463 + struct drm_dp_aux_msg *msg) 1464 + { 1465 + struct dw_dp *dp = container_of(aux, struct dw_dp, aux); 1466 + unsigned long timeout = msecs_to_jiffies(10); 1467 + u32 status, value; 1468 + ssize_t ret = 0; 1469 + 1470 + if (WARN_ON(msg->size > 16)) 1471 + return -E2BIG; 1472 + 1473 + switch (msg->request & ~DP_AUX_I2C_MOT) { 1474 + case DP_AUX_NATIVE_WRITE: 1475 + case DP_AUX_I2C_WRITE: 1476 + case DP_AUX_I2C_WRITE_STATUS_UPDATE: 1477 + ret = dw_dp_aux_write_data(dp, msg->buffer, msg->size); 1478 + if (ret < 0) 1479 + return ret; 1480 + break; 1481 + case DP_AUX_NATIVE_READ: 1482 + case DP_AUX_I2C_READ: 1483 + break; 1484 + default: 
1485 + return -EINVAL; 1486 + } 1487 + 1488 + if (msg->size > 0) 1489 + value = FIELD_PREP(AUX_LEN_REQ, msg->size - 1); 1490 + else 1491 + value = FIELD_PREP(I2C_ADDR_ONLY, 1); 1492 + value |= FIELD_PREP(AUX_CMD_TYPE, msg->request); 1493 + value |= FIELD_PREP(AUX_ADDR, msg->address); 1494 + regmap_write(dp->regmap, DW_DP_AUX_CMD, value); 1495 + 1496 + status = wait_for_completion_timeout(&dp->complete, timeout); 1497 + if (!status) { 1498 + dev_err(dp->dev, "timeout waiting for AUX reply\n"); 1499 + return -ETIMEDOUT; 1500 + } 1501 + 1502 + regmap_read(dp->regmap, DW_DP_AUX_STATUS, &value); 1503 + if (value & AUX_TIMEOUT) 1504 + return -ETIMEDOUT; 1505 + 1506 + msg->reply = FIELD_GET(AUX_STATUS, value); 1507 + 1508 + if (msg->size > 0 && msg->reply == DP_AUX_NATIVE_REPLY_ACK) { 1509 + if (msg->request & DP_AUX_I2C_READ) { 1510 + size_t count = FIELD_GET(AUX_BYTES_READ, value) - 1; 1511 + 1512 + if (count != msg->size) 1513 + return -EBUSY; 1514 + 1515 + ret = dw_dp_aux_read_data(dp, msg->buffer, count); 1516 + if (ret < 0) 1517 + return ret; 1518 + } 1519 + } 1520 + 1521 + return ret; 1522 + } 1523 + 1524 + /* 1525 + * Limits for the video timing for DP: 1526 + * 1. the hfp should be 2 pixels aligned; 1527 + * 2. the minimum hsync should be 9 pixel; 1528 + * 3. 
the minimum hbp should be 16 pixel; 1529 + */ 1530 + static int dw_dp_bridge_atomic_check(struct drm_bridge *bridge, 1531 + struct drm_bridge_state *bridge_state, 1532 + struct drm_crtc_state *crtc_state, 1533 + struct drm_connector_state *conn_state) 1534 + { 1535 + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 1536 + struct dw_dp *dp = bridge_to_dp(bridge); 1537 + struct dw_dp_bridge_state *state; 1538 + const struct dw_dp_output_format *fmt; 1539 + struct drm_display_mode *mode; 1540 + int min_hbp = 16; 1541 + int min_hsync = 9; 1542 + 1543 + state = to_dw_dp_bridge_state(bridge_state); 1544 + mode = &state->mode; 1545 + 1546 + fmt = dw_dp_get_output_format(bridge_state->output_bus_cfg.format); 1547 + if (!fmt) 1548 + return -EINVAL; 1549 + 1550 + state->video_mapping = fmt->video_mapping; 1551 + state->color_format = fmt->color_format; 1552 + state->bpc = fmt->bpc; 1553 + state->bpp = fmt->bpp; 1554 + 1555 + if ((adjusted_mode->hsync_start - adjusted_mode->hdisplay) & 0x1) { 1556 + adjusted_mode->hsync_start += 1; 1557 + dev_warn(dp->dev, "hfp is not 2 pixeel aligned, fixup to aligned hfp\n"); 1558 + } 1559 + 1560 + if (adjusted_mode->hsync_end - adjusted_mode->hsync_start < min_hsync) { 1561 + adjusted_mode->hsync_end = adjusted_mode->hsync_start + min_hsync; 1562 + dev_warn(dp->dev, "hsync is too narrow, fixup to min hsync:%d\n", min_hsync); 1563 + } 1564 + 1565 + if (adjusted_mode->htotal - adjusted_mode->hsync_end < min_hbp) { 1566 + adjusted_mode->htotal = adjusted_mode->hsync_end + min_hbp; 1567 + dev_warn(dp->dev, "hbp is too narrow, fixup to min hbp:%d\n", min_hbp); 1568 + } 1569 + 1570 + drm_mode_copy(mode, adjusted_mode); 1571 + 1572 + return 0; 1573 + } 1574 + 1575 + static enum drm_mode_status dw_dp_bridge_mode_valid(struct drm_bridge *bridge, 1576 + const struct drm_display_info *info, 1577 + const struct drm_display_mode *mode) 1578 + { 1579 + struct dw_dp *dp = bridge_to_dp(bridge); 1580 + struct dw_dp_link *link = 
&dp->link; 1581 + u32 min_bpp; 1582 + 1583 + if (info->color_formats & DRM_COLOR_FORMAT_YCBCR420 && 1584 + link->vsc_sdp_supported && 1585 + (drm_mode_is_420_only(info, mode) || drm_mode_is_420_also(info, mode))) 1586 + min_bpp = 12; 1587 + else if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422) 1588 + min_bpp = 16; 1589 + else if (info->color_formats & DRM_COLOR_FORMAT_RGB444) 1590 + min_bpp = 18; 1591 + else 1592 + min_bpp = 24; 1593 + 1594 + if (!link->vsc_sdp_supported && 1595 + drm_mode_is_420_only(info, mode)) 1596 + return MODE_NO_420; 1597 + 1598 + if (!dw_dp_bandwidth_ok(dp, mode, min_bpp, link->lanes, link->rate)) 1599 + return MODE_CLOCK_HIGH; 1600 + 1601 + return MODE_OK; 1602 + } 1603 + 1604 + static bool dw_dp_needs_link_retrain(struct dw_dp *dp) 1605 + { 1606 + struct dw_dp_link *link = &dp->link; 1607 + u8 link_status[DP_LINK_STATUS_SIZE]; 1608 + 1609 + if (!dw_dp_link_train_valid(&link->train)) 1610 + return false; 1611 + 1612 + if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) < 0) 1613 + return false; 1614 + 1615 + /* Retrain if Channel EQ or CR not ok */ 1616 + return !drm_dp_channel_eq_ok(link_status, dp->link.lanes); 1617 + } 1618 + 1619 + static void dw_dp_link_disable(struct dw_dp *dp) 1620 + { 1621 + struct dw_dp_link *link = &dp->link; 1622 + 1623 + if (dw_dp_hpd_detect(dp)) 1624 + drm_dp_link_power_down(&dp->aux, dp->link.revision); 1625 + 1626 + dw_dp_phy_xmit_enable(dp, 0); 1627 + 1628 + phy_power_off(dp->phy); 1629 + 1630 + link->train.clock_recovered = false; 1631 + link->train.channel_equalized = false; 1632 + } 1633 + 1634 + static int dw_dp_link_enable(struct dw_dp *dp) 1635 + { 1636 + int ret; 1637 + 1638 + ret = phy_power_on(dp->phy); 1639 + if (ret) 1640 + return ret; 1641 + 1642 + ret = drm_dp_link_power_up(&dp->aux, dp->link.revision); 1643 + if (ret < 0) 1644 + return ret; 1645 + 1646 + ret = dw_dp_link_train(dp); 1647 + 1648 + return ret; 1649 + } 1650 + 1651 + static void dw_dp_bridge_atomic_enable(struct drm_bridge 
*bridge, 1652 + struct drm_atomic_state *state) 1653 + { 1654 + struct dw_dp *dp = bridge_to_dp(bridge); 1655 + struct drm_connector *connector; 1656 + struct drm_connector_state *conn_state; 1657 + int ret; 1658 + 1659 + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); 1660 + if (!connector) { 1661 + dev_err(dp->dev, "failed to get connector\n"); 1662 + return; 1663 + } 1664 + 1665 + conn_state = drm_atomic_get_new_connector_state(state, connector); 1666 + if (!conn_state) { 1667 + dev_err(dp->dev, "failed to get connector state\n"); 1668 + return; 1669 + } 1670 + 1671 + set_bit(0, dp->sdp_reg_bank); 1672 + 1673 + ret = dw_dp_link_enable(dp); 1674 + if (ret < 0) { 1675 + dev_err(dp->dev, "failed to enable link: %d\n", ret); 1676 + return; 1677 + } 1678 + 1679 + ret = dw_dp_video_enable(dp); 1680 + if (ret < 0) { 1681 + dev_err(dp->dev, "failed to enable video: %d\n", ret); 1682 + return; 1683 + } 1684 + } 1685 + 1686 + static void dw_dp_reset(struct dw_dp *dp) 1687 + { 1688 + int val; 1689 + 1690 + disable_irq(dp->irq); 1691 + regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET, 1692 + FIELD_PREP(CONTROLLER_RESET, 1)); 1693 + usleep_range(10, 20); 1694 + regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET, 1695 + FIELD_PREP(CONTROLLER_RESET, 0)); 1696 + 1697 + dw_dp_init_hw(dp); 1698 + regmap_read_poll_timeout(dp->regmap, DW_DP_HPD_STATUS, val, 1699 + FIELD_GET(HPD_HOT_PLUG, val), 200, 200000); 1700 + regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG); 1701 + enable_irq(dp->irq); 1702 + } 1703 + 1704 + static void dw_dp_bridge_atomic_disable(struct drm_bridge *bridge, 1705 + struct drm_atomic_state *state) 1706 + { 1707 + struct dw_dp *dp = bridge_to_dp(bridge); 1708 + 1709 + dw_dp_video_disable(dp); 1710 + dw_dp_link_disable(dp); 1711 + bitmap_zero(dp->sdp_reg_bank, SDP_REG_BANK_SIZE); 1712 + dw_dp_reset(dp); 1713 + } 1714 + 1715 + static bool dw_dp_hpd_detect_link(struct dw_dp *dp, struct 
drm_connector *connector) 1716 + { 1717 + int ret; 1718 + 1719 + ret = phy_power_on(dp->phy); 1720 + if (ret < 0) 1721 + return false; 1722 + ret = dw_dp_link_parse(dp, connector); 1723 + phy_power_off(dp->phy); 1724 + 1725 + return !ret; 1726 + } 1727 + 1728 + static enum drm_connector_status dw_dp_bridge_detect(struct drm_bridge *bridge, 1729 + struct drm_connector *connector) 1730 + { 1731 + struct dw_dp *dp = bridge_to_dp(bridge); 1732 + 1733 + if (!dw_dp_hpd_detect(dp)) 1734 + return connector_status_disconnected; 1735 + 1736 + if (!dw_dp_hpd_detect_link(dp, connector)) 1737 + return connector_status_disconnected; 1738 + 1739 + return connector_status_connected; 1740 + } 1741 + 1742 + static const struct drm_edid *dw_dp_bridge_edid_read(struct drm_bridge *bridge, 1743 + struct drm_connector *connector) 1744 + { 1745 + struct dw_dp *dp = bridge_to_dp(bridge); 1746 + const struct drm_edid *edid; 1747 + int ret; 1748 + 1749 + ret = phy_power_on(dp->phy); 1750 + if (ret) 1751 + return NULL; 1752 + 1753 + edid = drm_edid_read_ddc(connector, &dp->aux.ddc); 1754 + 1755 + phy_power_off(dp->phy); 1756 + 1757 + return edid; 1758 + } 1759 + 1760 + static u32 *dw_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, 1761 + struct drm_bridge_state *bridge_state, 1762 + struct drm_crtc_state *crtc_state, 1763 + struct drm_connector_state *conn_state, 1764 + unsigned int *num_output_fmts) 1765 + { 1766 + struct dw_dp *dp = bridge_to_dp(bridge); 1767 + struct dw_dp_link *link = &dp->link; 1768 + struct drm_display_info *di = &conn_state->connector->display_info; 1769 + struct drm_display_mode mode = crtc_state->mode; 1770 + const struct dw_dp_output_format *fmt; 1771 + u32 i, j = 0; 1772 + u32 *output_fmts; 1773 + 1774 + *num_output_fmts = 0; 1775 + 1776 + output_fmts = kcalloc(ARRAY_SIZE(dw_dp_output_formats), sizeof(*output_fmts), GFP_KERNEL); 1777 + if (!output_fmts) 1778 + return NULL; 1779 + 1780 + for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++) { 1781 + 
fmt = &dw_dp_output_formats[i]; 1782 + 1783 + if (fmt->bpc > conn_state->max_bpc) 1784 + continue; 1785 + 1786 + if (!(fmt->color_format & di->color_formats)) 1787 + continue; 1788 + 1789 + if (fmt->color_format == DRM_COLOR_FORMAT_YCBCR420 && 1790 + !link->vsc_sdp_supported) 1791 + continue; 1792 + 1793 + if (fmt->color_format != DRM_COLOR_FORMAT_YCBCR420 && 1794 + drm_mode_is_420_only(di, &mode)) 1795 + continue; 1796 + 1797 + if (!dw_dp_bandwidth_ok(dp, &mode, fmt->bpp, link->lanes, link->rate)) 1798 + continue; 1799 + 1800 + output_fmts[j++] = fmt->bus_format; 1801 + } 1802 + 1803 + *num_output_fmts = j; 1804 + 1805 + return output_fmts; 1806 + } 1807 + 1808 + static struct drm_bridge_state *dw_dp_bridge_atomic_duplicate_state(struct drm_bridge *bridge) 1809 + { 1810 + struct dw_dp_bridge_state *state; 1811 + 1812 + state = kzalloc(sizeof(*state), GFP_KERNEL); 1813 + if (!state) 1814 + return NULL; 1815 + 1816 + __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base); 1817 + 1818 + return &state->base; 1819 + } 1820 + 1821 + static const struct drm_bridge_funcs dw_dp_bridge_funcs = { 1822 + .atomic_duplicate_state = dw_dp_bridge_atomic_duplicate_state, 1823 + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, 1824 + .atomic_reset = drm_atomic_helper_bridge_reset, 1825 + .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt, 1826 + .atomic_get_output_bus_fmts = dw_dp_bridge_atomic_get_output_bus_fmts, 1827 + .atomic_check = dw_dp_bridge_atomic_check, 1828 + .mode_valid = dw_dp_bridge_mode_valid, 1829 + .atomic_enable = dw_dp_bridge_atomic_enable, 1830 + .atomic_disable = dw_dp_bridge_atomic_disable, 1831 + .detect = dw_dp_bridge_detect, 1832 + .edid_read = dw_dp_bridge_edid_read, 1833 + }; 1834 + 1835 + static int dw_dp_link_retrain(struct dw_dp *dp) 1836 + { 1837 + struct drm_device *dev = dp->bridge.dev; 1838 + struct drm_modeset_acquire_ctx ctx; 1839 + int ret; 1840 + 1841 + if (!dw_dp_needs_link_retrain(dp)) 1842 + 
return 0; 1843 + 1844 + dev_dbg(dp->dev, "Retraining link\n"); 1845 + 1846 + drm_modeset_acquire_init(&ctx, 0); 1847 + for (;;) { 1848 + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); 1849 + if (ret != -EDEADLK) 1850 + break; 1851 + 1852 + drm_modeset_backoff(&ctx); 1853 + } 1854 + 1855 + if (!ret) 1856 + ret = dw_dp_link_train(dp); 1857 + 1858 + drm_modeset_drop_locks(&ctx); 1859 + drm_modeset_acquire_fini(&ctx); 1860 + 1861 + return ret; 1862 + } 1863 + 1864 + static void dw_dp_hpd_work(struct work_struct *work) 1865 + { 1866 + struct dw_dp *dp = container_of(work, struct dw_dp, hpd_work); 1867 + bool long_hpd; 1868 + int ret; 1869 + 1870 + mutex_lock(&dp->irq_lock); 1871 + long_hpd = dp->hotplug.long_hpd; 1872 + mutex_unlock(&dp->irq_lock); 1873 + 1874 + dev_dbg(dp->dev, "[drm] Get hpd irq - %s\n", long_hpd ? "long" : "short"); 1875 + 1876 + if (!long_hpd) { 1877 + if (dw_dp_needs_link_retrain(dp)) { 1878 + ret = dw_dp_link_retrain(dp); 1879 + if (ret) 1880 + dev_warn(dp->dev, "Retrain link failed\n"); 1881 + } 1882 + } else { 1883 + drm_helper_hpd_irq_event(dp->bridge.dev); 1884 + } 1885 + } 1886 + 1887 + static void dw_dp_handle_hpd_event(struct dw_dp *dp) 1888 + { 1889 + u32 value; 1890 + 1891 + mutex_lock(&dp->irq_lock); 1892 + regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value); 1893 + 1894 + if (value & HPD_IRQ) { 1895 + dev_dbg(dp->dev, "IRQ from the HPD\n"); 1896 + dp->hotplug.long_hpd = false; 1897 + regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_IRQ); 1898 + } 1899 + 1900 + if (value & HPD_HOT_PLUG) { 1901 + dev_dbg(dp->dev, "Hot plug detected\n"); 1902 + dp->hotplug.long_hpd = true; 1903 + regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG); 1904 + } 1905 + 1906 + if (value & HPD_HOT_UNPLUG) { 1907 + dev_dbg(dp->dev, "Unplug detected\n"); 1908 + dp->hotplug.long_hpd = true; 1909 + regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_UNPLUG); 1910 + } 1911 + mutex_unlock(&dp->irq_lock); 1912 + 1913 + schedule_work(&dp->hpd_work); 
1914 + } 1915 + 1916 + static irqreturn_t dw_dp_irq(int irq, void *data) 1917 + { 1918 + struct dw_dp *dp = data; 1919 + u32 value; 1920 + 1921 + regmap_read(dp->regmap, DW_DP_GENERAL_INTERRUPT, &value); 1922 + if (!value) 1923 + return IRQ_NONE; 1924 + 1925 + if (value & HPD_EVENT) 1926 + dw_dp_handle_hpd_event(dp); 1927 + 1928 + if (value & AUX_REPLY_EVENT) { 1929 + regmap_write(dp->regmap, DW_DP_GENERAL_INTERRUPT, AUX_REPLY_EVENT); 1930 + complete(&dp->complete); 1931 + } 1932 + 1933 + return IRQ_HANDLED; 1934 + } 1935 + 1936 + static const struct regmap_range dw_dp_readable_ranges[] = { 1937 + regmap_reg_range(DW_DP_VERSION_NUMBER, DW_DP_ID), 1938 + regmap_reg_range(DW_DP_CONFIG_REG1, DW_DP_CONFIG_REG3), 1939 + regmap_reg_range(DW_DP_CCTL, DW_DP_SOFT_RESET_CTRL), 1940 + regmap_reg_range(DW_DP_VSAMPLE_CTRL, DW_DP_VIDEO_HBLANK_INTERVAL), 1941 + regmap_reg_range(DW_DP_AUD_CONFIG1, DW_DP_AUD_CONFIG1), 1942 + regmap_reg_range(DW_DP_SDP_VERTICAL_CTRL, DW_DP_SDP_STATUS_EN), 1943 + regmap_reg_range(DW_DP_PHYIF_CTRL, DW_DP_PHYIF_PWRDOWN_CTRL), 1944 + regmap_reg_range(DW_DP_AUX_CMD, DW_DP_AUX_DATA3), 1945 + regmap_reg_range(DW_DP_GENERAL_INTERRUPT, DW_DP_HPD_INTERRUPT_ENABLE), 1946 + }; 1947 + 1948 + static const struct regmap_access_table dw_dp_readable_table = { 1949 + .yes_ranges = dw_dp_readable_ranges, 1950 + .n_yes_ranges = ARRAY_SIZE(dw_dp_readable_ranges), 1951 + }; 1952 + 1953 + static const struct regmap_config dw_dp_regmap_config = { 1954 + .reg_bits = 32, 1955 + .reg_stride = 4, 1956 + .val_bits = 32, 1957 + .fast_io = true, 1958 + .max_register = DW_DP_MAX_REGISTER, 1959 + .rd_table = &dw_dp_readable_table, 1960 + }; 1961 + 1962 + static void dw_dp_phy_exit(void *data) 1963 + { 1964 + struct dw_dp *dp = data; 1965 + 1966 + phy_exit(dp->phy); 1967 + } 1968 + 1969 + struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder, 1970 + const struct dw_dp_plat_data *plat_data) 1971 + { 1972 + struct platform_device *pdev = to_platform_device(dev); 
1973 + struct dw_dp *dp; 1974 + struct drm_bridge *bridge; 1975 + void __iomem *res; 1976 + int ret; 1977 + 1978 + dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 1979 + if (!dp) 1980 + return ERR_PTR(-ENOMEM); 1981 + 1982 + dp = devm_drm_bridge_alloc(dev, struct dw_dp, bridge, &dw_dp_bridge_funcs); 1983 + if (IS_ERR(dp)) 1984 + return ERR_CAST(dp); 1985 + 1986 + dp->dev = dev; 1987 + dp->pixel_mode = DW_DP_MP_QUAD_PIXEL; 1988 + 1989 + dp->plat_data.max_link_rate = plat_data->max_link_rate; 1990 + bridge = &dp->bridge; 1991 + mutex_init(&dp->irq_lock); 1992 + INIT_WORK(&dp->hpd_work, dw_dp_hpd_work); 1993 + init_completion(&dp->complete); 1994 + 1995 + res = devm_platform_ioremap_resource(pdev, 0); 1996 + if (IS_ERR(res)) 1997 + return ERR_CAST(res); 1998 + 1999 + dp->regmap = devm_regmap_init_mmio(dev, res, &dw_dp_regmap_config); 2000 + if (IS_ERR(dp->regmap)) { 2001 + dev_err_probe(dev, PTR_ERR(dp->regmap), "failed to create regmap\n"); 2002 + return ERR_CAST(dp->regmap); 2003 + } 2004 + 2005 + dp->phy = devm_of_phy_get(dev, dev->of_node, NULL); 2006 + if (IS_ERR(dp->phy)) { 2007 + dev_err_probe(dev, PTR_ERR(dp->phy), "failed to get phy\n"); 2008 + return ERR_CAST(dp->phy); 2009 + } 2010 + 2011 + dp->apb_clk = devm_clk_get_enabled(dev, "apb"); 2012 + if (IS_ERR(dp->apb_clk)) { 2013 + dev_err_probe(dev, PTR_ERR(dp->apb_clk), "failed to get apb clock\n"); 2014 + return ERR_CAST(dp->apb_clk); 2015 + } 2016 + 2017 + dp->aux_clk = devm_clk_get_enabled(dev, "aux"); 2018 + if (IS_ERR(dp->aux_clk)) { 2019 + dev_err_probe(dev, PTR_ERR(dp->aux_clk), "failed to get aux clock\n"); 2020 + return ERR_CAST(dp->aux_clk); 2021 + } 2022 + 2023 + dp->i2s_clk = devm_clk_get(dev, "i2s"); 2024 + if (IS_ERR(dp->i2s_clk)) { 2025 + dev_err_probe(dev, PTR_ERR(dp->i2s_clk), "failed to get i2s clock\n"); 2026 + return ERR_CAST(dp->i2s_clk); 2027 + } 2028 + 2029 + dp->spdif_clk = devm_clk_get(dev, "spdif"); 2030 + if (IS_ERR(dp->spdif_clk)) { 2031 + dev_err_probe(dev, 
PTR_ERR(dp->spdif_clk), "failed to get spdif clock\n"); 2032 + return ERR_CAST(dp->spdif_clk); 2033 + } 2034 + 2035 + dp->hdcp_clk = devm_clk_get(dev, "hdcp"); 2036 + if (IS_ERR(dp->hdcp_clk)) { 2037 + dev_err_probe(dev, PTR_ERR(dp->hdcp_clk), "failed to get hdcp clock\n"); 2038 + return ERR_CAST(dp->hdcp_clk); 2039 + } 2040 + 2041 + dp->rstc = devm_reset_control_get(dev, NULL); 2042 + if (IS_ERR(dp->rstc)) { 2043 + dev_err_probe(dev, PTR_ERR(dp->rstc), "failed to get reset control\n"); 2044 + return ERR_CAST(dp->rstc); 2045 + } 2046 + 2047 + bridge->of_node = dev->of_node; 2048 + bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; 2049 + bridge->type = DRM_MODE_CONNECTOR_DisplayPort; 2050 + bridge->ycbcr_420_allowed = true; 2051 + 2052 + dp->aux.dev = dev; 2053 + dp->aux.drm_dev = encoder->dev; 2054 + dp->aux.name = dev_name(dev); 2055 + dp->aux.transfer = dw_dp_aux_transfer; 2056 + ret = drm_dp_aux_register(&dp->aux); 2057 + if (ret) { 2058 + dev_err_probe(dev, ret, "Aux register failed\n"); 2059 + return ERR_PTR(ret); 2060 + } 2061 + 2062 + ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); 2063 + if (ret) 2064 + dev_err_probe(dev, ret, "Failed to attach bridge\n"); 2065 + 2066 + dw_dp_init_hw(dp); 2067 + 2068 + ret = phy_init(dp->phy); 2069 + if (ret) { 2070 + dev_err_probe(dev, ret, "phy init failed\n"); 2071 + return ERR_PTR(ret); 2072 + } 2073 + 2074 + ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp); 2075 + if (ret) 2076 + return ERR_PTR(ret); 2077 + 2078 + dp->irq = platform_get_irq(pdev, 0); 2079 + if (dp->irq < 0) 2080 + return ERR_PTR(ret); 2081 + 2082 + ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq, 2083 + IRQF_ONESHOT, dev_name(dev), dp); 2084 + if (ret) { 2085 + dev_err_probe(dev, ret, "failed to request irq\n"); 2086 + return ERR_PTR(ret); 2087 + } 2088 + 2089 + return dp; 2090 + } 2091 + EXPORT_SYMBOL_GPL(dw_dp_bind); 2092 + 2093 + MODULE_AUTHOR("Andy Yan 
<andyshrk@163.com>"); 2094 + MODULE_DESCRIPTION("DW DP Core Library"); 2095 + MODULE_LICENSE("GPL");
+2 -3
drivers/gpu/drm/display/drm_bridge_connector.c
··· 751 751 return ERR_PTR(-EINVAL); 752 752 } 753 753 754 - if (!drm_bridge_get_next_bridge(bridge)) 754 + if (drm_bridge_is_last(bridge)) 755 755 connector_type = bridge->type; 756 756 757 757 #ifdef CONFIG_OF 758 - if (!drm_bridge_get_next_bridge(bridge) && 759 - bridge->of_node) 758 + if (drm_bridge_is_last(bridge) && bridge->of_node) 760 759 connector->fwnode = fwnode_handle_get(of_fwnode_handle(bridge->of_node)); 761 760 #endif 762 761
+3
drivers/gpu/drm/drm_bridge.c
··· 1435 1435 unsigned int idx) 1436 1436 { 1437 1437 drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); 1438 + 1439 + drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount)); 1440 + 1438 1441 drm_printf(p, "\ttype: [%d] %s\n", 1439 1442 bridge->type, 1440 1443 drm_get_connector_type_name(bridge->type));
+108
drivers/gpu/drm/drm_format_helper.c
··· 1256 1256 } 1257 1257 EXPORT_SYMBOL(drm_fb_blit); 1258 1258 1259 + static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels) 1260 + { 1261 + u8 *dbuf8 = dbuf; 1262 + const u8 *sbuf8 = sbuf; 1263 + u8 px; 1264 + 1265 + while (pixels) { 1266 + unsigned int i, bits = min(pixels, 4U); 1267 + u8 byte = 0; 1268 + 1269 + for (i = 0; i < bits; i++, pixels--) { 1270 + byte >>= 2; 1271 + px = (*sbuf8++ * 3 + 127) / 255; 1272 + byte |= (px &= 0x03) << 6; 1273 + } 1274 + *dbuf8++ = byte; 1275 + } 1276 + } 1277 + 1259 1278 static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels) 1260 1279 { 1261 1280 u8 *dbuf8 = dbuf; ··· 1381 1362 } 1382 1363 } 1383 1364 EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono); 1365 + 1366 + /** 1367 + * drm_fb_xrgb8888_to_gray2 - Convert XRGB8888 to gray2 1368 + * @dst: Array of gray2 destination buffer 1369 + * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines 1370 + * within @dst; can be NULL if scanlines are stored next to each other. 1371 + * @src: Array of XRGB8888 source buffers 1372 + * @fb: DRM framebuffer 1373 + * @clip: Clip rectangle area to copy 1374 + * @state: Transform and conversion state 1375 + * 1376 + * This function copies parts of a framebuffer to display memory and converts the 1377 + * color format during the process. Destination and framebuffer formats must match. The 1378 + * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at 1379 + * least as many entries as there are planes in @fb's format. Each entry stores the 1380 + * value for the format's respective color plane at the same index. 1381 + * 1382 + * This function does not apply clipping on @dst (i.e. the destination is at the 1383 + * top-left corner). The first pixel (upper left corner of the clip rectangle) will 1384 + * be converted and copied to the two first bits (LSB) in the first byte of the gray2 1385 + * destination buffer. 
If the caller requires that the first pixel in a byte must 1386 + * be located at an x-coordinate that is a multiple of 8, then the caller must take 1387 + * care itself of supplying a suitable clip rectangle. 1388 + * 1389 + * DRM doesn't have native gray2 support. Drivers can use this function for 1390 + * gray2 devices that don't support XRGB8888 natively. Such drivers can 1391 + * announce the commonly supported XR24 format to userspace and use this function 1392 + * to convert to the native format. 1393 + * 1394 + */ 1395 + void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch, 1396 + const struct iosys_map *src, const struct drm_framebuffer *fb, 1397 + const struct drm_rect *clip, struct drm_format_conv_state *state) 1398 + { 1399 + static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = { 1400 + 0, 0, 0, 0 1401 + }; 1402 + unsigned int linepixels = drm_rect_width(clip); 1403 + unsigned int lines = drm_rect_height(clip); 1404 + unsigned int cpp = fb->format->cpp[0]; 1405 + unsigned int len_src32 = linepixels * cpp; 1406 + struct drm_device *dev = fb->dev; 1407 + void *vaddr = src[0].vaddr; 1408 + unsigned int dst_pitch_0; 1409 + unsigned int y; 1410 + u8 *gray2 = dst[0].vaddr, *gray8; 1411 + u32 *src32; 1412 + 1413 + if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888)) 1414 + return; 1415 + 1416 + if (!dst_pitch) 1417 + dst_pitch = default_dst_pitch; 1418 + dst_pitch_0 = dst_pitch[0]; 1419 + 1420 + /* 1421 + * The gray2 destination buffer contains 2 bit per pixel 1422 + */ 1423 + if (!dst_pitch_0) 1424 + dst_pitch_0 = DIV_ROUND_UP(linepixels, 4); 1425 + 1426 + /* 1427 + * The dma memory is write-combined so reads are uncached. 1428 + * Speed up by fetching one line at a time. 1429 + * 1430 + * Also, format conversion from XR24 to gray2 are done 1431 + * line-by-line but are converted to 8-bit grayscale as an 1432 + * intermediate step. 
1433 + * 1434 + * Allocate a buffer to be used for both copying from the cma 1435 + * memory and to store the intermediate grayscale line pixels. 1436 + */ 1437 + src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL); 1438 + if (!src32) 1439 + return; 1440 + 1441 + gray8 = (u8 *)src32 + len_src32; 1442 + 1443 + vaddr += clip_offset(clip, fb->pitches[0], cpp); 1444 + for (y = 0; y < lines; y++) { 1445 + src32 = memcpy(src32, vaddr, len_src32); 1446 + drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels); 1447 + drm_fb_gray8_to_gray2_line(gray2, gray8, linepixels); 1448 + vaddr += fb->pitches[0]; 1449 + gray2 += dst_pitch_0; 1450 + } 1451 + } 1452 + EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray2); 1453 +
+2
drivers/gpu/drm/drm_gem.c
··· 187 187 kref_init(&obj->refcount); 188 188 obj->handle_count = 0; 189 189 obj->size = size; 190 + mutex_init(&obj->gpuva.lock); 190 191 dma_resv_init(&obj->_resv); 191 192 if (!obj->resv) 192 193 obj->resv = &obj->_resv; ··· 211 210 WARN_ON(obj->dma_buf); 212 211 213 212 dma_resv_fini(&obj->_resv); 213 + mutex_destroy(&obj->gpuva.lock); 214 214 } 215 215 EXPORT_SYMBOL(drm_gem_private_object_fini); 216 216
+14 -16
drivers/gpu/drm/drm_gpuvm.c
··· 497 497 * DRM GPUVM also does not take care of the locking of the backing 498 498 * &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by 499 499 * itself; drivers are responsible to enforce mutual exclusion using either the 500 - * GEMs dma_resv lock or alternatively a driver specific external lock. For the 501 - * latter see also drm_gem_gpuva_set_lock(). 500 + * GEMs dma_resv lock or the GEMs gpuva.lock mutex. 502 501 * 503 502 * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold 504 503 * the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed ··· 1581 1582 drm_gpuvm_bo_list_del(vm_bo, extobj, lock); 1582 1583 drm_gpuvm_bo_list_del(vm_bo, evict, lock); 1583 1584 1584 - drm_gem_gpuva_assert_lock_held(obj); 1585 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1585 1586 list_del(&vm_bo->list.entry.gem); 1586 1587 1587 1588 if (ops && ops->vm_bo_free) ··· 1602 1603 * If the reference count drops to zero, the &gpuvm_bo is destroyed, which 1603 1604 * includes removing it from the GEMs gpuva list. Hence, if a call to this 1604 1605 * function can potentially let the reference count drop to zero the caller must 1605 - * hold the dma-resv or driver specific GEM gpuva lock. 1606 + * hold the lock that the GEM uses for its gpuva list (either the GEM's 1607 + * dma-resv or gpuva.lock mutex). 1606 1608 * 1607 1609 * This function may only be called from non-atomic context. 
1608 1610 * ··· 1627 1627 { 1628 1628 struct drm_gpuvm_bo *vm_bo; 1629 1629 1630 - drm_gem_gpuva_assert_lock_held(obj); 1630 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1631 1631 drm_gem_for_each_gpuvm_bo(vm_bo, obj) 1632 1632 if (vm_bo->vm == gpuvm) 1633 1633 return vm_bo; ··· 1686 1686 if (!vm_bo) 1687 1687 return ERR_PTR(-ENOMEM); 1688 1688 1689 - drm_gem_gpuva_assert_lock_held(obj); 1689 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1690 1690 list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list); 1691 1691 1692 1692 return vm_bo; ··· 1722 1722 return vm_bo; 1723 1723 } 1724 1724 1725 - drm_gem_gpuva_assert_lock_held(obj); 1725 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1726 1726 list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list); 1727 1727 1728 1728 return __vm_bo; ··· 1894 1894 * reference of the latter is taken. 1895 1895 * 1896 1896 * This function expects the caller to protect the GEM's GPUVA list against 1897 - * concurrent access using either the GEMs dma_resv lock or a driver specific 1898 - * lock set through drm_gem_gpuva_set_lock(). 1897 + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. 1899 1898 */ 1900 1899 void 1901 1900 drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo) ··· 1909 1910 1910 1911 va->vm_bo = drm_gpuvm_bo_get(vm_bo); 1911 1912 1912 - drm_gem_gpuva_assert_lock_held(obj); 1913 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1913 1914 list_add_tail(&va->gem.entry, &vm_bo->list.gpuva); 1914 1915 } 1915 1916 EXPORT_SYMBOL_GPL(drm_gpuva_link); ··· 1929 1930 * the latter is dropped. 1930 1931 * 1931 1932 * This function expects the caller to protect the GEM's GPUVA list against 1932 - * concurrent access using either the GEMs dma_resv lock or a driver specific 1933 - * lock set through drm_gem_gpuva_set_lock(). 1933 + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. 
1934 1934 */ 1935 1935 void 1936 1936 drm_gpuva_unlink(struct drm_gpuva *va) ··· 1940 1942 if (unlikely(!obj)) 1941 1943 return; 1942 1944 1943 - drm_gem_gpuva_assert_lock_held(obj); 1945 + drm_gem_gpuva_assert_lock_held(va->vm, obj); 1944 1946 list_del_init(&va->gem.entry); 1945 1947 1946 1948 va->vm_bo = NULL; ··· 2941 2943 * After the caller finished processing the returned &drm_gpuva_ops, they must 2942 2944 * be freed with &drm_gpuva_ops_free. 2943 2945 * 2944 - * It is the callers responsibility to protect the GEMs GPUVA list against 2945 - * concurrent access using the GEMs dma_resv lock. 2946 + * This function expects the caller to protect the GEM's GPUVA list against 2947 + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. 2946 2948 * 2947 2949 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure 2948 2950 */ ··· 2954 2956 struct drm_gpuva *va; 2955 2957 int ret; 2956 2958 2957 - drm_gem_gpuva_assert_lock_held(vm_bo->obj); 2959 + drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj); 2958 2960 2959 2961 ops = kzalloc(sizeof(*ops), GFP_KERNEL); 2960 2962 if (!ops)
+88 -27
drivers/gpu/drm/drm_panel_backlight_quirks.c
··· 8 8 #include <drm/drm_edid.h> 9 9 #include <drm/drm_utils.h> 10 10 11 - struct drm_panel_min_backlight_quirk { 12 - struct { 13 - enum dmi_field field; 14 - const char * const value; 15 - } dmi_match; 16 - struct drm_edid_ident ident; 17 - u8 min_brightness; 11 + struct drm_panel_match { 12 + enum dmi_field field; 13 + const char * const value; 18 14 }; 19 15 20 - static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = { 16 + struct drm_get_panel_backlight_quirk { 17 + struct drm_panel_match dmi_match; 18 + struct drm_panel_match dmi_match_other; 19 + struct drm_edid_ident ident; 20 + struct drm_panel_backlight_quirk quirk; 21 + }; 22 + 23 + static const struct drm_get_panel_backlight_quirk drm_panel_min_backlight_quirks[] = { 21 24 /* 13 inch matte panel */ 22 25 { 23 26 .dmi_match.field = DMI_BOARD_VENDOR, 24 27 .dmi_match.value = "Framework", 25 28 .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca), 26 29 .ident.name = "NE135FBM-N41", 27 - .min_brightness = 0, 30 + .quirk = { .min_brightness = 1, }, 28 31 }, 29 32 /* 13 inch glossy panel */ 30 33 { ··· 35 32 .dmi_match.value = "Framework", 36 33 .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f), 37 34 .ident.name = "NE135FBM-N41", 38 - .min_brightness = 0, 35 + .quirk = { .min_brightness = 1, }, 39 36 }, 40 37 /* 13 inch 2.8k panel */ 41 38 { ··· 43 40 .dmi_match.value = "Framework", 44 41 .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4), 45 42 .ident.name = "NE135A1M-NY1", 46 - .min_brightness = 0, 43 + .quirk = { .min_brightness = 1, }, 44 + }, 45 + /* Steam Deck models */ 46 + { 47 + .dmi_match.field = DMI_SYS_VENDOR, 48 + .dmi_match.value = "Valve", 49 + .dmi_match_other.field = DMI_PRODUCT_NAME, 50 + .dmi_match_other.value = "Jupiter", 51 + .quirk = { .min_brightness = 1, }, 52 + }, 53 + { 54 + .dmi_match.field = DMI_SYS_VENDOR, 55 + .dmi_match.value = "Valve", 56 + .dmi_match_other.field = DMI_PRODUCT_NAME, 57 + 
.dmi_match_other.value = "Galileo", 58 + .quirk = { .min_brightness = 1, }, 59 + }, 60 + /* Have OLED Panels with brightness issue when last byte is 0/1 */ 61 + { 62 + .dmi_match.field = DMI_SYS_VENDOR, 63 + .dmi_match.value = "AYANEO", 64 + .dmi_match_other.field = DMI_PRODUCT_NAME, 65 + .dmi_match_other.value = "AYANEO 3", 66 + .quirk = { .brightness_mask = 3, }, 67 + }, 68 + { 69 + .dmi_match.field = DMI_SYS_VENDOR, 70 + .dmi_match.value = "ZOTAC", 71 + .dmi_match_other.field = DMI_BOARD_NAME, 72 + .dmi_match_other.value = "G0A1W", 73 + .quirk = { .brightness_mask = 3, }, 74 + }, 75 + { 76 + .dmi_match.field = DMI_SYS_VENDOR, 77 + .dmi_match.value = "ZOTAC", 78 + .dmi_match_other.field = DMI_BOARD_NAME, 79 + .dmi_match_other.value = "G1A1W", 80 + .quirk = { .brightness_mask = 3, }, 81 + }, 82 + { 83 + .dmi_match.field = DMI_SYS_VENDOR, 84 + .dmi_match.value = "ONE-NETBOOK", 85 + .dmi_match_other.field = DMI_PRODUCT_NAME, 86 + .dmi_match_other.value = "ONEXPLAYER F1Pro", 87 + .quirk = { .brightness_mask = 3, }, 88 + }, 89 + { 90 + .dmi_match.field = DMI_SYS_VENDOR, 91 + .dmi_match.value = "ONE-NETBOOK", 92 + .dmi_match_other.field = DMI_PRODUCT_NAME, 93 + .dmi_match_other.value = "ONEXPLAYER F1 EVA-02", 94 + .quirk = { .brightness_mask = 3, }, 47 95 }, 48 96 }; 49 97 50 - static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk, 51 - const struct drm_edid *edid) 98 + static bool drm_panel_min_backlight_quirk_matches( 99 + const struct drm_get_panel_backlight_quirk *quirk, 100 + const struct drm_edid *edid) 52 101 { 53 - if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value)) 102 + if (quirk->dmi_match.field && 103 + !dmi_match(quirk->dmi_match.field, quirk->dmi_match.value)) 54 104 return false; 55 105 56 - if (!drm_edid_match(edid, &quirk->ident)) 106 + if (quirk->dmi_match_other.field && 107 + !dmi_match(quirk->dmi_match_other.field, 108 + quirk->dmi_match_other.value)) 109 + return false; 110 + 111 + if 
(quirk->ident.panel_id && !drm_edid_match(edid, &quirk->ident)) 57 112 return false; 58 113 59 114 return true; 60 115 } 61 116 62 117 /** 63 - * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel. 118 + * drm_get_panel_backlight_quirk - Get backlight quirks for a panel 64 119 * @edid: EDID of the panel to check 65 120 * 66 121 * This function checks for platform specific (e.g. DMI based) quirks 67 122 * providing info on the minimum backlight brightness for systems where this 68 - * cannot be probed correctly from the hard-/firm-ware. 123 + * cannot be probed correctly from the hard-/firm-ware and other sources. 69 124 * 70 125 * Returns: 71 - * A negative error value or 72 - * an override value in the range [0, 255] representing 0-100% to be scaled to 73 - * the drivers target range. 126 + * a drm_panel_backlight_quirk struct if a quirk was found, otherwise an 127 + * error pointer. 74 128 */ 75 - int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid) 129 + const struct drm_panel_backlight_quirk * 130 + drm_get_panel_backlight_quirk(const struct drm_edid *edid) 76 131 { 77 - const struct drm_panel_min_backlight_quirk *quirk; 132 + const struct drm_get_panel_backlight_quirk *quirk; 78 133 size_t i; 79 134 80 135 if (!IS_ENABLED(CONFIG_DMI)) 81 - return -ENODATA; 136 + return ERR_PTR(-ENODATA); 82 137 83 138 if (!edid) 84 - return -EINVAL; 139 + return ERR_PTR(-EINVAL); 85 140 86 141 for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) { 87 142 quirk = &drm_panel_min_backlight_quirks[i]; 88 143 89 144 if (drm_panel_min_backlight_quirk_matches(quirk, edid)) 90 - return quirk->min_brightness; 145 + return &quirk->quirk; 91 146 } 92 147 93 - return -ENODATA; 148 + return ERR_PTR(-ENODATA); 94 149 } 95 - EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk); 150 + EXPORT_SYMBOL(drm_get_panel_backlight_quirk); 96 151 97 152 MODULE_DESCRIPTION("Quirks for panel backlight overrides"); 98 153 MODULE_LICENSE("GPL");
+10 -15
drivers/gpu/drm/gud/gud_connector.c
··· 16 16 #include <drm/drm_modeset_helper_vtables.h> 17 17 #include <drm/drm_print.h> 18 18 #include <drm/drm_probe_helper.h> 19 - #include <drm/drm_simple_kms_helper.h> 20 19 #include <drm/gud.h> 21 20 22 21 #include "gud_internal.h" ··· 606 607 return gconn->num_properties; 607 608 } 608 609 610 + static const struct drm_encoder_funcs gud_drm_simple_encoder_funcs_cleanup = { 611 + .destroy = drm_encoder_cleanup, 612 + }; 613 + 609 614 static int gud_connector_create(struct gud_device *gdrm, unsigned int index, 610 615 struct gud_connector_descriptor_req *desc) 611 616 { 612 617 struct drm_device *drm = &gdrm->drm; 613 618 struct gud_connector *gconn; 614 619 struct drm_connector *connector; 615 - struct drm_encoder *encoder; 616 620 int ret, connector_type; 617 621 u32 flags; 618 622 ··· 683 681 return ret; 684 682 } 685 683 686 - /* The first connector is attached to the existing simple pipe encoder */ 687 - if (!connector->index) { 688 - encoder = &gdrm->pipe.encoder; 689 - } else { 690 - encoder = &gconn->encoder; 684 + gconn->encoder.possible_crtcs = drm_crtc_mask(&gdrm->crtc); 685 + ret = drm_encoder_init(drm, &gconn->encoder, &gud_drm_simple_encoder_funcs_cleanup, 686 + DRM_MODE_ENCODER_NONE, NULL); 687 + if (ret) 688 + return ret; 691 689 692 - ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE); 693 - if (ret) 694 - return ret; 695 - 696 - encoder->possible_crtcs = 1; 697 - } 698 - 699 - return drm_connector_attach_encoder(connector, encoder); 690 + return drm_connector_attach_encoder(connector, &gconn->encoder); 700 691 } 701 692 702 693 int gud_get_connectors(struct gud_device *gdrm)
+41 -11
drivers/gpu/drm/gud/gud_drv.c
··· 16 16 #include <drm/clients/drm_client_setup.h> 17 17 #include <drm/drm_atomic_helper.h> 18 18 #include <drm/drm_blend.h> 19 + #include <drm/drm_crtc_helper.h> 19 20 #include <drm/drm_damage_helper.h> 20 21 #include <drm/drm_debugfs.h> 21 22 #include <drm/drm_drv.h> ··· 28 27 #include <drm/drm_managed.h> 29 28 #include <drm/drm_print.h> 30 29 #include <drm/drm_probe_helper.h> 31 - #include <drm/drm_simple_kms_helper.h> 32 30 #include <drm/gud.h> 33 31 34 32 #include "gud_internal.h" ··· 289 289 * but mask out any additions on future devices. 290 290 */ 291 291 val &= GUD_ROTATION_MASK; 292 - ret = drm_plane_create_rotation_property(&gdrm->pipe.plane, 292 + ret = drm_plane_create_rotation_property(&gdrm->plane, 293 293 DRM_MODE_ROTATE_0, val); 294 294 break; 295 295 default: ··· 338 338 return 0; 339 339 } 340 340 341 - static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = { 342 - .check = gud_pipe_check, 343 - .update = gud_pipe_update, 344 - DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS 341 + static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = { 342 + .atomic_check = drm_crtc_helper_atomic_check 343 + }; 344 + 345 + static const struct drm_crtc_funcs gud_crtc_funcs = { 346 + .reset = drm_atomic_helper_crtc_reset, 347 + .destroy = drm_crtc_cleanup, 348 + .set_config = drm_atomic_helper_set_config, 349 + .page_flip = drm_atomic_helper_page_flip, 350 + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 351 + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 352 + }; 353 + 354 + static const struct drm_plane_helper_funcs gud_plane_helper_funcs = { 355 + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, 356 + .atomic_check = gud_plane_atomic_check, 357 + .atomic_update = gud_plane_atomic_update, 358 + }; 359 + 360 + static const struct drm_plane_funcs gud_plane_funcs = { 361 + .update_plane = drm_atomic_helper_update_plane, 362 + .disable_plane = drm_atomic_helper_disable_plane, 363 + .destroy = drm_plane_cleanup, 364 + 
DRM_GEM_SHADOW_PLANE_FUNCS, 345 365 }; 346 366 347 367 static const struct drm_mode_config_funcs gud_mode_config_funcs = { ··· 370 350 .atomic_commit = drm_atomic_helper_commit, 371 351 }; 372 352 373 - static const u64 gud_pipe_modifiers[] = { 353 + static const u64 gud_plane_modifiers[] = { 374 354 DRM_FORMAT_MOD_LINEAR, 375 355 DRM_FORMAT_MOD_INVALID 376 356 }; ··· 587 567 return -ENOMEM; 588 568 } 589 569 590 - ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs, 591 - formats, num_formats, 592 - gud_pipe_modifiers, NULL); 570 + ret = drm_universal_plane_init(drm, &gdrm->plane, 0, 571 + &gud_plane_funcs, 572 + formats, num_formats, 573 + gud_plane_modifiers, 574 + DRM_PLANE_TYPE_PRIMARY, NULL); 593 575 if (ret) 594 576 return ret; 577 + 578 + drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs); 579 + drm_plane_enable_fb_damage_clips(&gdrm->plane); 595 580 596 581 devm_kfree(dev, formats); 597 582 devm_kfree(dev, formats_dev); ··· 607 582 return ret; 608 583 } 609 584 610 - drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane); 585 + ret = drm_crtc_init_with_planes(drm, &gdrm->crtc, &gdrm->plane, NULL, 586 + &gud_crtc_funcs, NULL); 587 + if (ret) 588 + return ret; 589 + 590 + drm_crtc_helper_add(&gdrm->crtc, &gud_crtc_helper_funcs); 611 591 612 592 ret = gud_get_connectors(gdrm); 613 593 if (ret) {
+6 -7
drivers/gpu/drm/gud/gud_internal.h
··· 11 11 #include <uapi/drm/drm_fourcc.h> 12 12 13 13 #include <drm/drm_modes.h> 14 - #include <drm/drm_simple_kms_helper.h> 15 14 16 15 struct gud_device { 17 16 struct drm_device drm; 18 - struct drm_simple_display_pipe pipe; 17 + struct drm_plane plane; 18 + struct drm_crtc crtc; 19 19 struct work_struct work; 20 20 u32 flags; 21 21 const struct drm_format_info *xrgb8888_emulation_format; ··· 62 62 63 63 void gud_clear_damage(struct gud_device *gdrm); 64 64 void gud_flush_work(struct work_struct *work); 65 - int gud_pipe_check(struct drm_simple_display_pipe *pipe, 66 - struct drm_plane_state *new_plane_state, 67 - struct drm_crtc_state *new_crtc_state); 68 - void gud_pipe_update(struct drm_simple_display_pipe *pipe, 69 - struct drm_plane_state *old_state); 65 + int gud_plane_atomic_check(struct drm_plane *plane, 66 + struct drm_atomic_state *state); 67 + void gud_plane_atomic_update(struct drm_plane *plane, 68 + struct drm_atomic_state *atomic_state); 70 69 int gud_connector_fill_properties(struct drm_connector_state *connector_state, 71 70 struct gud_property_req *properties); 72 71 int gud_get_connectors(struct gud_device *gdrm);
+42 -22
drivers/gpu/drm/gud/gud_pipe.c
··· 20 20 #include <drm/drm_gem_framebuffer_helper.h> 21 21 #include <drm/drm_print.h> 22 22 #include <drm/drm_rect.h> 23 - #include <drm/drm_simple_kms_helper.h> 24 23 #include <drm/gud.h> 25 24 26 25 #include "gud_internal.h" ··· 450 451 gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage); 451 452 } 452 453 453 - int gud_pipe_check(struct drm_simple_display_pipe *pipe, 454 - struct drm_plane_state *new_plane_state, 455 - struct drm_crtc_state *new_crtc_state) 454 + int gud_plane_atomic_check(struct drm_plane *plane, 455 + struct drm_atomic_state *state) 456 456 { 457 - struct gud_device *gdrm = to_gud_device(pipe->crtc.dev); 458 - struct drm_plane_state *old_plane_state = pipe->plane.state; 459 - const struct drm_display_mode *mode = &new_crtc_state->mode; 460 - struct drm_atomic_state *state = new_plane_state->state; 457 + struct gud_device *gdrm = to_gud_device(plane->dev); 458 + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); 459 + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); 460 + struct drm_crtc *crtc = new_plane_state->crtc; 461 + struct drm_crtc_state *crtc_state; 462 + const struct drm_display_mode *mode; 461 463 struct drm_framebuffer *old_fb = old_plane_state->fb; 462 464 struct drm_connector_state *connector_state = NULL; 463 465 struct drm_framebuffer *fb = new_plane_state->fb; ··· 469 469 int idx, ret; 470 470 size_t len; 471 471 472 - if (WARN_ON_ONCE(!fb)) 472 + if (drm_WARN_ON_ONCE(plane->dev, !fb)) 473 473 return -EINVAL; 474 474 475 + if (drm_WARN_ON_ONCE(plane->dev, !crtc)) 476 + return -EINVAL; 477 + 478 + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 479 + 480 + mode = &crtc_state->mode; 481 + 482 + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 483 + DRM_PLANE_NO_SCALING, 484 + DRM_PLANE_NO_SCALING, 485 + false, false); 486 + if (ret) 487 + return ret; 488 + 489 + if (!new_plane_state->visible) 490 + return 0; 
491 + 475 492 if (old_plane_state->rotation != new_plane_state->rotation) 476 - new_crtc_state->mode_changed = true; 493 + crtc_state->mode_changed = true; 477 494 478 495 if (old_fb && old_fb->format != format) 479 - new_crtc_state->mode_changed = true; 496 + crtc_state->mode_changed = true; 480 497 481 - if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed) 498 + if (!crtc_state->mode_changed && !crtc_state->connectors_changed) 482 499 return 0; 483 500 484 501 /* Only one connector is supported */ 485 - if (hweight32(new_crtc_state->connector_mask) != 1) 502 + if (hweight32(crtc_state->connector_mask) != 1) 486 503 return -EINVAL; 487 504 488 505 if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format) ··· 517 500 if (!connector_state) { 518 501 struct drm_connector_list_iter conn_iter; 519 502 520 - drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter); 503 + drm_connector_list_iter_begin(plane->dev, &conn_iter); 521 504 drm_for_each_connector_iter(connector, &conn_iter) { 522 505 if (connector->state->crtc) { 523 506 connector_state = connector->state; ··· 584 567 return ret; 585 568 } 586 569 587 - void gud_pipe_update(struct drm_simple_display_pipe *pipe, 588 - struct drm_plane_state *old_state) 570 + void gud_plane_atomic_update(struct drm_plane *plane, 571 + struct drm_atomic_state *atomic_state) 589 572 { 590 - struct drm_device *drm = pipe->crtc.dev; 573 + struct drm_device *drm = plane->dev; 591 574 struct gud_device *gdrm = to_gud_device(drm); 592 - struct drm_plane_state *state = pipe->plane.state; 593 - struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); 594 - struct drm_framebuffer *fb = state->fb; 595 - struct drm_crtc *crtc = &pipe->crtc; 575 + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(atomic_state, plane); 576 + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(atomic_state, plane); 577 + struct drm_shadow_plane_state 
*shadow_plane_state = to_drm_shadow_plane_state(new_state); 578 + struct drm_framebuffer *fb = new_state->fb; 579 + struct drm_crtc *crtc = new_state->crtc; 596 580 struct drm_rect damage; 581 + struct drm_atomic_helper_damage_iter iter; 597 582 int ret, idx; 598 583 599 584 if (crtc->state->mode_changed || !crtc->state->enable) { ··· 630 611 if (ret) 631 612 goto ctrl_disable; 632 613 633 - if (drm_atomic_helper_damage_merged(old_state, state, &damage)) 614 + drm_atomic_helper_damage_iter_init(&iter, old_state, new_state); 615 + drm_atomic_for_each_plane_damage(&iter, &damage) 634 616 gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage); 635 617 636 618 drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+2 -1
drivers/gpu/drm/imagination/Kconfig
··· 3 3 4 4 config DRM_POWERVR 5 5 tristate "Imagination Technologies PowerVR (Series 6 and later) & IMG Graphics" 6 - depends on ARM64 6 + depends on (ARM64 || RISCV && 64BIT) 7 7 depends on DRM 8 + depends on MMU 8 9 depends on PM 9 10 select DRM_EXEC 10 11 select DRM_GEM_SHMEM_HELPER
+5 -17
drivers/gpu/drm/imagination/pvr_device.c
··· 23 23 #include <linux/firmware.h> 24 24 #include <linux/gfp.h> 25 25 #include <linux/interrupt.h> 26 + #include <linux/of.h> 26 27 #include <linux/platform_device.h> 27 28 #include <linux/pm_runtime.h> 28 29 #include <linux/reset.h> ··· 118 117 pvr_dev->core_clk = core_clk; 119 118 pvr_dev->sys_clk = sys_clk; 120 119 pvr_dev->mem_clk = mem_clk; 121 - 122 - return 0; 123 - } 124 - 125 - static int pvr_device_reset_init(struct pvr_device *pvr_dev) 126 - { 127 - struct drm_device *drm_dev = from_pvr_device(pvr_dev); 128 - struct reset_control *reset; 129 - 130 - reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL); 131 - if (IS_ERR(reset)) 132 - return dev_err_probe(drm_dev->dev, PTR_ERR(reset), 133 - "failed to get gpu reset line\n"); 134 - 135 - pvr_dev->reset = reset; 136 120 137 121 return 0; 138 122 } ··· 604 618 struct device *dev = drm_dev->dev; 605 619 int err; 606 620 621 + /* Get the platform-specific data based on the compatible string. */ 622 + pvr_dev->device_data = of_device_get_match_data(dev); 623 + 607 624 /* 608 625 * Setup device parameters. We do this first in case other steps 609 626 * depend on them. ··· 620 631 if (err) 621 632 return err; 622 633 623 - /* Get the reset line for the GPU */ 624 - err = pvr_device_reset_init(pvr_dev); 634 + err = pvr_dev->device_data->pwr_ops->init(pvr_dev); 625 635 if (err) 626 636 return err; 627 637
+17
drivers/gpu/drm/imagination/pvr_device.h
··· 37 37 /* Forward declaration from <linux/firmware.h>. */ 38 38 struct firmware; 39 39 40 + /* Forward declaration from <linux/pwrseq/consumer.h> */ 41 + struct pwrseq_desc; 42 + 40 43 /** 41 44 * struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device 42 45 * @b: Branch ID. ··· 58 55 */ 59 56 struct pvr_fw_version { 60 57 u16 major, minor; 58 + }; 59 + 60 + /** 61 + * struct pvr_device_data - Platform specific data associated with a compatible string. 62 + * @pwr_ops: Pointer to a structure with platform-specific power functions. 63 + */ 64 + struct pvr_device_data { 65 + const struct pvr_power_sequence_ops *pwr_ops; 61 66 }; 62 67 63 68 /** ··· 108 97 109 98 /** @fw_version: Firmware version detected at runtime. */ 110 99 struct pvr_fw_version fw_version; 100 + 101 + /** @device_data: Pointer to platform-specific data. */ 102 + const struct pvr_device_data *device_data; 111 103 112 104 /** @regs_resource: Resource representing device control registers. */ 113 105 struct resource *regs_resource; ··· 161 147 * procedure. 162 148 */ 163 149 struct reset_control *reset; 150 + 151 + /** @pwrseq: Pointer to a power sequencer, if one is used. */ 152 + struct pwrseq_desc *pwrseq; 164 153 165 154 /** @irq: IRQ number. */ 166 155 int irq;
+21 -2
drivers/gpu/drm/imagination/pvr_drv.c
··· 1480 1480 pvr_power_domains_fini(pvr_dev); 1481 1481 } 1482 1482 1483 + static const struct pvr_device_data pvr_device_data_manual = { 1484 + .pwr_ops = &pvr_power_sequence_ops_manual, 1485 + }; 1486 + 1487 + static const struct pvr_device_data pvr_device_data_pwrseq = { 1488 + .pwr_ops = &pvr_power_sequence_ops_pwrseq, 1489 + }; 1490 + 1483 1491 static const struct of_device_id dt_match[] = { 1484 - { .compatible = "img,img-rogue", .data = NULL }, 1492 + { 1493 + .compatible = "thead,th1520-gpu", 1494 + .data = &pvr_device_data_pwrseq, 1495 + }, 1496 + { 1497 + .compatible = "img,img-rogue", 1498 + .data = &pvr_device_data_manual, 1499 + }, 1485 1500 1486 1501 /* 1487 1502 * This legacy compatible string was introduced early on before the more generic 1488 1503 * "img,img-rogue" was added. Keep it around here for compatibility, but never use 1489 1504 * "img,img-axe" in new devicetrees. 1490 1505 */ 1491 - { .compatible = "img,img-axe", .data = NULL }, 1506 + { 1507 + .compatible = "img,img-axe", 1508 + .data = &pvr_device_data_manual, 1509 + }, 1492 1510 {} 1493 1511 }; 1494 1512 MODULE_DEVICE_TABLE(of, dt_match); ··· 1531 1513 MODULE_LICENSE("Dual MIT/GPL"); 1532 1514 MODULE_IMPORT_NS("DMA_BUF"); 1533 1515 MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw"); 1516 + MODULE_FIRMWARE("powervr/rogue_36.52.104.182_v1.fw"); 1534 1517 MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
+123 -45
drivers/gpu/drm/imagination/pvr_power.c
··· 18 18 #include <linux/platform_device.h> 19 19 #include <linux/pm_domain.h> 20 20 #include <linux/pm_runtime.h> 21 + #include <linux/pwrseq/consumer.h> 21 22 #include <linux/reset.h> 22 23 #include <linux/timer.h> 23 24 #include <linux/types.h> ··· 235 234 return 0; 236 235 } 237 236 238 - int 239 - pvr_power_device_suspend(struct device *dev) 237 + static int pvr_power_init_manual(struct pvr_device *pvr_dev) 240 238 { 241 - struct platform_device *plat_dev = to_platform_device(dev); 242 - struct drm_device *drm_dev = platform_get_drvdata(plat_dev); 243 - struct pvr_device *pvr_dev = to_pvr_device(drm_dev); 244 - int err = 0; 245 - int idx; 239 + struct drm_device *drm_dev = from_pvr_device(pvr_dev); 240 + struct reset_control *reset; 246 241 247 - if (!drm_dev_enter(drm_dev, &idx)) 248 - return -EIO; 242 + reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL); 243 + if (IS_ERR(reset)) 244 + return dev_err_probe(drm_dev->dev, PTR_ERR(reset), 245 + "failed to get gpu reset line\n"); 249 246 250 - if (pvr_dev->fw_dev.booted) { 251 - err = pvr_power_fw_disable(pvr_dev, false); 252 - if (err) 253 - goto err_drm_dev_exit; 254 - } 247 + pvr_dev->reset = reset; 255 248 256 - clk_disable_unprepare(pvr_dev->mem_clk); 257 - clk_disable_unprepare(pvr_dev->sys_clk); 258 - clk_disable_unprepare(pvr_dev->core_clk); 259 - 260 - err = reset_control_assert(pvr_dev->reset); 261 - 262 - err_drm_dev_exit: 263 - drm_dev_exit(idx); 264 - 265 - return err; 249 + return 0; 266 250 } 267 251 268 - int 269 - pvr_power_device_resume(struct device *dev) 252 + static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev) 270 253 { 271 - struct platform_device *plat_dev = to_platform_device(dev); 272 - struct drm_device *drm_dev = platform_get_drvdata(plat_dev); 273 - struct pvr_device *pvr_dev = to_pvr_device(drm_dev); 274 - int idx; 275 254 int err; 276 - 277 - if (!drm_dev_enter(drm_dev, &idx)) 278 - return -EIO; 279 255 280 256 err = 
clk_prepare_enable(pvr_dev->core_clk); 281 257 if (err) 282 - goto err_drm_dev_exit; 258 + return err; 283 259 284 260 err = clk_prepare_enable(pvr_dev->sys_clk); 285 261 if (err) ··· 280 302 if (err) 281 303 goto err_mem_clk_disable; 282 304 283 - if (pvr_dev->fw_dev.booted) { 284 - err = pvr_power_fw_enable(pvr_dev); 285 - if (err) 286 - goto err_reset_assert; 287 - } 288 - 289 - drm_dev_exit(idx); 290 - 291 305 return 0; 292 - 293 - err_reset_assert: 294 - reset_control_assert(pvr_dev->reset); 295 306 296 307 err_mem_clk_disable: 297 308 clk_disable_unprepare(pvr_dev->mem_clk); ··· 290 323 291 324 err_core_clk_disable: 292 325 clk_disable_unprepare(pvr_dev->core_clk); 326 + 327 + return err; 328 + } 329 + 330 + static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev) 331 + { 332 + int err; 333 + 334 + err = reset_control_assert(pvr_dev->reset); 335 + 336 + clk_disable_unprepare(pvr_dev->mem_clk); 337 + clk_disable_unprepare(pvr_dev->sys_clk); 338 + clk_disable_unprepare(pvr_dev->core_clk); 339 + 340 + return err; 341 + } 342 + 343 + const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = { 344 + .init = pvr_power_init_manual, 345 + .power_on = pvr_power_on_sequence_manual, 346 + .power_off = pvr_power_off_sequence_manual, 347 + }; 348 + 349 + static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev) 350 + { 351 + struct device *dev = from_pvr_device(pvr_dev)->dev; 352 + 353 + pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power"); 354 + if (IS_ERR(pvr_dev->pwrseq)) { 355 + /* 356 + * This platform requires a sequencer. 
If we can't get it, we 357 + * must return the error (including -EPROBE_DEFER to wait for 358 + * the provider to appear) 359 + */ 360 + return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq), 361 + "Failed to get required power sequencer\n"); 362 + } 363 + 364 + return 0; 365 + } 366 + 367 + static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev) 368 + { 369 + return pwrseq_power_on(pvr_dev->pwrseq); 370 + } 371 + 372 + static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev) 373 + { 374 + return pwrseq_power_off(pvr_dev->pwrseq); 375 + } 376 + 377 + const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = { 378 + .init = pvr_power_init_pwrseq, 379 + .power_on = pvr_power_on_sequence_pwrseq, 380 + .power_off = pvr_power_off_sequence_pwrseq, 381 + }; 382 + 383 + int 384 + pvr_power_device_suspend(struct device *dev) 385 + { 386 + struct platform_device *plat_dev = to_platform_device(dev); 387 + struct drm_device *drm_dev = platform_get_drvdata(plat_dev); 388 + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); 389 + int err = 0; 390 + int idx; 391 + 392 + if (!drm_dev_enter(drm_dev, &idx)) 393 + return -EIO; 394 + 395 + if (pvr_dev->fw_dev.booted) { 396 + err = pvr_power_fw_disable(pvr_dev, false); 397 + if (err) 398 + goto err_drm_dev_exit; 399 + } 400 + 401 + err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev); 402 + 403 + err_drm_dev_exit: 404 + drm_dev_exit(idx); 405 + 406 + return err; 407 + } 408 + 409 + int 410 + pvr_power_device_resume(struct device *dev) 411 + { 412 + struct platform_device *plat_dev = to_platform_device(dev); 413 + struct drm_device *drm_dev = platform_get_drvdata(plat_dev); 414 + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); 415 + int idx; 416 + int err; 417 + 418 + if (!drm_dev_enter(drm_dev, &idx)) 419 + return -EIO; 420 + 421 + err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev); 422 + if (err) 423 + goto err_drm_dev_exit; 424 + 425 + if (pvr_dev->fw_dev.booted) { 426 + err = 
pvr_power_fw_enable(pvr_dev); 427 + if (err) 428 + goto err_power_off; 429 + } 430 + 431 + drm_dev_exit(idx); 432 + 433 + return 0; 434 + 435 + err_power_off: 436 + pvr_dev->device_data->pwr_ops->power_off(pvr_dev); 293 437 294 438 err_drm_dev_exit: 295 439 drm_dev_exit(idx);
+15
drivers/gpu/drm/imagination/pvr_power.h
··· 41 41 int pvr_power_domains_init(struct pvr_device *pvr_dev); 42 42 void pvr_power_domains_fini(struct pvr_device *pvr_dev); 43 43 44 + /** 45 + * struct pvr_power_sequence_ops - Platform specific power sequence operations. 46 + * @init: Pointer to the platform-specific initialization function. 47 + * @power_on: Pointer to the platform-specific power on function. 48 + * @power_off: Pointer to the platform-specific power off function. 49 + */ 50 + struct pvr_power_sequence_ops { 51 + int (*init)(struct pvr_device *pvr_dev); 52 + int (*power_on)(struct pvr_device *pvr_dev); 53 + int (*power_off)(struct pvr_device *pvr_dev); 54 + }; 55 + 56 + extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual; 57 + extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq; 58 + 44 59 #endif /* PVR_POWER_H */
+1 -4
drivers/gpu/drm/nouveau/nouveau_platform.c
··· 30 30 func = of_device_get_match_data(&pdev->dev); 31 31 32 32 drm = nouveau_platform_device_create(func, pdev, &device); 33 - if (IS_ERR(drm)) 34 - return PTR_ERR(drm); 35 - 36 - return 0; 33 + return PTR_ERR_OR_ZERO(drm); 37 34 } 38 35 39 36 static void nouveau_platform_remove(struct platform_device *pdev)
+2 -4
drivers/gpu/drm/omapdrm/omap_drv.c
··· 378 378 struct device_node *node = NULL; 379 379 380 380 if (output->bridge) { 381 - struct drm_bridge *bridge = output->bridge; 382 - 383 - while (drm_bridge_get_next_bridge(bridge)) 384 - bridge = drm_bridge_get_next_bridge(bridge); 381 + struct drm_bridge *bridge __free(drm_bridge_put) = 382 + drm_bridge_chain_get_last_bridge(output->bridge->encoder); 385 383 386 384 node = bridge->of_node; 387 385 }
-3
drivers/gpu/drm/panthor/panthor_gem.c
··· 74 74 mutex_destroy(&bo->label.lock); 75 75 76 76 drm_gem_free_mmap_offset(&bo->base.base); 77 - mutex_destroy(&bo->gpuva_list_lock); 78 77 drm_gem_shmem_free(&bo->base); 79 78 drm_gem_object_put(vm_root_gem); 80 79 } ··· 245 246 246 247 obj->base.base.funcs = &panthor_gem_funcs; 247 248 obj->base.map_wc = !ptdev->coherent; 248 - mutex_init(&obj->gpuva_list_lock); 249 - drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock); 250 249 mutex_init(&obj->label.lock); 251 250 252 251 panthor_gem_debugfs_bo_init(obj);
-12
drivers/gpu/drm/panthor/panthor_gem.h
··· 79 79 */ 80 80 struct drm_gem_object *exclusive_vm_root_gem; 81 81 82 - /** 83 - * @gpuva_list_lock: Custom GPUVA lock. 84 - * 85 - * Used to protect insertion of drm_gpuva elements to the 86 - * drm_gem_object.gpuva.list list. 87 - * 88 - * We can't use the GEM resv for that, because drm_gpuva_link() is 89 - * called in a dma-signaling path, where we're not allowed to take 90 - * resv locks. 91 - */ 92 - struct mutex gpuva_list_lock; 93 - 94 82 /** @flags: Combination of drm_panthor_bo_flags flags. */ 95 83 u32 flags; 96 84
+39 -43
drivers/gpu/drm/panthor/panthor_mmu.c
··· 569 569 write_cmd(ptdev, as_nr, AS_COMMAND_LOCK); 570 570 } 571 571 572 - static int mmu_hw_do_flush_on_gpu_ctrl(struct panthor_device *ptdev, int as_nr, 573 - u32 op) 572 + static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, 573 + u64 iova, u64 size, u32 op) 574 574 { 575 575 const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV; 576 - u32 lsc_flush_op = 0; 576 + u32 lsc_flush_op; 577 577 int ret; 578 578 579 - if (op == AS_COMMAND_FLUSH_MEM) 579 + lockdep_assert_held(&ptdev->mmu->as.slots_lock); 580 + 581 + switch (op) { 582 + case AS_COMMAND_FLUSH_MEM: 580 583 lsc_flush_op = CACHE_CLEAN | CACHE_INV; 584 + break; 585 + case AS_COMMAND_FLUSH_PT: 586 + lsc_flush_op = 0; 587 + break; 588 + default: 589 + drm_WARN(&ptdev->base, 1, "Unexpected AS_COMMAND: %d", op); 590 + return -EINVAL; 591 + } 592 + 593 + if (as_nr < 0) 594 + return 0; 595 + 596 + /* 597 + * If the AS number is greater than zero, then we can be sure 598 + * the device is up and running, so we don't need to explicitly 599 + * power it up 600 + */ 601 + 602 + lock_region(ptdev, as_nr, iova, size); 581 603 582 604 ret = wait_ready(ptdev, as_nr); 583 605 if (ret) ··· 617 595 write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK); 618 596 619 597 /* Wait for the unlock command to complete */ 620 - return wait_ready(ptdev, as_nr); 621 - } 622 - 623 - static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, 624 - u64 iova, u64 size, u32 op) 625 - { 626 - lockdep_assert_held(&ptdev->mmu->as.slots_lock); 627 - 628 - if (as_nr < 0) 629 - return 0; 630 - 631 - /* 632 - * If the AS number is greater than zero, then we can be sure 633 - * the device is up and running, so we don't need to explicitly 634 - * power it up 635 - */ 636 - 637 - if (op != AS_COMMAND_UNLOCK) 638 - lock_region(ptdev, as_nr, iova, size); 639 - 640 - if (op == AS_COMMAND_FLUSH_MEM || op == AS_COMMAND_FLUSH_PT) 641 - return mmu_hw_do_flush_on_gpu_ctrl(ptdev, as_nr, op); 642 - 643 - /* Run the MMU operation 
*/ 644 - write_cmd(ptdev, as_nr, op); 645 - 646 - /* Wait for the flush to complete */ 647 598 return wait_ready(ptdev, as_nr); 648 599 } 649 600 ··· 1102 1107 * GEM vm_bo list. 1103 1108 */ 1104 1109 dma_resv_lock(drm_gpuvm_resv(vm), NULL); 1105 - mutex_lock(&bo->gpuva_list_lock); 1110 + mutex_lock(&bo->base.base.gpuva.lock); 1106 1111 unpin = drm_gpuvm_bo_put(vm_bo); 1107 - mutex_unlock(&bo->gpuva_list_lock); 1112 + mutex_unlock(&bo->base.base.gpuva.lock); 1108 1113 dma_resv_unlock(drm_gpuvm_resv(vm)); 1109 1114 1110 1115 /* If the vm_bo object was destroyed, release the pin reference that ··· 1222 1227 (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP) 1223 1228 return -EINVAL; 1224 1229 1225 - /* Make sure the VA and size are aligned and in-bounds. */ 1230 + /* Make sure the VA and size are in-bounds. */ 1226 1231 if (size > bo->base.base.size || offset > bo->base.base.size - size) 1227 1232 return -EINVAL; 1228 1233 ··· 1277 1282 * calling this function. 
1278 1283 */ 1279 1284 dma_resv_lock(panthor_vm_resv(vm), NULL); 1280 - mutex_lock(&bo->gpuva_list_lock); 1285 + mutex_lock(&bo->base.base.gpuva.lock); 1281 1286 op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo); 1282 - mutex_unlock(&bo->gpuva_list_lock); 1287 + mutex_unlock(&bo->base.base.gpuva.lock); 1283 1288 dma_resv_unlock(panthor_vm_resv(vm)); 1284 1289 1285 1290 /* If the a vm_bo for this <VM,BO> combination exists, it already ··· 2031 2036 { 2032 2037 struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj); 2033 2038 2034 - mutex_lock(&bo->gpuva_list_lock); 2039 + mutex_lock(&bo->base.base.gpuva.lock); 2035 2040 drm_gpuva_link(&vma->base, vm_bo); 2036 2041 drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo)); 2037 - mutex_unlock(&bo->gpuva_list_lock); 2042 + mutex_unlock(&bo->base.base.gpuva.lock); 2038 2043 } 2039 2044 2040 2045 static void panthor_vma_unlink(struct panthor_vm *vm, ··· 2043 2048 struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj); 2044 2049 struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo); 2045 2050 2046 - mutex_lock(&bo->gpuva_list_lock); 2051 + mutex_lock(&bo->base.base.gpuva.lock); 2047 2052 drm_gpuva_unlink(&vma->base); 2048 - mutex_unlock(&bo->gpuva_list_lock); 2053 + mutex_unlock(&bo->base.base.gpuva.lock); 2049 2054 2050 2055 /* drm_gpuva_unlink() release the vm_bo, but we manually retained it 2051 2056 * when entering this function, so we can implement deferred VMA ··· 2415 2420 * to be handled the same way user VMAs are. 2416 2421 */ 2417 2422 drm_gpuvm_init(&vm->base, for_mcu ? 
"panthor-MCU-VM" : "panthor-GPU-VM", 2418 - DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem, 2419 - min_va, va_range, 0, 0, &panthor_gpuvm_ops); 2423 + DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE, 2424 + &ptdev->base, dummy_gem, min_va, va_range, 0, 0, 2425 + &panthor_gpuvm_ops); 2420 2426 drm_gem_object_put(dummy_gem); 2421 2427 return vm; 2422 2428 ··· 2447 2451 int ret; 2448 2452 2449 2453 /* Aligned on page size. */ 2450 - if (!IS_ALIGNED(op->va | op->size, vm_pgsz)) 2454 + if (!IS_ALIGNED(op->va | op->size | op->bo_offset, vm_pgsz)) 2451 2455 return -EINVAL; 2452 2456 2453 2457 switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
+4 -1
drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
··· 576 576 udelay(10); 577 577 rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); 578 578 579 - ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN; 579 + rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK); 580 + rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1); 581 + 582 + ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN; 580 583 rcar_mipi_dsi_write(dsi, PPISETR, ppisetr); 581 584 582 585 rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+4 -4
drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
··· 12 12 #define LINKSR_LPBUSY (1 << 1) 13 13 #define LINKSR_HSBUSY (1 << 0) 14 14 15 + #define TXSETR 0x100 16 + #define TXSETR_LANECNT_MASK (0x3 << 0) 17 + 15 18 /* 16 19 * Video Mode Register 17 20 */ ··· 83 80 * PHY-Protocol Interface (PPI) Registers 84 81 */ 85 82 #define PPISETR 0x700 86 - #define PPISETR_DLEN_0 (0x1 << 0) 87 - #define PPISETR_DLEN_1 (0x3 << 0) 88 - #define PPISETR_DLEN_2 (0x7 << 0) 89 - #define PPISETR_DLEN_3 (0xf << 0) 83 + #define PPISETR_DLEN_MASK (0xf << 0) 90 84 #define PPISETR_CLEN (1 << 8) 91 85 92 86 #define PPICLCR 0x710
+9
drivers/gpu/drm/rockchip/Kconfig
··· 10 10 select VIDEOMODE_HELPERS 11 11 select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP 12 12 select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP 13 + select DRM_DW_DP if ROCKCHIP_DW_DP 13 14 select DRM_DW_HDMI if ROCKCHIP_DW_HDMI 14 15 select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP 15 16 select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI ··· 60 59 for the cdn DP driver. If you want to enable Dp on 61 60 RK3399 based SoC, you should select this 62 61 option. 62 + 63 + config ROCKCHIP_DW_DP 64 + bool "Rockchip specific extensions for Synopsys DW DP" 65 + help 66 + This selects support for Rockchip SoC specific extensions 67 + to enable Synopsys DesignWare Cores based DisplayPort transmit 68 + controller support on Rockchip SoC, If you want to enable DP on 69 + rk3588 based SoC, you should select this option. 63 70 64 71 config ROCKCHIP_DW_HDMI 65 72 bool "Rockchip specific extensions for Synopsys DW HDMI"
+1
drivers/gpu/drm/rockchip/Makefile
··· 14 14 rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o 15 15 rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o 16 16 rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI2) += dw-mipi-dsi2-rockchip.o 17 + rockchipdrm-$(CONFIG_ROCKCHIP_DW_DP) += dw_dp-rockchip.o 17 18 rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o 18 19 rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o 19 20 rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
+150
drivers/gpu/drm/rockchip/dw_dp-rockchip.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2020 Rockchip Electronics Co., Ltd. 4 + * 5 + * Author: Zhang Yubing <yubing.zhang@rock-chips.com> 6 + * Author: Andy Yan <andy.yan@rock-chips.com> 7 + */ 8 + 9 + #include <linux/component.h> 10 + #include <linux/of_device.h> 11 + #include <linux/platform_device.h> 12 + #include <drm/bridge/dw_dp.h> 13 + #include <drm/drm_atomic_helper.h> 14 + #include <drm/drm_bridge.h> 15 + #include <drm/drm_bridge_connector.h> 16 + #include <drm/drm_of.h> 17 + #include <drm/drm_print.h> 18 + #include <drm/drm_probe_helper.h> 19 + #include <drm/drm_simple_kms_helper.h> 20 + 21 + #include <linux/media-bus-format.h> 22 + #include <linux/videodev2.h> 23 + 24 + #include "rockchip_drm_drv.h" 25 + #include "rockchip_drm_vop.h" 26 + 27 + struct rockchip_dw_dp { 28 + struct dw_dp *base; 29 + struct device *dev; 30 + struct rockchip_encoder encoder; 31 + }; 32 + 33 + static int dw_dp_encoder_atomic_check(struct drm_encoder *encoder, 34 + struct drm_crtc_state *crtc_state, 35 + struct drm_connector_state *conn_state) 36 + { 37 + struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); 38 + struct drm_atomic_state *state = conn_state->state; 39 + struct drm_display_info *di = &conn_state->connector->display_info; 40 + struct drm_bridge *bridge = drm_bridge_chain_get_first_bridge(encoder); 41 + struct drm_bridge_state *bridge_state = drm_atomic_get_new_bridge_state(state, bridge); 42 + u32 bus_format = bridge_state->input_bus_cfg.format; 43 + 44 + switch (bus_format) { 45 + case MEDIA_BUS_FMT_UYYVYY10_0_5X30: 46 + case MEDIA_BUS_FMT_UYYVYY8_0_5X24: 47 + s->output_mode = ROCKCHIP_OUT_MODE_YUV420; 48 + break; 49 + case MEDIA_BUS_FMT_YUYV10_1X20: 50 + case MEDIA_BUS_FMT_YUYV8_1X16: 51 + s->output_mode = ROCKCHIP_OUT_MODE_S888_DUMMY; 52 + break; 53 + case MEDIA_BUS_FMT_RGB101010_1X30: 54 + case MEDIA_BUS_FMT_RGB888_1X24: 55 + case MEDIA_BUS_FMT_RGB666_1X24_CPADHI: 56 + case MEDIA_BUS_FMT_YUV10_1X30: 57 + case 
MEDIA_BUS_FMT_YUV8_1X24: 58 + default: 59 + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 60 + break; 61 + } 62 + 63 + s->output_type = DRM_MODE_CONNECTOR_DisplayPort; 64 + s->bus_format = bus_format; 65 + s->bus_flags = di->bus_flags; 66 + s->color_space = V4L2_COLORSPACE_DEFAULT; 67 + 68 + return 0; 69 + } 70 + 71 + static const struct drm_encoder_helper_funcs dw_dp_encoder_helper_funcs = { 72 + .atomic_check = dw_dp_encoder_atomic_check, 73 + }; 74 + 75 + static int dw_dp_rockchip_bind(struct device *dev, struct device *master, void *data) 76 + { 77 + struct platform_device *pdev = to_platform_device(dev); 78 + struct dw_dp_plat_data plat_data; 79 + struct drm_device *drm_dev = data; 80 + struct rockchip_dw_dp *dp; 81 + struct drm_encoder *encoder; 82 + struct drm_connector *connector; 83 + int ret; 84 + 85 + dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 86 + if (!dp) 87 + return -ENOMEM; 88 + 89 + dp->dev = dev; 90 + platform_set_drvdata(pdev, dp); 91 + 92 + plat_data.max_link_rate = 810000; 93 + encoder = &dp->encoder.encoder; 94 + encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, dev->of_node); 95 + rockchip_drm_encoder_set_crtc_endpoint_id(&dp->encoder, dev->of_node, 0, 0); 96 + 97 + ret = drmm_encoder_init(drm_dev, encoder, NULL, DRM_MODE_ENCODER_TMDS, NULL); 98 + if (ret) 99 + return ret; 100 + drm_encoder_helper_add(encoder, &dw_dp_encoder_helper_funcs); 101 + 102 + dp->base = dw_dp_bind(dev, encoder, &plat_data); 103 + if (IS_ERR(dp->base)) { 104 + ret = PTR_ERR(dp->base); 105 + return ret; 106 + } 107 + 108 + connector = drm_bridge_connector_init(drm_dev, encoder); 109 + if (IS_ERR(connector)) { 110 + ret = PTR_ERR(connector); 111 + return dev_err_probe(dev, ret, "Failed to init bridge connector"); 112 + } 113 + 114 + drm_connector_attach_encoder(connector, encoder); 115 + 116 + return 0; 117 + } 118 + 119 + static const struct component_ops dw_dp_rockchip_component_ops = { 120 + .bind = dw_dp_rockchip_bind, 121 + }; 122 + 123 + static int 
dw_dp_probe(struct platform_device *pdev) 124 + { 125 + struct device *dev = &pdev->dev; 126 + 127 + return component_add(dev, &dw_dp_rockchip_component_ops); 128 + } 129 + 130 + static void dw_dp_remove(struct platform_device *pdev) 131 + { 132 + struct rockchip_dw_dp *dp = platform_get_drvdata(pdev); 133 + 134 + component_del(dp->dev, &dw_dp_rockchip_component_ops); 135 + } 136 + 137 + static const struct of_device_id dw_dp_of_match[] = { 138 + { .compatible = "rockchip,rk3588-dp", }, 139 + {} 140 + }; 141 + MODULE_DEVICE_TABLE(of, dw_dp_of_match); 142 + 143 + struct platform_driver dw_dp_driver = { 144 + .probe = dw_dp_probe, 145 + .remove = dw_dp_remove, 146 + .driver = { 147 + .name = "dw-dp", 148 + .of_match_table = dw_dp_of_match, 149 + }, 150 + };
+1
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 529 529 ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver, 530 530 CONFIG_ROCKCHIP_ANALOGIX_DP); 531 531 ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP); 532 + ADD_ROCKCHIP_SUB_DRIVER(dw_dp_driver, CONFIG_ROCKCHIP_DW_DP); 532 533 ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver, 533 534 CONFIG_ROCKCHIP_DW_HDMI); 534 535 ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver,
+1
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
··· 87 87 struct device_node *np, int port, int reg); 88 88 int rockchip_drm_endpoint_is_subdriver(struct device_node *ep); 89 89 extern struct platform_driver cdn_dp_driver; 90 + extern struct platform_driver dw_dp_driver; 90 91 extern struct platform_driver dw_hdmi_rockchip_pltfm_driver; 91 92 extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver; 92 93 extern struct platform_driver dw_mipi_dsi_rockchip_driver;
+14 -11
drivers/gpu/drm/scheduler/sched_entity.c
··· 285 285 return 0; 286 286 287 287 sched = entity->rq->sched; 288 - /** 289 - * The client will not queue more IBs during this fini, consume existing 290 - * queued IBs or discard them on SIGKILL 288 + /* 289 + * The client will not queue more jobs during this fini - consume 290 + * existing queued ones, or discard them on SIGKILL. 291 291 */ 292 292 if (current->flags & PF_EXITING) { 293 293 if (timeout) ··· 300 300 drm_sched_entity_is_idle(entity)); 301 301 } 302 302 303 - /* For killed process disable any more IBs enqueue right now */ 303 + /* For a killed process disallow further enqueueing of jobs. */ 304 304 last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); 305 305 if ((!last_user || last_user == current->group_leader) && 306 306 (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) ··· 324 324 void drm_sched_entity_fini(struct drm_sched_entity *entity) 325 325 { 326 326 /* 327 - * If consumption of existing IBs wasn't completed. Forcefully remove 328 - * them here. Also makes sure that the scheduler won't touch this entity 329 - * any more. 327 + * If consumption of existing jobs wasn't completed forcefully remove 328 + * them. Also makes sure that the scheduler won't touch this entity any 329 + * more. 330 330 */ 331 331 drm_sched_entity_kill(entity); 332 332 ··· 391 391 * Add a callback to the current dependency of the entity to wake up the 392 392 * scheduler when the entity becomes available. 
393 393 */ 394 - static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) 394 + static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity, 395 + struct drm_sched_job *sched_job) 395 396 { 396 397 struct drm_gpu_scheduler *sched = entity->rq->sched; 397 398 struct dma_fence *fence = entity->dependency; ··· 421 420 dma_fence_put(entity->dependency); 422 421 entity->dependency = fence; 423 422 } 423 + 424 + if (trace_drm_sched_job_unschedulable_enabled() && 425 + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags)) 426 + trace_drm_sched_job_unschedulable(sched_job, entity->dependency); 424 427 425 428 if (!dma_fence_add_callback(entity->dependency, &entity->cb, 426 429 drm_sched_entity_wakeup)) ··· 466 461 467 462 while ((entity->dependency = 468 463 drm_sched_job_dependency(sched_job, entity))) { 469 - if (drm_sched_entity_add_dependency_cb(entity)) { 470 - trace_drm_sched_job_unschedulable(sched_job, entity->dependency); 464 + if (drm_sched_entity_add_dependency_cb(entity, sched_job)) 471 465 return NULL; 472 - } 473 466 } 474 467 475 468 /* skip jobs from entity that marked guilty */
+16
drivers/gpu/drm/scheduler/sched_main.c
··· 1424 1424 * Prevents reinsertion and marks job_queue as idle, 1425 1425 * it will be removed from the rq in drm_sched_entity_fini() 1426 1426 * eventually 1427 + * 1428 + * FIXME: 1429 + * This lacks the proper spin_lock(&s_entity->lock) and 1430 + * is, therefore, a race condition. Most notably, it 1431 + * can race with drm_sched_entity_push_job(). The lock 1432 + * cannot be taken here, however, because this would 1433 + * lead to lock inversion -> deadlock. 1434 + * 1435 + * The best solution probably is to enforce the life 1436 + * time rule of all entities having to be torn down 1437 + * before their scheduler. Then, however, locking could 1438 + * be dropped alltogether from this function. 1439 + * 1440 + * For now, this remains a potential race in all 1441 + * drivers that keep entities alive for longer than 1442 + * the scheduler. 1427 1443 */ 1428 1444 s_entity->stopped = true; 1429 1445 spin_unlock(&rq->lock);
-1
drivers/gpu/drm/scheduler/tests/sched_tests.h
··· 11 11 #include <linux/hrtimer.h> 12 12 #include <linux/ktime.h> 13 13 #include <linux/list.h> 14 - #include <linux/atomic.h> 15 14 #include <linux/mutex.h> 16 15 #include <linux/types.h> 17 16
+24 -21
drivers/gpu/drm/sitronix/st7571-i2c.c
··· 151 151 bool ignore_nak; 152 152 153 153 bool grayscale; 154 + bool inverted; 154 155 u32 height_mm; 155 156 u32 width_mm; 156 157 u32 startline; ··· 219 218 return ret; 220 219 } 221 220 222 - static inline u8 st7571_transform_xy(const char *p, int x, int y) 221 + static inline u8 st7571_transform_xy(const char *p, int x, int y, u8 bpp) 223 222 { 224 223 int xrest = x % 8; 225 224 u8 result = 0; 225 + u8 row_len = 16 * bpp; 226 226 227 227 /* 228 228 * Transforms an (x, y) pixel coordinate into a vertical 8-bit ··· 238 236 239 237 for (int i = 0; i < 8; i++) { 240 238 int row_idx = y + i; 241 - u8 byte = p[row_idx * 16 + x]; 239 + u8 byte = p[row_idx * row_len + x]; 242 240 u8 bit = (byte >> xrest) & 1; 243 241 244 242 result |= (bit << i); ··· 305 303 struct iosys_map dst; 306 304 307 305 switch (fb->format->format) { 308 - case DRM_FORMAT_XRGB8888: /* Only support XRGB8888 in monochrome mode */ 309 - dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8); 306 + case DRM_FORMAT_XRGB8888: 307 + dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 4); 310 308 iosys_map_set_vaddr(&dst, st7571->hwbuf); 311 309 312 - drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); 310 + drm_fb_xrgb8888_to_gray2(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); 313 311 break; 314 312 315 313 case DRM_FORMAT_R1: ··· 335 333 336 334 for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) { 337 335 for (int x = rect->x1; x < rect->x2; x++) 338 - row[x] = st7571_transform_xy(st7571->hwbuf, x, y); 336 + row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 1); 339 337 340 338 st7571_set_position(st7571, rect->x1, y); 341 339 ··· 360 358 rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines); 361 359 362 360 switch (format) { 363 - case DRM_FORMAT_XRGB8888: 364 - /* Threated as monochrome (R1) */ 365 - fallthrough; 366 361 case DRM_FORMAT_R1: 367 - x1 = rect->x1; 368 - x2 = rect->x2; 362 + x1 = rect->x1 * 1; 363 + x2 = rect->x2 * 1; 369 364 
break; 370 365 case DRM_FORMAT_R2: 366 + fallthrough; 367 + case DRM_FORMAT_XRGB8888: 371 368 x1 = rect->x1 * 2; 372 369 x2 = rect->x2 * 2; 373 370 break; ··· 374 373 375 374 for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) { 376 375 for (int x = x1; x < x2; x++) 377 - row[x] = st7571_transform_xy(st7571->hwbuf, x, y); 376 + row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 2); 378 377 379 378 st7571_set_position(st7571, rect->x1, y); 380 379 ··· 387 386 * even if the format is monochrome. 388 387 * 389 388 * The bit values maps to the following grayscale: 390 - * 0 0 = White 391 - * 0 1 = Light gray 392 - * 1 0 = Dark gray 393 - * 1 1 = Black 389 + * 0 0 = Black 390 + * 0 1 = Dark gray 391 + * 1 0 = Light gray 392 + * 1 1 = White 394 393 * 395 394 * For monochrome formats, write the same value twice to get 396 395 * either a black or white pixel. 397 396 */ 398 - if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888) 397 + if (format == DRM_FORMAT_R1) 399 398 regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1); 400 399 } 401 400 } ··· 793 792 794 793 of_property_read_u32(np, "width-mm", &st7567->width_mm); 795 794 of_property_read_u32(np, "height-mm", &st7567->height_mm); 795 + st7567->inverted = of_property_read_bool(np, "sitronix,inverted"); 796 796 797 797 st7567->pformat = &st7571_monochrome; 798 798 st7567->bpp = 1; ··· 821 819 of_property_read_u32(np, "width-mm", &st7571->width_mm); 822 820 of_property_read_u32(np, "height-mm", &st7571->height_mm); 823 821 st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale"); 822 + st7571->inverted = of_property_read_bool(np, "sitronix,inverted"); 824 823 825 824 if (st7571->grayscale) { 826 825 st7571->pformat = &st7571_grayscale; ··· 876 873 ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */ 877 874 ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */ 878 875 879 - ST7571_SET_REVERSE(0), 876 + ST7571_SET_REVERSE(st7567->inverted ? 
1 : 0), 880 877 ST7571_SET_ENTIRE_DISPLAY_ON(0), 881 878 }; 882 879 ··· 920 917 ST7571_SET_COLOR_MODE(st7571->pformat->mode), 921 918 ST7571_COMMAND_SET_NORMAL, 922 919 923 - ST7571_SET_REVERSE(0), 920 + ST7571_SET_REVERSE(st7571->inverted ? 1 : 0), 924 921 ST7571_SET_ENTIRE_DISPLAY_ON(0), 925 922 }; 926 923 ··· 1027 1024 drm_dev_unplug(&st7571->dev); 1028 1025 } 1029 1026 1030 - struct st7571_panel_data st7567_config = { 1027 + static const struct st7571_panel_data st7567_config = { 1031 1028 .init = st7567_lcd_init, 1032 1029 .parse_dt = st7567_parse_dt, 1033 1030 .constraints = { ··· 1039 1036 }, 1040 1037 }; 1041 1038 1042 - struct st7571_panel_data st7571_config = { 1039 + static const struct st7571_panel_data st7571_config = { 1043 1040 .init = st7571_lcd_init, 1044 1041 .parse_dt = st7571_parse_dt, 1045 1042 .constraints = {
+1 -2
drivers/gpu/drm/solomon/ssd130x-spi.c
··· 74 74 75 75 t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL); 76 76 if (!t) 77 - return dev_err_probe(dev, -ENOMEM, 78 - "Failed to allocate SPI transport data\n"); 77 + return -ENOMEM; 79 78 80 79 t->spi = spi; 81 80 t->dc = dc;
+11 -1
drivers/gpu/drm/stm/drv.c
··· 236 236 drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); 237 237 } 238 238 239 + static struct ltdc_plat_data stm_drm_plat_data = { 240 + .pad_max_freq_hz = 90000000, 241 + }; 242 + 243 + static struct ltdc_plat_data stm_drm_plat_data_mp25 = { 244 + .pad_max_freq_hz = 150000000, 245 + }; 246 + 239 247 static const struct of_device_id drv_dt_ids[] = { 240 - { .compatible = "st,stm32-ltdc"}, 248 + { .compatible = "st,stm32-ltdc", .data = &stm_drm_plat_data, }, 249 + { .compatible = "st,stm32mp251-ltdc", .data = &stm_drm_plat_data_mp25, }, 250 + { .compatible = "st,stm32mp255-ltdc", .data = &stm_drm_plat_data_mp25, }, 241 251 { /* end node */ }, 242 252 }; 243 253 MODULE_DEVICE_TABLE(of, drv_dt_ids);
+55 -3
drivers/gpu/drm/stm/ltdc.c
··· 14 14 #include <linux/interrupt.h> 15 15 #include <linux/media-bus-format.h> 16 16 #include <linux/module.h> 17 + #include <linux/of.h> 17 18 #include <linux/of_graph.h> 18 19 #include <linux/pinctrl/consumer.h> 19 20 #include <linux/platform_device.h> ··· 52 51 #define HWVER_10300 0x010300 53 52 #define HWVER_20101 0x020101 54 53 #define HWVER_40100 0x040100 54 + #define HWVER_40101 0x040101 55 55 56 56 /* 57 57 * The address of some registers depends on the HW version: such registers have ··· 837 835 int target_min = target - CLK_TOLERANCE_HZ; 838 836 int target_max = target + CLK_TOLERANCE_HZ; 839 837 int result; 838 + 839 + if (ldev->lvds_clk) { 840 + result = clk_round_rate(ldev->lvds_clk, target); 841 + drm_dbg_driver(crtc->dev, "lvds pixclk rate target %d, available %d\n", 842 + target, result); 843 + } 840 844 841 845 result = clk_round_rate(ldev->pixel_clk, target); 842 846 ··· 1788 1780 { 1789 1781 struct ltdc_device *ldev = ddev->dev_private; 1790 1782 u32 bus_width_log2, lcr, gc2r; 1783 + const struct ltdc_plat_data *pdata = of_device_get_match_data(ddev->dev); 1791 1784 1792 1785 /* 1793 1786 * at least 1 layer must be managed & the number of layers ··· 1803 1794 bus_width_log2 = (gc2r & GC2R_BW) >> 4; 1804 1795 ldev->caps.bus_width = 8 << bus_width_log2; 1805 1796 regmap_read(ldev->regmap, LTDC_IDR, &ldev->caps.hw_version); 1797 + 1798 + ldev->caps.pad_max_freq_hz = pdata->pad_max_freq_hz; 1806 1799 1807 1800 switch (ldev->caps.hw_version) { 1808 1801 case HWVER_10200: ··· 1823 1812 * does not work on 2nd layer. 
1824 1813 */ 1825 1814 ldev->caps.non_alpha_only_l1 = true; 1826 - ldev->caps.pad_max_freq_hz = 90000000; 1827 1815 if (ldev->caps.hw_version == HWVER_10200) 1828 1816 ldev->caps.pad_max_freq_hz = 65000000; 1829 1817 ldev->caps.nb_irq = 2; ··· 1853 1843 ldev->caps.fifo_threshold = false; 1854 1844 break; 1855 1845 case HWVER_40100: 1846 + case HWVER_40101: 1856 1847 ldev->caps.layer_ofs = LAY_OFS_1; 1857 1848 ldev->caps.layer_regs = ltdc_layer_regs_a2; 1858 1849 ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a2; ··· 1861 1850 ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a2); 1862 1851 ldev->caps.pix_fmt_flex = true; 1863 1852 ldev->caps.non_alpha_only_l1 = false; 1864 - ldev->caps.pad_max_freq_hz = 90000000; 1865 1853 ldev->caps.nb_irq = 2; 1866 1854 ldev->caps.ycbcr_input = true; 1867 1855 ldev->caps.ycbcr_output = true; ··· 1883 1873 1884 1874 drm_dbg_driver(ddev, "\n"); 1885 1875 clk_disable_unprepare(ldev->pixel_clk); 1876 + if (ldev->bus_clk) 1877 + clk_disable_unprepare(ldev->bus_clk); 1878 + if (ldev->lvds_clk) 1879 + clk_disable_unprepare(ldev->lvds_clk); 1886 1880 } 1887 1881 1888 1882 int ltdc_resume(struct drm_device *ddev) ··· 1902 1888 return ret; 1903 1889 } 1904 1890 1905 - return 0; 1891 + if (ldev->bus_clk) { 1892 + ret = clk_prepare_enable(ldev->bus_clk); 1893 + if (ret) { 1894 + drm_err(ddev, "failed to enable bus clock (%d)\n", ret); 1895 + return ret; 1896 + } 1897 + } 1898 + 1899 + if (ldev->lvds_clk) { 1900 + ret = clk_prepare_enable(ldev->lvds_clk); 1901 + if (ret) 1902 + drm_err(ddev, "failed to prepare lvds clock\n"); 1903 + } 1904 + 1905 + return ret; 1906 1906 } 1907 1907 1908 1908 int ltdc_load(struct drm_device *ddev) ··· 1951 1923 return -ENODEV; 1952 1924 } 1953 1925 1926 + if (of_device_is_compatible(np, "st,stm32mp251-ltdc") || 1927 + of_device_is_compatible(np, "st,stm32mp255-ltdc")) { 1928 + ldev->bus_clk = devm_clk_get(dev, "bus"); 1929 + if (IS_ERR(ldev->bus_clk)) 1930 + return dev_err_probe(dev, PTR_ERR(ldev->bus_clk), 1931 + "Unable 
to get bus clock\n"); 1932 + 1933 + ret = clk_prepare_enable(ldev->bus_clk); 1934 + if (ret) { 1935 + drm_err(ddev, "Unable to prepare bus clock\n"); 1936 + return ret; 1937 + } 1938 + } 1939 + 1954 1940 /* Get endpoints if any */ 1955 1941 for (i = 0; i < nb_endpoints; i++) { 1956 1942 ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge); ··· 1997 1955 } 1998 1956 } 1999 1957 } 1958 + 1959 + ldev->lvds_clk = devm_clk_get(dev, "lvds"); 1960 + if (IS_ERR(ldev->lvds_clk)) 1961 + ldev->lvds_clk = NULL; 2000 1962 2001 1963 rstc = devm_reset_control_get_exclusive(dev, NULL); 2002 1964 ··· 2081 2035 2082 2036 clk_disable_unprepare(ldev->pixel_clk); 2083 2037 2038 + if (ldev->bus_clk) 2039 + clk_disable_unprepare(ldev->bus_clk); 2040 + 2084 2041 pinctrl_pm_select_sleep_state(ddev->dev); 2085 2042 2086 2043 pm_runtime_enable(ddev->dev); ··· 2091 2042 return 0; 2092 2043 err: 2093 2044 clk_disable_unprepare(ldev->pixel_clk); 2045 + 2046 + if (ldev->bus_clk) 2047 + clk_disable_unprepare(ldev->bus_clk); 2094 2048 2095 2049 return ret; 2096 2050 }
+6
drivers/gpu/drm/stm/ltdc.h
··· 40 40 ktime_t last_timestamp; 41 41 }; 42 42 43 + struct ltdc_plat_data { 44 + int pad_max_freq_hz; /* max frequency supported by pad */ 45 + }; 46 + 43 47 struct ltdc_device { 44 48 void __iomem *regs; 45 49 struct regmap *regmap; 46 50 struct clk *pixel_clk; /* lcd pixel clock */ 51 + struct clk *lvds_clk; /* lvds pixel clock */ 52 + struct clk *bus_clk; /* bus clock */ 47 53 struct mutex err_lock; /* protecting error_status */ 48 54 struct ltdc_caps caps; 49 55 u32 irq_status;
+1 -2
drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
··· 238 238 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 239 239 struct drm_framebuffer *fb = plane_state->fb; 240 240 unsigned int dst_pitch = sysfb->fb_pitch; 241 - struct drm_crtc_state *crtc_state = crtc_state = 242 - drm_atomic_get_new_crtc_state(state, plane_state->crtc); 241 + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); 243 242 struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); 244 243 const struct drm_format_info *dst_format = sysfb_crtc_state->format; 245 244 struct drm_atomic_helper_damage_iter iter;
+1 -1
drivers/gpu/drm/sysfb/vesadrm.c
··· 289 289 break; 290 290 } 291 291 break; 292 - }; 292 + } 293 293 294 294 return 0; 295 295 }
+15 -7
drivers/gpu/drm/tests/drm_exec_test.c
··· 150 150 static void test_prepare_array(struct kunit *test) 151 151 { 152 152 struct drm_exec_priv *priv = test->priv; 153 - struct drm_gem_object gobj1 = { }; 154 - struct drm_gem_object gobj2 = { }; 155 - struct drm_gem_object *array[] = { &gobj1, &gobj2 }; 153 + struct drm_gem_object *gobj1; 154 + struct drm_gem_object *gobj2; 155 + struct drm_gem_object *array[] = { 156 + (gobj1 = kunit_kzalloc(test, sizeof(*gobj1), GFP_KERNEL)), 157 + (gobj2 = kunit_kzalloc(test, sizeof(*gobj2), GFP_KERNEL)), 158 + }; 156 159 struct drm_exec exec; 157 160 int ret; 158 161 159 - drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE); 160 - drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE); 162 + if (!gobj1 || !gobj2) { 163 + KUNIT_FAIL(test, "Failed to allocate GEM objects.\n"); 164 + return; 165 + } 166 + 167 + drm_gem_private_object_init(priv->drm, gobj1, PAGE_SIZE); 168 + drm_gem_private_object_init(priv->drm, gobj2, PAGE_SIZE); 161 169 162 170 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 163 171 drm_exec_until_all_locked(&exec) ··· 174 166 KUNIT_EXPECT_EQ(test, ret, 0); 175 167 drm_exec_fini(&exec); 176 168 177 - drm_gem_private_object_fini(&gobj1); 178 - drm_gem_private_object_fini(&gobj2); 169 + drm_gem_private_object_fini(gobj1); 170 + drm_gem_private_object_fini(gobj2); 179 171 } 180 172 181 173 static void test_multiple_loops(struct kunit *test)
+150 -148
drivers/gpu/drm/tidss/tidss_dispc.c
··· 4 4 * Author: Jyri Sarha <jsarha@ti.com> 5 5 */ 6 6 7 + #include <linux/bitfield.h> 7 8 #include <linux/clk.h> 8 9 #include <linux/delay.h> 9 10 #include <linux/dma-mapping.h> ··· 595 594 * number. For example 7:0 596 595 */ 597 596 598 - static u32 FLD_MASK(u32 start, u32 end) 599 - { 600 - return ((1 << (start - end + 1)) - 1) << end; 601 - } 597 + #define REG_GET(dispc, idx, mask) \ 598 + ((u32)FIELD_GET((mask), dispc_read((dispc), (idx)))) 602 599 603 - static u32 FLD_VAL(u32 val, u32 start, u32 end) 604 - { 605 - return (val << end) & FLD_MASK(start, end); 606 - } 600 + #define REG_FLD_MOD(dispc, idx, val, mask) \ 601 + ({ \ 602 + struct dispc_device *_dispc = (dispc); \ 603 + u32 _idx = (idx); \ 604 + u32 _reg = dispc_read(_dispc, _idx); \ 605 + FIELD_MODIFY((mask), &_reg, (val)); \ 606 + dispc_write(_dispc, _idx, _reg); \ 607 + }) 607 608 608 - static u32 FLD_GET(u32 val, u32 start, u32 end) 609 - { 610 - return (val & FLD_MASK(start, end)) >> end; 611 - } 609 + #define VID_REG_GET(dispc, hw_plane, idx, mask) \ 610 + ((u32)FIELD_GET((mask), dispc_vid_read((dispc), (hw_plane), (idx)))) 612 611 613 - static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end) 614 - { 615 - return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end); 616 - } 612 + #define VID_REG_FLD_MOD(dispc, hw_plane, idx, val, mask) \ 613 + ({ \ 614 + struct dispc_device *_dispc = (dispc); \ 615 + u32 _hw_plane = (hw_plane); \ 616 + u32 _idx = (idx); \ 617 + u32 _reg = dispc_vid_read(_dispc, _hw_plane, _idx); \ 618 + FIELD_MODIFY((mask), &_reg, (val)); \ 619 + dispc_vid_write(_dispc, _hw_plane, _idx, _reg); \ 620 + }) 617 621 618 - static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end) 619 - { 620 - return FLD_GET(dispc_read(dispc, idx), start, end); 621 - } 622 + #define VP_REG_GET(dispc, vp, idx, mask) \ 623 + ((u32)FIELD_GET((mask), dispc_vp_read((dispc), (vp), (idx)))) 622 624 623 - static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val, 624 - 
u32 start, u32 end) 625 - { 626 - dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val, 627 - start, end)); 628 - } 625 + #define VP_REG_FLD_MOD(dispc, vp, idx, val, mask) \ 626 + ({ \ 627 + struct dispc_device *_dispc = (dispc); \ 628 + u32 _vp = (vp); \ 629 + u32 _idx = (idx); \ 630 + u32 _reg = dispc_vp_read(_dispc, _vp, _idx); \ 631 + FIELD_MODIFY((mask), &_reg, (val)); \ 632 + dispc_vp_write(_dispc, _vp, _idx, _reg); \ 633 + }) 629 634 630 - static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx, 631 - u32 start, u32 end) 632 - { 633 - return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end); 634 - } 635 - 636 - static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx, 637 - u32 val, u32 start, u32 end) 638 - { 639 - dispc_vid_write(dispc, hw_plane, idx, 640 - FLD_MOD(dispc_vid_read(dispc, hw_plane, idx), 641 - val, start, end)); 642 - } 643 - 644 - static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx, 645 - u32 start, u32 end) 646 - { 647 - return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end); 648 - } 649 - 650 - static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val, 651 - u32 start, u32 end) 652 - { 653 - dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx), 654 - val, start, end)); 655 - } 656 - 657 - __maybe_unused 658 - static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx, 659 - u32 start, u32 end) 660 - { 661 - return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end); 662 - } 663 - 664 - static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx, 665 - u32 val, u32 start, u32 end) 666 - { 667 - dispc_ovr_write(dispc, ovr, idx, 668 - FLD_MOD(dispc_ovr_read(dispc, ovr, idx), 669 - val, start, end)); 670 - } 635 + #define OVR_REG_FLD_MOD(dispc, ovr, idx, val, mask) \ 636 + ({ \ 637 + struct dispc_device *_dispc = (dispc); \ 638 + u32 _ovr = (ovr); \ 639 + u32 _idx = (idx); \ 640 + u32 _reg = dispc_ovr_read(_dispc, 
_ovr, _idx); \ 641 + FIELD_MODIFY((mask), &_reg, (val)); \ 642 + dispc_ovr_write(_dispc, _ovr, _idx, _reg); \ 643 + }) 671 644 672 645 static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport) 673 646 { ··· 1114 1139 v = 3; 1115 1140 } 1116 1141 1117 - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8); 1142 + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 1143 + DISPC_VP_CONTROL_DATALINES_MASK); 1118 1144 } 1119 1145 1120 1146 static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport, ··· 1138 1162 1139 1163 oldi_cfg |= BIT(7); /* DEPOL */ 1140 1164 1141 - oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1); 1165 + FIELD_MODIFY(DISPC_VP_DSS_OLDI_CFG_MAP_MASK, &oldi_cfg, 1166 + fmt->am65x_oldi_mode_reg_val); 1142 1167 1143 1168 oldi_cfg |= BIT(12); /* SOFTRST */ 1144 1169 ··· 1201 1224 vbp = mode->crtc_vtotal - mode->crtc_vsync_end; 1202 1225 1203 1226 dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H, 1204 - FLD_VAL(hsw - 1, 7, 0) | 1205 - FLD_VAL(hfp - 1, 19, 8) | 1206 - FLD_VAL(hbp - 1, 31, 20)); 1227 + FIELD_PREP(DISPC_VP_TIMING_H_SYNC_PULSE_MASK, hsw - 1) | 1228 + FIELD_PREP(DISPC_VP_TIMING_H_FRONT_PORCH_MASK, hfp - 1) | 1229 + FIELD_PREP(DISPC_VP_TIMING_H_BACK_PORCH_MASK, hbp - 1)); 1207 1230 1208 1231 dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V, 1209 - FLD_VAL(vsw - 1, 7, 0) | 1210 - FLD_VAL(vfp, 19, 8) | 1211 - FLD_VAL(vbp, 31, 20)); 1232 + FIELD_PREP(DISPC_VP_TIMING_V_SYNC_PULSE_MASK, vsw - 1) | 1233 + FIELD_PREP(DISPC_VP_TIMING_V_FRONT_PORCH_MASK, vfp) | 1234 + FIELD_PREP(DISPC_VP_TIMING_V_BACK_PORCH_MASK, vbp)); 1212 1235 1213 1236 ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); 1214 1237 ··· 1231 1254 ieo = false; 1232 1255 1233 1256 dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ, 1234 - FLD_VAL(align, 18, 18) | 1235 - FLD_VAL(onoff, 17, 17) | 1236 - FLD_VAL(rf, 16, 16) | 1237 - FLD_VAL(ieo, 15, 15) | 1238 - FLD_VAL(ipc, 14, 14) | 1239 - FLD_VAL(ihs, 13, 13) | 1240 
- FLD_VAL(ivs, 12, 12)); 1257 + FIELD_PREP(DISPC_VP_POL_FREQ_ALIGN_MASK, align) | 1258 + FIELD_PREP(DISPC_VP_POL_FREQ_ONOFF_MASK, onoff) | 1259 + FIELD_PREP(DISPC_VP_POL_FREQ_RF_MASK, rf) | 1260 + FIELD_PREP(DISPC_VP_POL_FREQ_IEO_MASK, ieo) | 1261 + FIELD_PREP(DISPC_VP_POL_FREQ_IPC_MASK, ipc) | 1262 + FIELD_PREP(DISPC_VP_POL_FREQ_IHS_MASK, ihs) | 1263 + FIELD_PREP(DISPC_VP_POL_FREQ_IVS_MASK, ivs)); 1241 1264 1242 1265 dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN, 1243 - FLD_VAL(mode->crtc_hdisplay - 1, 11, 0) | 1244 - FLD_VAL(mode->crtc_vdisplay - 1, 27, 16)); 1266 + FIELD_PREP(DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK, 1267 + mode->crtc_hdisplay - 1) | 1268 + FIELD_PREP(DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK, 1269 + mode->crtc_vdisplay - 1)); 1245 1270 1246 - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0); 1271 + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 1272 + DISPC_VP_CONTROL_ENABLE_MASK); 1247 1273 } 1248 1274 1249 1275 void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport) 1250 1276 { 1251 - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0); 1277 + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 1278 + DISPC_VP_CONTROL_ENABLE_MASK); 1252 1279 } 1253 1280 1254 1281 void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport) ··· 1266 1285 1267 1286 bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport) 1268 1287 { 1269 - return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5); 1288 + return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 1289 + DISPC_VP_CONTROL_GOBIT_MASK); 1270 1290 } 1271 1291 1272 1292 void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport) 1273 1293 { 1274 - WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5)); 1275 - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5); 1294 + WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 1295 + DISPC_VP_CONTROL_GOBIT_MASK)); 1296 + VP_REG_FLD_MOD(dispc, hw_videoport, 
DISPC_VP_CONTROL, 1, 1297 + DISPC_VP_CONTROL_GOBIT_MASK); 1276 1298 } 1277 1299 1278 1300 enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN }; ··· 1475 1491 u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id; 1476 1492 1477 1493 OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), 1478 - hw_id, 4, 1); 1479 - OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), 1480 - x, 17, 6); 1481 - OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), 1482 - y, 30, 19); 1494 + hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK); 1495 + OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), x, 1496 + DISPC_OVR_ATTRIBUTES_POSX_MASK); 1497 + OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), y, 1498 + DISPC_OVR_ATTRIBUTES_POSY_MASK); 1483 1499 } 1484 1500 1485 1501 static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc, ··· 1489 1505 u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id; 1490 1506 1491 1507 OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), 1492 - hw_id, 4, 1); 1493 - OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), 1494 - x, 13, 0); 1495 - OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), 1496 - y, 29, 16); 1508 + hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK); 1509 + OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), x, 1510 + DISPC_OVR_ATTRIBUTES2_POSX_MASK); 1511 + OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), y, 1512 + DISPC_OVR_ATTRIBUTES2_POSY_MASK); 1497 1513 } 1498 1514 1499 1515 void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane, ··· 1528 1544 return; 1529 1545 1530 1546 OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), 1531 - !!enable, 0, 0); 1547 + !!enable, DISPC_OVR_ATTRIBUTES_ENABLE_MASK); 1532 1548 } 1533 1549 1534 1550 /* CSC */ ··· 1564 1580 static 1565 1581 void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval) 1566 1582 { 1567 - 
#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19)) 1583 + #define OVAL(x, y) (FIELD_PREP(GENMASK(15, 3), x) | FIELD_PREP(GENMASK(31, 19), y)) 1568 1584 regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]); 1569 1585 regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]); 1570 1586 regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]); 1571 1587 #undef OVAL 1572 1588 } 1573 1589 1574 - #define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16)) 1590 + #define CVAL(x, y) (FIELD_PREP(GENMASK(10, 0), x) | FIELD_PREP(GENMASK(26, 16), y)) 1575 1591 static 1576 1592 void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval) 1577 1593 { ··· 1751 1767 static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane, 1752 1768 bool enable) 1753 1769 { 1754 - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9); 1770 + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 1771 + DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK); 1755 1772 } 1756 1773 1757 1774 /* SCALER */ ··· 1811 1826 1812 1827 c1 = coefs->c1[phase]; 1813 1828 c2 = coefs->c2[phase]; 1814 - c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20); 1829 + c12 = FIELD_PREP(GENMASK(19, 10), c1) | FIELD_PREP(GENMASK(29, 20), 1830 + c2); 1815 1831 1816 1832 dispc_vid_write(dispc, hw_plane, reg, c12); 1817 1833 } ··· 2009 2023 u32 fourcc) 2010 2024 { 2011 2025 /* HORIZONTAL RESIZE ENABLE */ 2012 - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 2013 - sp->scale_x, 7, 7); 2026 + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_x, 2027 + DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK); 2014 2028 2015 2029 /* VERTICAL RESIZE ENABLE */ 2016 - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 2017 - sp->scale_y, 8, 8); 2030 + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_y, 2031 + DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK); 2018 2032 2019 2033 /* Skip the rest if no scaling is used */ 2020 2034 if (!sp->scale_x && !sp->scale_y) 
2021 2035 return; 2022 2036 2023 2037 /* VERTICAL 5-TAPS */ 2024 - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 2025 - sp->five_taps, 21, 21); 2038 + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->five_taps, 2039 + DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK); 2026 2040 2027 2041 if (dispc_fourcc_is_yuv(fourcc)) { 2028 2042 if (sp->scale_x) { ··· 2112 2126 if (dispc_color_formats[i].fourcc == fourcc) { 2113 2127 VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 2114 2128 dispc_color_formats[i].dss_code, 2115 - 6, 1); 2129 + DISPC_VID_ATTRIBUTES_FORMAT_MASK); 2116 2130 return; 2117 2131 } 2118 2132 } ··· 2234 2248 dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32); 2235 2249 2236 2250 dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE, 2237 - (scale.in_w - 1) | ((scale.in_h - 1) << 16)); 2251 + FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK, scale.in_h - 1) | 2252 + FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK, scale.in_w - 1)); 2238 2253 2239 2254 /* For YUV422 format we use the macropixel size for pixel inc */ 2240 2255 if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY) ··· 2272 2285 2273 2286 if (!lite) { 2274 2287 dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE, 2275 - (state->crtc_w - 1) | 2276 - ((state->crtc_h - 1) << 16)); 2288 + FIELD_PREP(DISPC_VID_SIZE_SIZEY_MASK, 2289 + state->crtc_h - 1) | 2290 + FIELD_PREP(DISPC_VID_SIZE_SIZEX_MASK, 2291 + state->crtc_w - 1)); 2277 2292 2278 2293 dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc); 2279 2294 } ··· 2289 2300 } 2290 2301 2291 2302 dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA, 2292 - 0xFF & (state->alpha >> 8)); 2303 + FIELD_PREP(DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK, 2304 + state->alpha >> 8)); 2293 2305 2294 2306 if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) 2295 2307 VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1, 2296 - 28, 28); 2308 + DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK); 2297 2309 else 2298 2310 
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, 2299 - 28, 28); 2311 + DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK); 2300 2312 } 2301 2313 2302 2314 void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable) 2303 2315 { 2304 - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0); 2316 + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 2317 + DISPC_VID_ATTRIBUTES_ENABLE_MASK); 2305 2318 } 2306 2319 2307 2320 static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane) 2308 2321 { 2309 - return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0); 2322 + return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 2323 + DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK); 2310 2324 } 2311 2325 2312 2326 static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc, 2313 2327 u32 hw_plane, u32 low, u32 high) 2314 2328 { 2315 2329 dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD, 2316 - FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0)); 2330 + FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK, high) | 2331 + FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK, low)); 2317 2332 } 2318 2333 2319 2334 static void dispc_vid_set_buf_threshold(struct dispc_device *dispc, 2320 2335 u32 hw_plane, u32 low, u32 high) 2321 2336 { 2322 2337 dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD, 2323 - FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0)); 2338 + FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK, 2339 + high) | 2340 + FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK, 2341 + low)); 2324 2342 } 2325 2343 2326 2344 static void dispc_k2g_plane_init(struct dispc_device *dispc) ··· 2337 2341 dev_dbg(dispc->dev, "%s()\n", __func__); 2338 2342 2339 2343 /* MFLAG_CTRL = ENABLED */ 2340 - REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0); 2344 + REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 2345 + DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK); 2341 2346 /* 
MFLAG_START = MFLAGNORMALSTARTMODE */ 2342 - REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6); 2347 + REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 2348 + DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK); 2343 2349 2344 2350 for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) { 2345 2351 u32 size = dispc_vid_get_fifo_size(dispc, hw_plane); ··· 2378 2380 * register is ignored. 2379 2381 */ 2380 2382 VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1, 2381 - 19, 19); 2383 + DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK); 2382 2384 } 2383 2385 } 2384 2386 ··· 2390 2392 2391 2393 dev_dbg(dispc->dev, "%s()\n", __func__); 2392 2394 2393 - REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0); 2394 - REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3); 2395 + REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, DSS_CBA_CFG_PRI_LO_MASK); 2396 + REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, DSS_CBA_CFG_PRI_HI_MASK); 2395 2397 2396 2398 /* MFLAG_CTRL = ENABLED */ 2397 - REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0); 2399 + REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 2400 + DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK); 2398 2401 /* MFLAG_START = MFLAGNORMALSTARTMODE */ 2399 - REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6); 2402 + REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 2403 + DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK); 2400 2404 2401 2405 for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) { 2402 2406 u32 size = dispc_vid_get_fifo_size(dispc, hw_plane); ··· 2431 2431 2432 2432 /* Prefech up to PRELOAD value */ 2433 2433 VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, 2434 - 19, 19); 2434 + DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK); 2435 2435 } 2436 2436 } 2437 2437 ··· 2461 2461 2462 2462 /* Enable the gamma Shadow bit-field for all VPs*/ 2463 2463 for (i = 0; i < dispc->feat->num_vps; i++) 2464 - VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2); 2464 + VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2465 
+ DISPC_VP_CONFIG_GAMMAENABLE_MASK); 2465 2466 } 2466 2467 2467 2468 static void dispc_initial_config(struct dispc_device *dispc) ··· 2473 2472 /* Note: Hardcoded DPI routing on J721E for now */ 2474 2473 if (dispc->feat->subrev == DISPC_J721E) { 2475 2474 dispc_write(dispc, DISPC_CONNECTIONS, 2476 - FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */ 2477 - FLD_VAL(8, 7, 4) /* VP3 to DPI1 */ 2475 + FIELD_PREP(DISPC_CONNECTIONS_DPI_0_CONN_MASK, 2) | /* VP1 to DPI0 */ 2476 + FIELD_PREP(DISPC_CONNECTIONS_DPI_1_CONN_MASK, 8) /* VP3 to DPI1 */ 2478 2477 ); 2479 2478 } 2480 2479 } ··· 2652 2651 cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]); 2653 2652 } 2654 2653 2655 - #define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \ 2656 - FLD_VAL(xB, 31, 22)) 2654 + #define CVAL(xR, xG, xB) (FIELD_PREP(GENMASK(9, 0), xR) | FIELD_PREP(GENMASK(20, 11), xG) | \ 2655 + FIELD_PREP(GENMASK(31, 22), xB)) 2657 2656 2658 2657 static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc, 2659 2658 u32 *regval) ··· 2695 2694 cprenable = 1; 2696 2695 } 2697 2696 2698 - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, 2699 - cprenable, 15, 15); 2697 + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, cprenable, 2698 + DISPC_VP_CONFIG_CPR_MASK); 2700 2699 } 2701 2700 2702 2701 static s16 dispc_S31_32_to_s3_8(s64 coef) ··· 2761 2760 colorconvenable = 1; 2762 2761 } 2763 2762 2764 - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, 2765 - colorconvenable, 24, 24); 2763 + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, colorconvenable, 2764 + DISPC_VP_CONFIG_COLORCONVENABLE_MASK); 2766 2765 } 2767 2766 2768 2767 static void dispc_vp_set_color_mgmt(struct dispc_device *dispc, ··· 2817 2816 2818 2817 clk_prepare_enable(dispc->fclk); 2819 2818 2820 - if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0) 2819 + if (REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_FUNC_RESETDONE) == 0) 2821 2820 dev_warn(dispc->dev, "DSS FUNC RESET not done!\n"); 2822 2821 2823 2822 
dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n", 2824 2823 dispc_read(dispc, DSS_REVISION)); 2825 2824 2826 2825 dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n", 2827 - REG_GET(dispc, DSS_SYSSTATUS, 1, 1), 2828 - REG_GET(dispc, DSS_SYSSTATUS, 2, 2), 2829 - REG_GET(dispc, DSS_SYSSTATUS, 3, 3)); 2826 + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(1, 1)), 2827 + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(2, 2)), 2828 + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(3, 3))); 2830 2829 2831 2830 if (dispc->feat->subrev == DISPC_AM625 || 2832 2831 dispc->feat->subrev == DISPC_AM65X) 2833 2832 dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n", 2834 - REG_GET(dispc, DSS_SYSSTATUS, 5, 5), 2835 - REG_GET(dispc, DSS_SYSSTATUS, 6, 6), 2836 - REG_GET(dispc, DSS_SYSSTATUS, 7, 7)); 2833 + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(5, 5)), 2834 + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(6, 6)), 2835 + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(7, 7))); 2837 2836 2838 2837 dev_dbg(dispc->dev, "DISPC IDLE %d\n", 2839 - REG_GET(dispc, DSS_SYSSTATUS, 9, 9)); 2838 + REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_IDLE_STATUS)); 2840 2839 2841 2840 dispc_initial_config(dispc); 2842 2841 ··· 2913 2912 spin_unlock_irqrestore(&dispc->tidss->irq_lock, flags); 2914 2913 2915 2914 for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx) 2916 - VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0); 2915 + VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 2916 + DISPC_VP_CONTROL_ENABLE_MASK); 2917 2917 } 2918 2918 2919 2919 static int dispc_softreset(struct dispc_device *dispc) ··· 2928 2926 } 2929 2927 2930 2928 /* Soft reset */ 2931 - REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); 2929 + REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, DSS_SYSCONFIG_SOFTRESET_MASK); 2932 2930 /* Wait for reset to complete */ 2933 2931 ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS, 2934 2932 val, val & 1, 100, 5000);
+76
drivers/gpu/drm/tidss/tidss_dispc_regs.h
··· 56 56 57 57 #define DSS_REVISION REG(DSS_REVISION) 58 58 #define DSS_SYSCONFIG REG(DSS_SYSCONFIG) 59 + #define DSS_SYSCONFIG_SOFTRESET_MASK GENMASK(1, 1) 60 + 59 61 #define DSS_SYSSTATUS REG(DSS_SYSSTATUS) 62 + #define DSS_SYSSTATUS_DISPC_IDLE_STATUS GENMASK(9, 9) 63 + #define DSS_SYSSTATUS_DISPC_FUNC_RESETDONE GENMASK(0, 0) 64 + 60 65 #define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI) 61 66 #define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW) 62 67 #define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS) ··· 75 70 #define WB_IRQSTATUS REG(WB_IRQSTATUS) 76 71 77 72 #define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE) 73 + #define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK GENMASK(6, 6) 74 + #define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK GENMASK(1, 0) 75 + 78 76 #define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE) 79 77 #define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER) 80 78 #define DSS_CBA_CFG REG(DSS_CBA_CFG) 79 + #define DSS_CBA_CFG_PRI_HI_MASK GENMASK(5, 3) 80 + #define DSS_CBA_CFG_PRI_LO_MASK GENMASK(2, 0) 81 + 81 82 #define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL) 82 83 #define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS) 83 84 #define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE) ··· 99 88 #define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0) 100 89 #define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1) 101 90 #define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS) 91 + #define DISPC_CONNECTIONS_DPI_1_CONN_MASK GENMASK(7, 4) 92 + #define DISPC_CONNECTIONS_DPI_0_CONN_MASK GENMASK(3, 0) 93 + 102 94 #define DISPC_MSS_VP1 REG(DISPC_MSS_VP1) 103 95 #define DISPC_MSS_VP3 REG(DISPC_MSS_VP3) 104 96 ··· 116 102 #define DISPC_VID_ACCUV2_0 0x18 117 103 #define DISPC_VID_ACCUV2_1 0x1c 118 104 #define DISPC_VID_ATTRIBUTES 0x20 105 + #define DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK GENMASK(28, 28) 106 + #define DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK GENMASK(21, 21) 107 + #define DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK GENMASK(19, 19) 108 + #define 
DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK GENMASK(9, 9) 109 + #define DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK GENMASK(8, 8) 110 + #define DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK GENMASK(7, 7) 111 + #define DISPC_VID_ATTRIBUTES_FORMAT_MASK GENMASK(6, 1) 112 + #define DISPC_VID_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0) 113 + 119 114 #define DISPC_VID_ATTRIBUTES2 0x24 120 115 #define DISPC_VID_BA_0 0x28 121 116 #define DISPC_VID_BA_1 0x2c 122 117 #define DISPC_VID_BA_UV_0 0x30 123 118 #define DISPC_VID_BA_UV_1 0x34 124 119 #define DISPC_VID_BUF_SIZE_STATUS 0x38 120 + #define DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK GENMASK(15, 0) 121 + 125 122 #define DISPC_VID_BUF_THRESHOLD 0x3c 123 + #define DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK GENMASK(31, 16) 124 + #define DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK GENMASK(15, 0) 125 + 126 126 #define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4) 127 127 128 128 #define DISPC_VID_FIRH 0x5c ··· 165 137 #define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4) 166 138 167 139 #define DISPC_VID_GLOBAL_ALPHA 0x1fc 140 + #define DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK GENMASK(7, 0) 141 + 168 142 #define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */ 169 143 #define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */ 170 144 #define DISPC_VID_MFLAG_THRESHOLD 0x208 145 + #define DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK GENMASK(31, 16) 146 + #define DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK GENMASK(15, 0) 147 + 171 148 #define DISPC_VID_PICTURE_SIZE 0x20c 149 + #define DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK GENMASK(27, 16) 150 + #define DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK GENMASK(11, 0) 151 + 172 152 #define DISPC_VID_PIXEL_INC 0x210 173 153 #define DISPC_VID_K2G_POSITION 0x214 /* K2G */ 174 154 #define DISPC_VID_PRELOAD 0x218 175 155 #define DISPC_VID_ROW_INC 0x21c 176 156 #define DISPC_VID_SIZE 0x220 157 + #define DISPC_VID_SIZE_SIZEY_MASK GENMASK(27, 16) 158 + #define DISPC_VID_SIZE_SIZEX_MASK GENMASK(11, 0) 159 + 177 160 #define DISPC_VID_BA_EXT_0 0x22c 178 
161 #define DISPC_VID_BA_EXT_1 0x230 179 162 #define DISPC_VID_BA_UV_EXT_0 0x234 ··· 212 173 #define DISPC_OVR_TRANS_COLOR_MIN 0x18 213 174 #define DISPC_OVR_TRANS_COLOR_MIN2 0x1c 214 175 #define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4) 176 + #define DISPC_OVR_ATTRIBUTES_POSY_MASK GENMASK(30, 19) 177 + #define DISPC_OVR_ATTRIBUTES_POSX_MASK GENMASK(17, 6) 178 + #define DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK GENMASK(4, 1) 179 + #define DISPC_OVR_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0) 180 + 215 181 #define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */ 182 + #define DISPC_OVR_ATTRIBUTES2_POSY_MASK GENMASK(29, 16) 183 + #define DISPC_OVR_ATTRIBUTES2_POSX_MASK GENMASK(13, 0) 184 + 216 185 /* VP */ 217 186 218 187 #define DISPC_VP_CONFIG 0x0 188 + #define DISPC_VP_CONFIG_COLORCONVENABLE_MASK GENMASK(24, 24) 189 + #define DISPC_VP_CONFIG_CPR_MASK GENMASK(15, 15) 190 + #define DISPC_VP_CONFIG_GAMMAENABLE_MASK GENMASK(2, 2) 191 + 219 192 #define DISPC_VP_CONTROL 0x4 193 + #define DISPC_VP_CONTROL_DATALINES_MASK GENMASK(10, 8) 194 + #define DISPC_VP_CONTROL_GOBIT_MASK GENMASK(5, 5) 195 + #define DISPC_VP_CONTROL_ENABLE_MASK GENMASK(0, 0) 196 + 220 197 #define DISPC_VP_CSC_COEF0 0x8 221 198 #define DISPC_VP_CSC_COEF1 0xc 222 199 #define DISPC_VP_CSC_COEF2 0x10 ··· 244 189 #define DISPC_VP_DATA_CYCLE_2 0x1c 245 190 #define DISPC_VP_LINE_NUMBER 0x44 246 191 #define DISPC_VP_POL_FREQ 0x4c 192 + #define DISPC_VP_POL_FREQ_ALIGN_MASK GENMASK(18, 18) 193 + #define DISPC_VP_POL_FREQ_ONOFF_MASK GENMASK(17, 17) 194 + #define DISPC_VP_POL_FREQ_RF_MASK GENMASK(16, 16) 195 + #define DISPC_VP_POL_FREQ_IEO_MASK GENMASK(15, 15) 196 + #define DISPC_VP_POL_FREQ_IPC_MASK GENMASK(14, 14) 197 + #define DISPC_VP_POL_FREQ_IHS_MASK GENMASK(13, 13) 198 + #define DISPC_VP_POL_FREQ_IVS_MASK GENMASK(12, 12) 199 + 247 200 #define DISPC_VP_SIZE_SCREEN 0x50 201 + #define DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK GENMASK(11, 0) 202 + #define DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK GENMASK(27, 16) 203 + 248 204 
#define DISPC_VP_TIMING_H 0x54 205 + #define DISPC_VP_TIMING_H_SYNC_PULSE_MASK GENMASK(7, 0) 206 + #define DISPC_VP_TIMING_H_FRONT_PORCH_MASK GENMASK(19, 8) 207 + #define DISPC_VP_TIMING_H_BACK_PORCH_MASK GENMASK(31, 20) 208 + 249 209 #define DISPC_VP_TIMING_V 0x58 210 + #define DISPC_VP_TIMING_V_SYNC_PULSE_MASK GENMASK(7, 0) 211 + #define DISPC_VP_TIMING_V_FRONT_PORCH_MASK GENMASK(19, 8) 212 + #define DISPC_VP_TIMING_V_BACK_PORCH_MASK GENMASK(31, 20) 213 + 250 214 #define DISPC_VP_CSC_COEF3 0x5c 251 215 #define DISPC_VP_CSC_COEF4 0x60 252 216 #define DISPC_VP_CSC_COEF5 0x64 ··· 294 220 #define DISPC_VP_SAFETY_LFSR_SEED 0x110 295 221 #define DISPC_VP_GAMMA_TABLE 0x120 296 222 #define DISPC_VP_DSS_OLDI_CFG 0x160 223 + #define DISPC_VP_DSS_OLDI_CFG_MAP_MASK GENMASK(3, 1) 224 + 297 225 #define DISPC_VP_DSS_OLDI_STATUS 0x164 298 226 #define DISPC_VP_DSS_OLDI_LB 0x168 299 227 #define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */
+13 -1
drivers/gpu/drm/v3d/v3d_drv.c
··· 157 157 static void 158 158 v3d_postclose(struct drm_device *dev, struct drm_file *file) 159 159 { 160 + struct v3d_dev *v3d = to_v3d_dev(dev); 160 161 struct v3d_file_priv *v3d_priv = file->driver_priv; 162 + unsigned long irqflags; 161 163 enum v3d_queue q; 162 164 163 - for (q = 0; q < V3D_MAX_QUEUES; q++) 165 + for (q = 0; q < V3D_MAX_QUEUES; q++) { 166 + struct v3d_queue_state *queue = &v3d->queue[q]; 167 + struct v3d_job *job = queue->active_job; 168 + 164 169 drm_sched_entity_destroy(&v3d_priv->sched_entity[q]); 170 + 171 + if (job && job->base.entity == &v3d_priv->sched_entity[q]) { 172 + spin_lock_irqsave(&queue->queue_lock, irqflags); 173 + job->file_priv = NULL; 174 + spin_unlock_irqrestore(&queue->queue_lock, irqflags); 175 + } 176 + } 165 177 166 178 v3d_perfmon_close_file(v3d_priv); 167 179 kfree(v3d_priv);
+8 -14
drivers/gpu/drm/v3d/v3d_drv.h
··· 58 58 59 59 /* Stores the GPU stats for this queue in the global context. */ 60 60 struct v3d_stats stats; 61 + 62 + /* Currently active job for this queue */ 63 + struct v3d_job *active_job; 64 + spinlock_t queue_lock; 61 65 }; 62 66 63 67 /* Performance monitor object. The perform lifetime is controlled by userspace ··· 163 159 164 160 struct work_struct overflow_mem_work; 165 161 166 - struct v3d_bin_job *bin_job; 167 - struct v3d_render_job *render_job; 168 - struct v3d_tfu_job *tfu_job; 169 - struct v3d_csd_job *csd_job; 170 - 171 162 struct v3d_queue_state queue[V3D_MAX_QUEUES]; 172 - 173 - /* Spinlock used to synchronize the overflow memory 174 - * management against bin job submission. 175 - */ 176 - spinlock_t job_lock; 177 163 178 164 /* Used to track the active perfmon if any. */ 179 165 struct v3d_perfmon *active_perfmon; ··· 321 327 struct v3d_perfmon *perfmon; 322 328 323 329 /* File descriptor of the process that submitted the job that could be used 324 - * for collecting stats by process of GPU usage. 330 + * to collect per-process information about the GPU. 325 331 */ 326 - struct drm_file *file; 332 + struct v3d_file_priv *file_priv; 327 333 328 334 /* Callback for the freeing of the job on refcount going to 0. 
*/ 329 335 void (*free)(struct kref *ref); ··· 564 570 565 571 /* v3d_fence.c */ 566 572 extern const struct dma_fence_ops v3d_fence_ops; 567 - struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); 573 + struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q); 568 574 569 575 /* v3d_gem.c */ 570 576 int v3d_gem_init(struct drm_device *dev); ··· 608 614 unsigned int count); 609 615 void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info, 610 616 unsigned int count); 611 - void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue); 617 + void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q); 612 618 int v3d_sched_init(struct v3d_dev *v3d); 613 619 void v3d_sched_fini(struct v3d_dev *v3d); 614 620
+6 -5
drivers/gpu/drm/v3d/v3d_fence.c
··· 3 3 4 4 #include "v3d_drv.h" 5 5 6 - struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue) 6 + struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q) 7 7 { 8 + struct v3d_queue_state *queue = &v3d->queue[q]; 8 9 struct v3d_fence *fence; 9 10 10 11 fence = kzalloc(sizeof(*fence), GFP_KERNEL); ··· 13 12 return ERR_PTR(-ENOMEM); 14 13 15 14 fence->dev = &v3d->drm; 16 - fence->queue = queue; 17 - fence->seqno = ++v3d->queue[queue].emit_seqno; 18 - dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock, 19 - v3d->queue[queue].fence_context, fence->seqno); 15 + fence->queue = q; 16 + fence->seqno = ++queue->emit_seqno; 17 + dma_fence_init(&fence->base, &v3d_fence_ops, &queue->queue_lock, 18 + queue->fence_context, fence->seqno); 20 19 21 20 return &fence->base; 22 21 }
+5 -5
drivers/gpu/drm/v3d/v3d_gem.c
··· 271 271 queue->fence_context = dma_fence_context_alloc(1); 272 272 memset(&queue->stats, 0, sizeof(queue->stats)); 273 273 seqcount_init(&queue->stats.lock); 274 + 275 + spin_lock_init(&queue->queue_lock); 274 276 } 275 277 276 278 spin_lock_init(&v3d->mm_lock); 277 - spin_lock_init(&v3d->job_lock); 278 279 ret = drmm_mutex_init(dev, &v3d->bo_lock); 279 280 if (ret) 280 281 return ret; ··· 325 324 v3d_gem_destroy(struct drm_device *dev) 326 325 { 327 326 struct v3d_dev *v3d = to_v3d_dev(dev); 327 + enum v3d_queue q; 328 328 329 329 v3d_sched_fini(v3d); 330 330 v3d_gemfs_fini(v3d); ··· 333 331 /* Waiting for jobs to finish would need to be done before 334 332 * unregistering V3D. 335 333 */ 336 - WARN_ON(v3d->bin_job); 337 - WARN_ON(v3d->render_job); 338 - WARN_ON(v3d->tfu_job); 339 - WARN_ON(v3d->csd_job); 334 + for (q = 0; q < V3D_MAX_QUEUES; q++) 335 + WARN_ON(v3d->queue[q].active_job); 340 336 341 337 drm_mm_takedown(&v3d->mm); 342 338
+27 -41
drivers/gpu/drm/v3d/v3d_irq.c
··· 42 42 container_of(work, struct v3d_dev, overflow_mem_work); 43 43 struct drm_device *dev = &v3d->drm; 44 44 struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024); 45 + struct v3d_queue_state *queue = &v3d->queue[V3D_BIN]; 46 + struct v3d_bin_job *bin_job; 45 47 struct drm_gem_object *obj; 46 48 unsigned long irqflags; 47 49 ··· 62 60 * bin job got scheduled, that's fine. We'll just give them 63 61 * some binner pool anyway. 64 62 */ 65 - spin_lock_irqsave(&v3d->job_lock, irqflags); 66 - if (!v3d->bin_job) { 67 - spin_unlock_irqrestore(&v3d->job_lock, irqflags); 63 + spin_lock_irqsave(&queue->queue_lock, irqflags); 64 + bin_job = (struct v3d_bin_job *)queue->active_job; 65 + 66 + if (!bin_job) { 67 + spin_unlock_irqrestore(&queue->queue_lock, irqflags); 68 68 goto out; 69 69 } 70 70 71 71 drm_gem_object_get(obj); 72 - list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list); 73 - spin_unlock_irqrestore(&v3d->job_lock, irqflags); 72 + list_add_tail(&bo->unref_head, &bin_job->render->unref_list); 73 + spin_unlock_irqrestore(&queue->queue_lock, irqflags); 74 74 75 75 v3d_mmu_flush_all(v3d); 76 76 ··· 81 77 82 78 out: 83 79 drm_gem_object_put(obj); 80 + } 81 + 82 + static void 83 + v3d_irq_signal_fence(struct v3d_dev *v3d, enum v3d_queue q, 84 + void (*trace_irq)(struct drm_device *, uint64_t)) 85 + { 86 + struct v3d_queue_state *queue = &v3d->queue[q]; 87 + struct v3d_fence *fence = to_v3d_fence(queue->active_job->irq_fence); 88 + 89 + v3d_job_update_stats(queue->active_job, q); 90 + trace_irq(&v3d->drm, fence->seqno); 91 + 92 + queue->active_job = NULL; 93 + dma_fence_signal(&fence->base); 84 94 } 85 95 86 96 static irqreturn_t ··· 120 102 } 121 103 122 104 if (intsts & V3D_INT_FLDONE) { 123 - struct v3d_fence *fence = 124 - to_v3d_fence(v3d->bin_job->base.irq_fence); 125 - 126 - v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN); 127 - trace_v3d_bcl_irq(&v3d->drm, fence->seqno); 128 - 129 - v3d->bin_job = NULL; 130 - 
dma_fence_signal(&fence->base); 131 - 105 + v3d_irq_signal_fence(v3d, V3D_BIN, trace_v3d_bcl_irq); 132 106 status = IRQ_HANDLED; 133 107 } 134 108 135 109 if (intsts & V3D_INT_FRDONE) { 136 - struct v3d_fence *fence = 137 - to_v3d_fence(v3d->render_job->base.irq_fence); 138 - 139 - v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER); 140 - trace_v3d_rcl_irq(&v3d->drm, fence->seqno); 141 - 142 - v3d->render_job = NULL; 143 - dma_fence_signal(&fence->base); 144 - 110 + v3d_irq_signal_fence(v3d, V3D_RENDER, trace_v3d_rcl_irq); 145 111 status = IRQ_HANDLED; 146 112 } 147 113 148 114 if (intsts & V3D_INT_CSDDONE(v3d->ver)) { 149 - struct v3d_fence *fence = 150 - to_v3d_fence(v3d->csd_job->base.irq_fence); 151 - 152 - v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD); 153 - trace_v3d_csd_irq(&v3d->drm, fence->seqno); 154 - 155 - v3d->csd_job = NULL; 156 - dma_fence_signal(&fence->base); 157 - 115 + v3d_irq_signal_fence(v3d, V3D_CSD, trace_v3d_csd_irq); 158 116 status = IRQ_HANDLED; 159 117 } 160 118 ··· 162 168 V3D_WRITE(V3D_HUB_INT_CLR, intsts); 163 169 164 170 if (intsts & V3D_HUB_INT_TFUC) { 165 - struct v3d_fence *fence = 166 - to_v3d_fence(v3d->tfu_job->base.irq_fence); 167 - 168 - v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU); 169 - trace_v3d_tfu_irq(&v3d->drm, fence->seqno); 170 - 171 - v3d->tfu_job = NULL; 172 - dma_fence_signal(&fence->base); 173 - 171 + v3d_irq_signal_fence(v3d, V3D_TFU, trace_v3d_tfu_irq); 174 172 status = IRQ_HANDLED; 175 173 } 176 174
+45 -40
drivers/gpu/drm/v3d/v3d_sched.c
··· 139 139 v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue) 140 140 { 141 141 struct v3d_dev *v3d = job->v3d; 142 - struct v3d_file_priv *file = job->file->driver_priv; 142 + struct v3d_file_priv *file = job->file_priv; 143 143 struct v3d_stats *global_stats = &v3d->queue[queue].stats; 144 144 struct v3d_stats *local_stats = &file->stats[queue]; 145 145 u64 now = local_clock(); ··· 194 194 } 195 195 196 196 void 197 - v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) 197 + v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q) 198 198 { 199 199 struct v3d_dev *v3d = job->v3d; 200 - struct v3d_file_priv *file = job->file->driver_priv; 201 - struct v3d_stats *global_stats = &v3d->queue[queue].stats; 200 + struct v3d_queue_state *queue = &v3d->queue[q]; 201 + struct v3d_stats *global_stats = &queue->stats; 202 202 u64 now = local_clock(); 203 203 unsigned long flags; 204 204 ··· 209 209 preempt_disable(); 210 210 211 211 /* Don't update the local stats if the file context has already closed */ 212 - if (file) 213 - v3d_stats_update(&file->stats[queue], now); 214 - else 215 - drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n"); 212 + spin_lock(&queue->queue_lock); 213 + if (job->file_priv) 214 + v3d_stats_update(&job->file_priv->stats[q], now); 215 + spin_unlock(&queue->queue_lock); 216 216 217 217 v3d_stats_update(global_stats, now); 218 218 ··· 226 226 { 227 227 struct v3d_bin_job *job = to_bin_job(sched_job); 228 228 struct v3d_dev *v3d = job->base.v3d; 229 + struct v3d_queue_state *queue = &v3d->queue[V3D_BIN]; 229 230 struct drm_device *dev = &v3d->drm; 230 231 struct dma_fence *fence; 231 232 unsigned long irqflags; 232 233 233 234 if (unlikely(job->base.base.s_fence->finished.error)) { 234 - spin_lock_irqsave(&v3d->job_lock, irqflags); 235 - v3d->bin_job = NULL; 236 - spin_unlock_irqrestore(&v3d->job_lock, irqflags); 235 + spin_lock_irqsave(&queue->queue_lock, irqflags); 236 + queue->active_job = NULL; 
237 + spin_unlock_irqrestore(&queue->queue_lock, irqflags); 237 238 return NULL; 238 239 } 239 240 240 241 /* Lock required around bin_job update vs 241 242 * v3d_overflow_mem_work(). 242 243 */ 243 - spin_lock_irqsave(&v3d->job_lock, irqflags); 244 - v3d->bin_job = job; 244 + spin_lock_irqsave(&queue->queue_lock, irqflags); 245 + queue->active_job = &job->base; 245 246 /* Clear out the overflow allocation, so we don't 246 247 * reuse the overflow attached to a previous job. 247 248 */ 248 249 V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0); 249 - spin_unlock_irqrestore(&v3d->job_lock, irqflags); 250 + spin_unlock_irqrestore(&queue->queue_lock, irqflags); 250 251 251 252 v3d_invalidate_caches(v3d); 252 253 ··· 291 290 struct dma_fence *fence; 292 291 293 292 if (unlikely(job->base.base.s_fence->finished.error)) { 294 - v3d->render_job = NULL; 293 + v3d->queue[V3D_RENDER].active_job = NULL; 295 294 return NULL; 296 295 } 297 296 298 - v3d->render_job = job; 297 + v3d->queue[V3D_RENDER].active_job = &job->base; 299 298 300 299 /* Can we avoid this flush? 
We need to be careful of 301 300 * scheduling, though -- imagine job0 rendering to texture and ··· 339 338 struct dma_fence *fence; 340 339 341 340 if (unlikely(job->base.base.s_fence->finished.error)) { 342 - v3d->tfu_job = NULL; 341 + v3d->queue[V3D_TFU].active_job = NULL; 343 342 return NULL; 344 343 } 345 344 346 - v3d->tfu_job = job; 345 + v3d->queue[V3D_TFU].active_job = &job->base; 347 346 348 347 fence = v3d_fence_create(v3d, V3D_TFU); 349 348 if (IS_ERR(fence)) ··· 387 386 int i, csd_cfg0_reg; 388 387 389 388 if (unlikely(job->base.base.s_fence->finished.error)) { 390 - v3d->csd_job = NULL; 389 + v3d->queue[V3D_CSD].active_job = NULL; 391 390 return NULL; 392 391 } 393 392 394 - v3d->csd_job = job; 393 + v3d->queue[V3D_CSD].active_job = &job->base; 395 394 396 395 v3d_invalidate_caches(v3d); 397 396 ··· 575 574 v3d_reset_performance_queries(struct v3d_cpu_job *job) 576 575 { 577 576 struct v3d_performance_query_info *performance_query = &job->performance_query; 578 - struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; 577 + struct v3d_file_priv *v3d_priv = job->base.file_priv; 579 578 struct v3d_dev *v3d = job->base.v3d; 580 579 struct v3d_perfmon *perfmon; 581 580 ··· 605 604 { 606 605 struct v3d_performance_query_info *performance_query = 607 606 &job->performance_query; 608 - struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; 607 + struct v3d_file_priv *v3d_priv = job->base.file_priv; 609 608 struct v3d_performance_query *perf_query = 610 609 &performance_query->queries[query]; 611 610 struct v3d_dev *v3d = job->base.v3d; ··· 701 700 trace_v3d_cpu_job_end(&v3d->drm, job->job_type); 702 701 v3d_job_update_stats(&job->base, V3D_CPU); 703 702 703 + /* Synchronous operation, so no fence to wait on. */ 704 704 return NULL; 705 705 } 706 706 ··· 717 715 718 716 v3d_job_update_stats(job, V3D_CACHE_CLEAN); 719 717 718 + /* Synchronous operation, so no fence to wait on. 
*/ 720 719 return NULL; 721 720 } 722 721 723 722 static enum drm_gpu_sched_stat 724 - v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) 723 + v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job, 724 + enum v3d_queue q) 725 725 { 726 726 struct v3d_job *job = to_v3d_job(sched_job); 727 - struct v3d_file_priv *v3d_priv = job->file->driver_priv; 728 - enum v3d_queue q; 727 + struct v3d_file_priv *v3d_priv = job->file_priv; 728 + unsigned long irqflags; 729 + enum v3d_queue i; 729 730 730 731 mutex_lock(&v3d->reset_lock); 731 732 732 733 /* block scheduler */ 733 - for (q = 0; q < V3D_MAX_QUEUES; q++) 734 - drm_sched_stop(&v3d->queue[q].sched, sched_job); 734 + for (i = 0; i < V3D_MAX_QUEUES; i++) 735 + drm_sched_stop(&v3d->queue[i].sched, sched_job); 735 736 736 737 if (sched_job) 737 738 drm_sched_increase_karma(sched_job); ··· 743 738 v3d_reset(v3d); 744 739 745 740 v3d->reset_counter++; 746 - v3d_priv->reset_counter++; 741 + spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags); 742 + if (v3d_priv) 743 + v3d_priv->reset_counter++; 744 + spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags); 747 745 748 - for (q = 0; q < V3D_MAX_QUEUES; q++) 749 - drm_sched_resubmit_jobs(&v3d->queue[q].sched); 746 + for (i = 0; i < V3D_MAX_QUEUES; i++) 747 + drm_sched_resubmit_jobs(&v3d->queue[i].sched); 750 748 751 749 /* Unblock schedulers and restart their jobs. 
*/ 752 - for (q = 0; q < V3D_MAX_QUEUES; q++) { 753 - drm_sched_start(&v3d->queue[q].sched, 0); 754 - } 750 + for (i = 0; i < V3D_MAX_QUEUES; i++) 751 + drm_sched_start(&v3d->queue[i].sched, 0); 755 752 756 753 mutex_unlock(&v3d->reset_lock); 757 754 ··· 781 774 return DRM_GPU_SCHED_STAT_NO_HANG; 782 775 } 783 776 784 - return v3d_gpu_reset_for_timeout(v3d, sched_job); 777 + return v3d_gpu_reset_for_timeout(v3d, sched_job, q); 785 778 } 786 779 787 780 static enum drm_gpu_sched_stat ··· 803 796 } 804 797 805 798 static enum drm_gpu_sched_stat 806 - v3d_generic_job_timedout(struct drm_sched_job *sched_job) 799 + v3d_tfu_job_timedout(struct drm_sched_job *sched_job) 807 800 { 808 801 struct v3d_job *job = to_v3d_job(sched_job); 809 802 810 - return v3d_gpu_reset_for_timeout(job->v3d, sched_job); 803 + return v3d_gpu_reset_for_timeout(job->v3d, sched_job, V3D_TFU); 811 804 } 812 805 813 806 static enum drm_gpu_sched_stat ··· 826 819 return DRM_GPU_SCHED_STAT_NO_HANG; 827 820 } 828 821 829 - return v3d_gpu_reset_for_timeout(v3d, sched_job); 822 + return v3d_gpu_reset_for_timeout(v3d, sched_job, V3D_CSD); 830 823 } 831 824 832 825 static const struct drm_sched_backend_ops v3d_bin_sched_ops = { ··· 843 836 844 837 static const struct drm_sched_backend_ops v3d_tfu_sched_ops = { 845 838 .run_job = v3d_tfu_job_run, 846 - .timedout_job = v3d_generic_job_timedout, 839 + .timedout_job = v3d_tfu_job_timedout, 847 840 .free_job = v3d_sched_job_free, 848 841 }; 849 842 ··· 855 848 856 849 static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = { 857 850 .run_job = v3d_cache_clean_job_run, 858 - .timedout_job = v3d_generic_job_timedout, 859 851 .free_job = v3d_sched_job_free 860 852 }; 861 853 862 854 static const struct drm_sched_backend_ops v3d_cpu_sched_ops = { 863 855 .run_job = v3d_cpu_job_run, 864 - .timedout_job = v3d_generic_job_timedout, 865 856 .free_job = v3d_cpu_job_free 866 857 }; 867 858
+1 -1
drivers/gpu/drm/v3d/v3d_submit.c
··· 166 166 167 167 job->v3d = v3d; 168 168 job->free = free; 169 - job->file = file_priv; 169 + job->file_priv = v3d_priv; 170 170 171 171 ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], 172 172 1, v3d_priv, file_priv->client_id);
+20
include/drm/bridge/dw_dp.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Copyright (c) 2025 Rockchip Electronics Co., Ltd. 4 + */ 5 + 6 + #ifndef __DW_DP__ 7 + #define __DW_DP__ 8 + 9 + #include <linux/device.h> 10 + 11 + struct drm_encoder; 12 + struct dw_dp; 13 + 14 + struct dw_dp_plat_data { 15 + u32 max_link_rate; 16 + }; 17 + 18 + struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder, 19 + const struct dw_dp_plat_data *plat_data); 20 + #endif /* __DW_DP__ */
+23
include/drm/drm_bridge.h
··· 1321 1321 } 1322 1322 #endif 1323 1323 1324 + static inline bool drm_bridge_is_last(struct drm_bridge *bridge) 1325 + { 1326 + return list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain); 1327 + } 1328 + 1324 1329 /** 1325 1330 * drm_bridge_get_current_state() - Get the current bridge state 1326 1331 * @bridge: bridge object ··· 1413 1408 { 1414 1409 return drm_bridge_get(list_first_entry_or_null(&encoder->bridge_chain, 1415 1410 struct drm_bridge, chain_node)); 1411 + } 1412 + 1413 + /** 1414 + * drm_bridge_chain_get_last_bridge() - Get the last bridge in the chain 1415 + * @encoder: encoder object 1416 + * 1417 + * The refcount of the returned bridge is incremented. Use drm_bridge_put() 1418 + * when done with it. 1419 + * 1420 + * RETURNS: 1421 + * the last bridge in the chain, or NULL if @encoder has no bridge attached 1422 + * to it. 1423 + */ 1424 + static inline struct drm_bridge * 1425 + drm_bridge_chain_get_last_bridge(struct drm_encoder *encoder) 1426 + { 1427 + return drm_bridge_get(list_last_entry_or_null(&encoder->bridge_chain, 1428 + struct drm_bridge, chain_node)); 1416 1429 } 1417 1430 1418 1431 /**
+4
include/drm/drm_format_helper.h
··· 136 136 const struct iosys_map *src, const struct drm_framebuffer *fb, 137 137 const struct drm_rect *clip, struct drm_format_conv_state *state); 138 138 139 + void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch, 140 + const struct iosys_map *src, const struct drm_framebuffer *fb, 141 + const struct drm_rect *clip, struct drm_format_conv_state *state); 142 + 139 143 #endif /* __LINUX_DRM_FORMAT_HELPER_H */
+26 -25
include/drm/drm_gem.h
··· 398 398 struct dma_resv _resv; 399 399 400 400 /** 401 - * @gpuva: 401 + * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object. 402 402 * 403 - * Provides the list of GPU VAs attached to this GEM object. 403 + * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the 404 + * mutex. Otherwise, the list is protected by the GEMs &dma_resv lock. 404 405 * 405 - * Drivers should lock list accesses with the GEMs &dma_resv lock 406 - * (&drm_gem_object.resv) or a custom lock if one is provided. 406 + * Note that all entries in this list must agree on whether 407 + * DRM_GPUVM_IMMEDIATE_MODE is set. 407 408 */ 408 409 struct { 410 + /** 411 + * @gpuva.list: list of GPUVM mappings attached to this GEM object. 412 + * 413 + * Drivers should lock list accesses with either the GEMs 414 + * &dma_resv lock (&drm_gem_object.resv) or the 415 + * &drm_gem_object.gpuva.lock mutex. 416 + */ 409 417 struct list_head list; 410 418 411 - #ifdef CONFIG_LOCKDEP 412 - struct lockdep_map *lock_dep_map; 413 - #endif 419 + /** 420 + * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list 421 + * when DRM_GPUVM_IMMEDIATE_MODE is used. 422 + * 423 + * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be 424 + * safe to take this mutex during the fence signalling path, so 425 + * do not allocate memory while holding this lock. Otherwise, 426 + * the &dma_resv lock should be used. 427 + */ 428 + struct mutex lock; 414 429 } gpuva; 415 430 416 431 /** ··· 610 595 } 611 596 612 597 #ifdef CONFIG_LOCKDEP 613 - /** 614 - * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list. 615 - * @obj: the &drm_gem_object 616 - * @lock: the lock used to protect the gpuva list. The locking primitive 617 - * must contain a dep_map field. 618 - * 619 - * Call this if you're not proctecting access to the gpuva list with the 620 - * dma-resv lock, but with a custom lock. 
621 - */ 622 - #define drm_gem_gpuva_set_lock(obj, lock) \ 623 - if (!WARN((obj)->gpuva.lock_dep_map, \ 624 - "GEM GPUVA lock should be set only once.")) \ 625 - (obj)->gpuva.lock_dep_map = &(lock)->dep_map 626 - #define drm_gem_gpuva_assert_lock_held(obj) \ 627 - lockdep_assert((obj)->gpuva.lock_dep_map ? \ 628 - lock_is_held((obj)->gpuva.lock_dep_map) : \ 598 + #define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \ 599 + lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \ 600 + lockdep_is_held(&(obj)->gpuva.lock) : \ 629 601 dma_resv_held((obj)->resv)) 630 602 #else 631 - #define drm_gem_gpuva_set_lock(obj, lock) do {} while (0) 632 - #define drm_gem_gpuva_assert_lock_held(obj) do {} while (0) 603 + #define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0) 633 604 #endif 634 605 635 606 /**
+27 -3
include/drm/drm_gpuvm.h
··· 197 197 DRM_GPUVM_RESV_PROTECTED = BIT(0), 198 198 199 199 /** 200 + * @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed 201 + * for modifying the GPUVM during the fence signalling path 202 + * 203 + * When set, gpuva.lock is used to protect gpuva.list in all GEM 204 + * objects associated with this GPUVM. Otherwise, the GEMs dma-resv is 205 + * used. 206 + */ 207 + DRM_GPUVM_IMMEDIATE_MODE = BIT(1), 208 + 209 + /** 200 210 * @DRM_GPUVM_USERBITS: user defined bits 201 211 */ 202 - DRM_GPUVM_USERBITS = BIT(1), 212 + DRM_GPUVM_USERBITS = BIT(2), 203 213 }; 204 214 205 215 /** ··· 377 367 drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm) 378 368 { 379 369 return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED; 370 + } 371 + 372 + /** 373 + * drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is 374 + * set 375 + * @gpuvm: the &drm_gpuvm 376 + * 377 + * Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise. 378 + */ 379 + static inline bool 380 + drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm) 381 + { 382 + return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE; 380 383 } 381 384 382 385 /** ··· 765 742 { 766 743 struct drm_gpuvm_bo *vm_bo; 767 744 768 - drm_gem_gpuva_assert_lock_held(obj); 769 - drm_gem_for_each_gpuvm_bo(vm_bo, obj) 745 + drm_gem_for_each_gpuvm_bo(vm_bo, obj) { 746 + drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj); 770 747 drm_gpuvm_bo_evict(vm_bo, evict); 748 + } 771 749 } 772 750 773 751 void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
+7 -1
include/drm/drm_utils.h
··· 16 16 17 17 int drm_get_panel_orientation_quirk(int width, int height); 18 18 19 - int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid); 19 + struct drm_panel_backlight_quirk { 20 + u16 min_brightness; 21 + u32 brightness_mask; 22 + }; 23 + 24 + const struct drm_panel_backlight_quirk * 25 + drm_get_panel_backlight_quirk(const struct drm_edid *edid); 20 26 21 27 signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec); 22 28
+14
include/linux/list.h
··· 637 637 }) 638 638 639 639 /** 640 + * list_last_entry_or_null - get the last element from a list 641 + * @ptr: the list head to take the element from. 642 + * @type: the type of the struct this is embedded in. 643 + * @member: the name of the list_head within the struct. 644 + * 645 + * Note that if the list is empty, it returns NULL. 646 + */ 647 + #define list_last_entry_or_null(ptr, type, member) ({ \ 648 + struct list_head *head__ = (ptr); \ 649 + struct list_head *pos__ = READ_ONCE(head__->prev); \ 650 + pos__ != head__ ? list_entry(pos__, type, member) : NULL; \ 651 + }) 652 + 653 + /** 640 654 * list_next_entry - get the next element in list 641 655 * @pos: the type * to cursor 642 656 * @member: the name of the list_head within the struct.