Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2025-03-06' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for v6.15:

Cross-subsystem Changes:

base:
- component: Provide helper to query bound status

fbdev:
- fbtft: Remove access to page->index

Core Changes:

- Fix usage of logging macros in several places

gem:
- Add test function for imported dma-bufs and use it in core and helpers
- Avoid struct drm_gem_object.import_attach

tests:
- Fix lockdep warnings

ttm:
- Add helpers for TTM shrinker

Driver Changes:

adp:
- Add support for Apple Touch Bar displays on M1/M2

amdxdna:
- Fix interrupt handling

appletbdrm:
- Add support for Apple Touch Bar displays on x86

bridge:
- synopsys: Add HDMI audio support
- ti-sn65dsi83: Support negative DE polarity

ipu-v3:
- Remove unused code

nouveau:
- Avoid multiple -Wflex-array-member-not-at-end warnings

panthor:
- Fix CS_STATUS_ defines
- Improve locking

rockchip:
- analogix_dp: Add eDP support
- lvds: Improve logging
- vop2: Improve HDMI mode handling; Add support for RK3576
- Fix shutdown
- Support rk3562-mali

xe:
- Use TTM shrinker

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20250306130700.GA485504@linux.fritz.box

+7439 -2263
+83
Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/apple,h7-display-pipe-mipi.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Apple pre-DCP display controller MIPI interface 8 + 9 + maintainers: 10 + - Sasha Finkelstein <fnkl.kernel@gmail.com> 11 + 12 + description: 13 + The MIPI controller part of the pre-DCP Apple display controller 14 + 15 + allOf: 16 + - $ref: dsi-controller.yaml# 17 + 18 + properties: 19 + compatible: 20 + items: 21 + - enum: 22 + - apple,t8112-display-pipe-mipi 23 + - apple,t8103-display-pipe-mipi 24 + - const: apple,h7-display-pipe-mipi 25 + 26 + reg: 27 + maxItems: 1 28 + 29 + power-domains: 30 + maxItems: 1 31 + 32 + ports: 33 + $ref: /schemas/graph.yaml#/properties/ports 34 + 35 + properties: 36 + port@0: 37 + $ref: /schemas/graph.yaml#/properties/port 38 + description: Input port. Always connected to the primary controller 39 + 40 + port@1: 41 + $ref: /schemas/graph.yaml#/properties/port 42 + description: Output MIPI DSI port to the panel 43 + 44 + required: 45 + - port@0 46 + - port@1 47 + 48 + required: 49 + - compatible 50 + - reg 51 + - ports 52 + 53 + unevaluatedProperties: false 54 + 55 + examples: 56 + - | 57 + dsi@28200000 { 58 + compatible = "apple,t8103-display-pipe-mipi", "apple,h7-display-pipe-mipi"; 59 + reg = <0x28200000 0xc000>; 60 + power-domains = <&ps_dispdfr_mipi>; 61 + 62 + ports { 63 + #address-cells = <1>; 64 + #size-cells = <0>; 65 + 66 + port@0 { 67 + reg = <0>; 68 + 69 + dfr_adp_out_mipi: endpoint { 70 + remote-endpoint = <&dfr_adp_out_mipi>; 71 + }; 72 + }; 73 + 74 + port@1 { 75 + reg = <1>; 76 + 77 + dfr_panel_in: endpoint { 78 + remote-endpoint = <&dfr_mipi_out_panel>; 79 + }; 80 + }; 81 + }; 82 + }; 83 + ...
+88
Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/apple,h7-display-pipe.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Apple pre-DCP display controller 8 + 9 + maintainers: 10 + - Sasha Finkelstein <fnkl.kernel@gmail.com> 11 + 12 + description: 13 + A secondary display controller used to drive the "touchbar" on 14 + certain Apple laptops. 15 + 16 + properties: 17 + compatible: 18 + items: 19 + - enum: 20 + - apple,t8112-display-pipe 21 + - apple,t8103-display-pipe 22 + - const: apple,h7-display-pipe 23 + 24 + reg: 25 + items: 26 + - description: Primary register block, controls planes and blending 27 + - description: 28 + Contains other configuration registers like interrupt 29 + and FIFO control 30 + 31 + reg-names: 32 + items: 33 + - const: be 34 + - const: fe 35 + 36 + power-domains: 37 + description: 38 + Phandles to pmgr entries that are needed for this controller to turn on. 39 + Aside from that, their specific functions are unknown 40 + maxItems: 2 41 + 42 + interrupts: 43 + items: 44 + - description: Unknown function 45 + - description: Primary interrupt. Vsync events are reported via it 46 + 47 + interrupt-names: 48 + items: 49 + - const: be 50 + - const: fe 51 + 52 + iommus: 53 + maxItems: 1 54 + 55 + port: 56 + $ref: /schemas/graph.yaml#/properties/port 57 + description: Output port. 
Always connected to apple,h7-display-pipe-mipi 58 + 59 + required: 60 + - compatible 61 + - reg 62 + - interrupts 63 + - port 64 + 65 + additionalProperties: false 66 + 67 + examples: 68 + - | 69 + #include <dt-bindings/interrupt-controller/apple-aic.h> 70 + display-pipe@28200000 { 71 + compatible = "apple,t8103-display-pipe", "apple,h7-display-pipe"; 72 + reg = <0x28200000 0xc000>, 73 + <0x28400000 0x4000>; 74 + reg-names = "be", "fe"; 75 + power-domains = <&ps_dispdfr_fe>, <&ps_dispdfr_be>; 76 + interrupt-parent = <&aic>; 77 + interrupts = <AIC_IRQ 502 IRQ_TYPE_LEVEL_HIGH>, 78 + <AIC_IRQ 506 IRQ_TYPE_LEVEL_HIGH>; 79 + interrupt-names = "be", "fe"; 80 + iommus = <&displaydfr_dart 0>; 81 + 82 + port { 83 + dfr_adp_out_mipi: endpoint { 84 + remote-endpoint = <&dfr_mipi_in_adp>; 85 + }; 86 + }; 87 + }; 88 + ...
+58
Documentation/devicetree/bindings/display/panel/apple,summit.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/apple,summit.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Apple "Summit" display panel 8 + 9 + maintainers: 10 + - Sasha Finkelstein <fnkl.kernel@gmail.com> 11 + 12 + description: 13 + An OLED panel used as a touchbar on certain Apple laptops. 14 + Contains a backlight device, which controls brightness of the panel itself. 15 + The backlight common properties are included for this reason 16 + 17 + allOf: 18 + - $ref: panel-common.yaml# 19 + - $ref: /schemas/leds/backlight/common.yaml# 20 + 21 + properties: 22 + compatible: 23 + items: 24 + - enum: 25 + - apple,j293-summit 26 + - apple,j493-summit 27 + - const: apple,summit 28 + 29 + reg: 30 + maxItems: 1 31 + 32 + required: 33 + - compatible 34 + - reg 35 + - max-brightness 36 + - port 37 + 38 + unevaluatedProperties: false 39 + 40 + examples: 41 + - | 42 + dsi { 43 + #address-cells = <1>; 44 + #size-cells = <0>; 45 + 46 + panel@0 { 47 + compatible = "apple,j293-summit", "apple,summit"; 48 + reg = <0>; 49 + max-brightness = <255>; 50 + 51 + port { 52 + endpoint { 53 + remote-endpoint = <&dfr_bridge_out>; 54 + }; 55 + }; 56 + }; 57 + }; 58 + ...
+91 -20
Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml
··· 14 14 maintainers: 15 15 - Sandy Huang <hjc@rock-chips.com> 16 16 - Heiko Stuebner <heiko@sntech.de> 17 + - Andy Yan <andyshrk@163.com> 17 18 18 19 properties: 19 20 compatible: 20 21 enum: 21 22 - rockchip,rk3566-vop 22 23 - rockchip,rk3568-vop 24 + - rockchip,rk3576-vop 23 25 - rockchip,rk3588-vop 24 26 25 27 reg: ··· 39 37 - const: gamma-lut 40 38 41 39 interrupts: 42 - maxItems: 1 40 + minItems: 1 41 + maxItems: 4 43 42 description: 44 - The VOP interrupt is shared by several interrupt sources, such as 45 - frame start (VSYNC), line flag and other status interrupts. 43 + For VOP version under rk3576, the interrupt is shared by several interrupt 44 + sources, such as frame start (VSYNC), line flag and other interrupt status. 45 + For VOP version from rk3576 there is a system interrupt for bus error, and 46 + every video port has it's independent interrupts for vsync and other video 47 + port related error interrupts. 48 + 49 + interrupt-names: 50 + items: 51 + - const: sys 52 + - const: vp0 53 + - const: vp1 54 + - const: vp2 46 55 47 56 # See compatible-specific constraints below. 
48 57 clocks: ··· 137 124 properties: 138 125 compatible: 139 126 contains: 127 + enum: 128 + - rockchip,rk3566-vop 129 + - rockchip,rk3568-vop 130 + then: 131 + properties: 132 + clocks: 133 + maxItems: 5 134 + 135 + clock-names: 136 + maxItems: 5 137 + 138 + interrupts: 139 + maxItems: 1 140 + 141 + interrupt-names: false 142 + 143 + ports: 144 + required: 145 + - port@0 146 + - port@1 147 + - port@2 148 + 149 + rockchip,vo1-grf: false 150 + rockchip,vop-grf: false 151 + rockchip,pmu: false 152 + 153 + required: 154 + - rockchip,grf 155 + 156 + - if: 157 + properties: 158 + compatible: 159 + contains: 160 + enum: 161 + - rockchip,rk3576-vop 162 + then: 163 + properties: 164 + clocks: 165 + maxItems: 5 166 + 167 + clock-names: 168 + maxItems: 5 169 + 170 + interrupts: 171 + minItems: 4 172 + 173 + interrupt-names: 174 + minItems: 4 175 + 176 + ports: 177 + required: 178 + - port@0 179 + - port@1 180 + - port@2 181 + 182 + rockchip,vo1-grf: false 183 + rockchip,vop-grf: false 184 + 185 + required: 186 + - rockchip,grf 187 + - rockchip,pmu 188 + 189 + - if: 190 + properties: 191 + compatible: 192 + contains: 140 193 const: rockchip,rk3588-vop 141 194 then: 142 195 properties: 143 196 clocks: 144 197 minItems: 7 198 + maxItems: 9 199 + 145 200 clock-names: 146 201 minItems: 7 202 + maxItems: 9 203 + 204 + interrupts: 205 + maxItems: 1 206 + 207 + interrupt-names: false 147 208 148 209 ports: 149 210 required: ··· 231 144 - rockchip,vo1-grf 232 145 - rockchip,vop-grf 233 146 - rockchip,pmu 234 - 235 - else: 236 - properties: 237 - rockchip,vo1-grf: false 238 - rockchip,vop-grf: false 239 - rockchip,pmu: false 240 - 241 - clocks: 242 - maxItems: 5 243 - clock-names: 244 - maxItems: 5 245 - 246 - ports: 247 - required: 248 - - port@0 249 - - port@1 250 - - port@2 251 147 252 148 additionalProperties: false 253 149 ··· 258 188 "dclk_vp1", 259 189 "dclk_vp2"; 260 190 power-domains = <&power RK3568_PD_VO>; 191 + rockchip,grf = <&grf>; 261 192 iommus = <&vop_mmu>; 262 193 
vop_out: ports { 263 194 #address-cells = <1>;
+1
Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
··· 25 25 - renesas,r9a07g044-mali 26 26 - renesas,r9a07g054-mali 27 27 - rockchip,px30-mali 28 + - rockchip,rk3562-mali 28 29 - rockchip,rk3568-mali 29 30 - rockchip,rk3576-mali 30 31 - const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
+24
MAINTAINERS
··· 7149 7149 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 7150 7150 F: drivers/gpu/drm/sun4i/sun8i* 7151 7151 7152 + DRM DRIVER FOR APPLE TOUCH BARS 7153 + M: Aun-Ali Zaidi <admin@kodeit.net> 7154 + M: Aditya Garg <gargaditya08@live.com> 7155 + L: dri-devel@lists.freedesktop.org 7156 + S: Maintained 7157 + T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 7158 + F: drivers/gpu/drm/tiny/appletbdrm.c 7159 + 7152 7160 DRM DRIVER FOR ARM PL111 CLCD 7153 7161 M: Linus Walleij <linus.walleij@linaro.org> 7154 7162 S: Maintained ··· 7835 7827 F: drivers/gpu/host1x/ 7836 7828 F: include/linux/host1x.h 7837 7829 F: include/uapi/drm/tegra_drm.h 7830 + 7831 + DRM DRIVERS FOR PRE-DCP APPLE DISPLAY OUTPUT 7832 + M: Sasha Finkelstein <fnkl.kernel@gmail.com> 7833 + R: Janne Grunau <j@jannau.net> 7834 + L: dri-devel@lists.freedesktop.org 7835 + L: asahi@lists.linux.dev 7836 + S: Maintained 7837 + W: https://asahilinux.org 7838 + B: https://github.com/AsahiLinux/linux/issues 7839 + C: irc://irc.oftc.net/asahi-dev 7840 + T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 7841 + F: Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml 7842 + F: Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml 7843 + F: Documentation/devicetree/bindings/display/panel/apple,summit.yaml 7844 + F: drivers/gpu/drm/adp/ 7845 + F: drivers/gpu/drm/panel/panel-summit.c 7838 7846 7839 7847 DRM DRIVERS FOR RENESAS R-CAR 7840 7848 M: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+13 -4
drivers/accel/amdxdna/amdxdna_mailbox.c
··· 349 349 trace_mbox_irq_handle(MAILBOX_NAME, irq); 350 350 /* Schedule a rx_work to call the callback functions */ 351 351 queue_work(mb_chann->work_q, &mb_chann->rx_work); 352 - /* Clear IOHUB register */ 353 - mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0); 354 352 355 353 return IRQ_HANDLED; 356 354 } ··· 365 367 return; 366 368 } 367 369 370 + again: 371 + mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0); 372 + 368 373 while (1) { 369 374 /* 370 375 * If return is 0, keep consuming next message, until there is ··· 381 380 if (unlikely(ret)) { 382 381 MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret); 383 382 WRITE_ONCE(mb_chann->bad_state, true); 384 - disable_irq(mb_chann->msix_irq); 385 - break; 383 + return; 386 384 } 387 385 } 386 + 387 + /* 388 + * The hardware will not generate interrupt if firmware creates a new 389 + * response right after driver clears interrupt register. Check 390 + * the interrupt register to make sure there is not any new response 391 + * before exiting. 392 + */ 393 + if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr)) 394 + goto again; 388 395 } 389 396 390 397 int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
+14
drivers/base/component.c
··· 569 569 } 570 570 EXPORT_SYMBOL_GPL(component_master_del); 571 571 572 + bool component_master_is_bound(struct device *parent, 573 + const struct component_master_ops *ops) 574 + { 575 + struct aggregate_device *adev; 576 + 577 + guard(mutex)(&component_mutex); 578 + adev = __aggregate_find(parent, ops); 579 + if (!adev) 580 + return 0; 581 + 582 + return adev->bound; 583 + } 584 + EXPORT_SYMBOL_GPL(component_master_is_bound); 585 + 572 586 static void component_unbind(struct component *component, 573 587 struct aggregate_device *adev, void *data) 574 588 {
+2
drivers/gpu/drm/Kconfig
··· 439 439 440 440 source "drivers/gpu/drm/tidss/Kconfig" 441 441 442 + source "drivers/gpu/drm/adp/Kconfig" 443 + 442 444 source "drivers/gpu/drm/xlnx/Kconfig" 443 445 444 446 source "drivers/gpu/drm/gud/Kconfig"
+1
drivers/gpu/drm/Makefile
··· 206 206 obj-y += tiny/ 207 207 obj-$(CONFIG_DRM_PL111) += pl111/ 208 208 obj-$(CONFIG_DRM_TVE200) += tve200/ 209 + obj-$(CONFIG_DRM_ADP) += adp/ 209 210 obj-$(CONFIG_DRM_XEN) += xen/ 210 211 obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ 211 212 obj-$(CONFIG_DRM_LIMA) += lima/
+17
drivers/gpu/drm/adp/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR MIT 2 + config DRM_ADP 3 + tristate "DRM Support for pre-DCP Apple display controllers" 4 + depends on DRM && OF && ARM64 5 + depends on ARCH_APPLE || COMPILE_TEST 6 + select DRM_KMS_HELPER 7 + select DRM_BRIDGE_CONNECTOR 8 + select DRM_DISPLAY_HELPER 9 + select DRM_KMS_DMA_HELPER 10 + select DRM_GEM_DMA_HELPER 11 + select DRM_PANEL_BRIDGE 12 + select VIDEOMODE_HELPERS 13 + select DRM_MIPI_DSI 14 + help 15 + Chose this option if you have an Apple Arm laptop with a touchbar. 16 + 17 + If M is selected, this module will be called adpdrm.
+5
drivers/gpu/drm/adp/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR MIT 2 + 3 + adpdrm-y := adp_drv.o 4 + adpdrm-mipi-y := adp-mipi.o 5 + obj-$(CONFIG_DRM_ADP) += adpdrm.o adpdrm-mipi.o
+276
drivers/gpu/drm/adp/adp-mipi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/component.h> 4 + #include <linux/iopoll.h> 5 + #include <linux/of.h> 6 + #include <linux/platform_device.h> 7 + 8 + #include <drm/drm_bridge.h> 9 + #include <drm/drm_mipi_dsi.h> 10 + 11 + #define DSI_GEN_HDR 0x6c 12 + #define DSI_GEN_PLD_DATA 0x70 13 + 14 + #define DSI_CMD_PKT_STATUS 0x74 15 + 16 + #define GEN_PLD_R_EMPTY BIT(4) 17 + #define GEN_PLD_W_FULL BIT(3) 18 + #define GEN_PLD_W_EMPTY BIT(2) 19 + #define GEN_CMD_FULL BIT(1) 20 + #define GEN_CMD_EMPTY BIT(0) 21 + #define GEN_RD_CMD_BUSY BIT(6) 22 + #define CMD_PKT_STATUS_TIMEOUT_US 20000 23 + 24 + struct adp_mipi_drv_private { 25 + struct mipi_dsi_host dsi; 26 + struct drm_bridge bridge; 27 + struct drm_bridge *next_bridge; 28 + void __iomem *mipi; 29 + }; 30 + 31 + #define mipi_to_adp(x) container_of(x, struct adp_mipi_drv_private, dsi) 32 + 33 + static int adp_dsi_gen_pkt_hdr_write(struct adp_mipi_drv_private *adp, u32 hdr_val) 34 + { 35 + int ret; 36 + u32 val, mask; 37 + 38 + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, 39 + val, !(val & GEN_CMD_FULL), 1000, 40 + CMD_PKT_STATUS_TIMEOUT_US); 41 + if (ret) { 42 + dev_err(adp->dsi.dev, "failed to get available command FIFO\n"); 43 + return ret; 44 + } 45 + 46 + writel(hdr_val, adp->mipi + DSI_GEN_HDR); 47 + 48 + mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY; 49 + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, 50 + val, (val & mask) == mask, 51 + 1000, CMD_PKT_STATUS_TIMEOUT_US); 52 + if (ret) { 53 + dev_err(adp->dsi.dev, "failed to write command FIFO\n"); 54 + return ret; 55 + } 56 + 57 + return 0; 58 + } 59 + 60 + static int adp_dsi_write(struct adp_mipi_drv_private *adp, 61 + const struct mipi_dsi_packet *packet) 62 + { 63 + const u8 *tx_buf = packet->payload; 64 + int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret; 65 + __le32 word; 66 + u32 val; 67 + 68 + while (len) { 69 + if (len < pld_data_bytes) { 70 + word = 0; 71 + memcpy(&word, tx_buf, len); 72 
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA); 73 + len = 0; 74 + } else { 75 + memcpy(&word, tx_buf, pld_data_bytes); 76 + writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA); 77 + tx_buf += pld_data_bytes; 78 + len -= pld_data_bytes; 79 + } 80 + 81 + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, 82 + val, !(val & GEN_PLD_W_FULL), 1000, 83 + CMD_PKT_STATUS_TIMEOUT_US); 84 + if (ret) { 85 + dev_err(adp->dsi.dev, 86 + "failed to get available write payload FIFO\n"); 87 + return ret; 88 + } 89 + } 90 + 91 + word = 0; 92 + memcpy(&word, packet->header, sizeof(packet->header)); 93 + return adp_dsi_gen_pkt_hdr_write(adp, le32_to_cpu(word)); 94 + } 95 + 96 + static int adp_dsi_read(struct adp_mipi_drv_private *adp, 97 + const struct mipi_dsi_msg *msg) 98 + { 99 + int i, j, ret, len = msg->rx_len; 100 + u8 *buf = msg->rx_buf; 101 + u32 val; 102 + 103 + /* Wait end of the read operation */ 104 + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, 105 + val, !(val & GEN_RD_CMD_BUSY), 106 + 1000, CMD_PKT_STATUS_TIMEOUT_US); 107 + if (ret) { 108 + dev_err(adp->dsi.dev, "Timeout during read operation\n"); 109 + return ret; 110 + } 111 + 112 + for (i = 0; i < len; i += 4) { 113 + /* Read fifo must not be empty before all bytes are read */ 114 + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, 115 + val, !(val & GEN_PLD_R_EMPTY), 116 + 1000, CMD_PKT_STATUS_TIMEOUT_US); 117 + if (ret) { 118 + dev_err(adp->dsi.dev, "Read payload FIFO is empty\n"); 119 + return ret; 120 + } 121 + 122 + val = readl(adp->mipi + DSI_GEN_PLD_DATA); 123 + for (j = 0; j < 4 && j + i < len; j++) 124 + buf[i + j] = val >> (8 * j); 125 + } 126 + 127 + return ret; 128 + } 129 + 130 + static ssize_t adp_dsi_host_transfer(struct mipi_dsi_host *host, 131 + const struct mipi_dsi_msg *msg) 132 + { 133 + struct adp_mipi_drv_private *adp = mipi_to_adp(host); 134 + struct mipi_dsi_packet packet; 135 + int ret, nb_bytes; 136 + 137 + ret = mipi_dsi_create_packet(&packet, msg); 
138 + if (ret) { 139 + dev_err(adp->dsi.dev, "failed to create packet: %d\n", ret); 140 + return ret; 141 + } 142 + 143 + ret = adp_dsi_write(adp, &packet); 144 + if (ret) 145 + return ret; 146 + 147 + if (msg->rx_buf && msg->rx_len) { 148 + ret = adp_dsi_read(adp, msg); 149 + if (ret) 150 + return ret; 151 + nb_bytes = msg->rx_len; 152 + } else { 153 + nb_bytes = packet.size; 154 + } 155 + 156 + return nb_bytes; 157 + } 158 + 159 + static int adp_dsi_bind(struct device *dev, struct device *master, void *data) 160 + { 161 + return 0; 162 + } 163 + 164 + static void adp_dsi_unbind(struct device *dev, struct device *master, void *data) 165 + { 166 + } 167 + 168 + static const struct component_ops adp_dsi_component_ops = { 169 + .bind = adp_dsi_bind, 170 + .unbind = adp_dsi_unbind, 171 + }; 172 + 173 + static int adp_dsi_host_attach(struct mipi_dsi_host *host, 174 + struct mipi_dsi_device *dev) 175 + { 176 + struct adp_mipi_drv_private *adp = mipi_to_adp(host); 177 + struct drm_bridge *next; 178 + int ret; 179 + 180 + next = devm_drm_of_get_bridge(adp->dsi.dev, adp->dsi.dev->of_node, 1, 0); 181 + if (IS_ERR(next)) 182 + return PTR_ERR(next); 183 + 184 + adp->next_bridge = next; 185 + 186 + drm_bridge_add(&adp->bridge); 187 + 188 + ret = component_add(host->dev, &adp_dsi_component_ops); 189 + if (ret) { 190 + pr_err("failed to add dsi_host component: %d\n", ret); 191 + drm_bridge_remove(&adp->bridge); 192 + return ret; 193 + } 194 + 195 + return 0; 196 + } 197 + 198 + static int adp_dsi_host_detach(struct mipi_dsi_host *host, 199 + struct mipi_dsi_device *dev) 200 + { 201 + struct adp_mipi_drv_private *adp = mipi_to_adp(host); 202 + 203 + component_del(host->dev, &adp_dsi_component_ops); 204 + drm_bridge_remove(&adp->bridge); 205 + return 0; 206 + } 207 + 208 + static const struct mipi_dsi_host_ops adp_dsi_host_ops = { 209 + .transfer = adp_dsi_host_transfer, 210 + .attach = adp_dsi_host_attach, 211 + .detach = adp_dsi_host_detach, 212 + }; 213 + 214 + static int 
adp_dsi_bridge_attach(struct drm_bridge *bridge, 215 + enum drm_bridge_attach_flags flags) 216 + { 217 + struct adp_mipi_drv_private *adp = 218 + container_of(bridge, struct adp_mipi_drv_private, bridge); 219 + 220 + return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags); 221 + } 222 + 223 + static const struct drm_bridge_funcs adp_dsi_bridge_funcs = { 224 + .attach = adp_dsi_bridge_attach, 225 + }; 226 + 227 + static int adp_mipi_probe(struct platform_device *pdev) 228 + { 229 + struct adp_mipi_drv_private *adp; 230 + 231 + adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL); 232 + if (!adp) 233 + return -ENOMEM; 234 + 235 + adp->mipi = devm_platform_ioremap_resource(pdev, 0); 236 + if (IS_ERR(adp->mipi)) { 237 + dev_err(&pdev->dev, "failed to map mipi mmio"); 238 + return PTR_ERR(adp->mipi); 239 + } 240 + 241 + adp->dsi.dev = &pdev->dev; 242 + adp->dsi.ops = &adp_dsi_host_ops; 243 + adp->bridge.funcs = &adp_dsi_bridge_funcs; 244 + adp->bridge.of_node = pdev->dev.of_node; 245 + adp->bridge.type = DRM_MODE_CONNECTOR_DSI; 246 + dev_set_drvdata(&pdev->dev, adp); 247 + return mipi_dsi_host_register(&adp->dsi); 248 + } 249 + 250 + static void adp_mipi_remove(struct platform_device *pdev) 251 + { 252 + struct device *dev = &pdev->dev; 253 + struct adp_mipi_drv_private *adp = dev_get_drvdata(dev); 254 + 255 + mipi_dsi_host_unregister(&adp->dsi); 256 + } 257 + 258 + static const struct of_device_id adp_mipi_of_match[] = { 259 + { .compatible = "apple,h7-display-pipe-mipi", }, 260 + { }, 261 + }; 262 + MODULE_DEVICE_TABLE(of, adp_mipi_of_match); 263 + 264 + static struct platform_driver adp_mipi_platform_driver = { 265 + .driver = { 266 + .name = "adp-mipi", 267 + .of_match_table = adp_mipi_of_match, 268 + }, 269 + .probe = adp_mipi_probe, 270 + .remove = adp_mipi_remove, 271 + }; 272 + 273 + module_platform_driver(adp_mipi_platform_driver); 274 + 275 + MODULE_DESCRIPTION("Apple Display Pipe MIPI driver"); 276 + MODULE_LICENSE("GPL");
+612
drivers/gpu/drm/adp/adp_drv.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/component.h> 4 + #include <linux/iopoll.h> 5 + #include <linux/of.h> 6 + #include <linux/platform_device.h> 7 + 8 + #include <drm/drm_atomic.h> 9 + #include <drm/drm_atomic_helper.h> 10 + #include <drm/drm_bridge.h> 11 + #include <drm/drm_bridge_connector.h> 12 + #include <drm/drm_drv.h> 13 + #include <drm/drm_fb_dma_helper.h> 14 + #include <drm/drm_framebuffer.h> 15 + #include <drm/drm_gem_atomic_helper.h> 16 + #include <drm/drm_gem_dma_helper.h> 17 + #include <drm/drm_gem_framebuffer_helper.h> 18 + #include <drm/drm_of.h> 19 + #include <drm/drm_probe_helper.h> 20 + #include <drm/drm_vblank.h> 21 + 22 + #define ADP_INT_STATUS 0x34 23 + #define ADP_INT_STATUS_INT_MASK 0x7 24 + #define ADP_INT_STATUS_VBLANK 0x1 25 + #define ADP_CTRL 0x100 26 + #define ADP_CTRL_VBLANK_ON 0x12 27 + #define ADP_CTRL_FIFO_ON 0x601 28 + #define ADP_SCREEN_SIZE 0x0c 29 + #define ADP_SCREEN_HSIZE GENMASK(15, 0) 30 + #define ADP_SCREEN_VSIZE GENMASK(31, 16) 31 + 32 + #define ADBE_FIFO 0x10c0 33 + #define ADBE_FIFO_SYNC 0xc0000000 34 + 35 + #define ADBE_BLEND_BYPASS 0x2020 36 + #define ADBE_BLEND_EN1 0x2028 37 + #define ADBE_BLEND_EN2 0x2074 38 + #define ADBE_BLEND_EN3 0x202c 39 + #define ADBE_BLEND_EN4 0x2034 40 + #define ADBE_MASK_BUF 0x2200 41 + 42 + #define ADBE_SRC_START 0x4040 43 + #define ADBE_SRC_SIZE 0x4048 44 + #define ADBE_DST_START 0x4050 45 + #define ADBE_DST_SIZE 0x4054 46 + #define ADBE_STRIDE 0x4038 47 + #define ADBE_FB_BASE 0x4030 48 + 49 + #define ADBE_LAYER_EN1 0x4020 50 + #define ADBE_LAYER_EN2 0x4068 51 + #define ADBE_LAYER_EN3 0x40b4 52 + #define ADBE_LAYER_EN4 0x40f4 53 + #define ADBE_SCALE_CTL 0x40ac 54 + #define ADBE_SCALE_CTL_BYPASS 0x100000 55 + 56 + #define ADBE_LAYER_CTL 0x1038 57 + #define ADBE_LAYER_CTL_ENABLE 0x10000 58 + 59 + #define ADBE_PIX_FMT 0x402c 60 + #define ADBE_PIX_FMT_XRGB32 0x53e4001 61 + 62 + static int adp_open(struct inode *inode, struct file *filp) 63 + { 64 + /* 65 + * 
The modesetting driver does not check the non-desktop connector 66 + * property and keeps the device open and locked. If the touchbar daemon 67 + * opens the device first, modesetting breaks the whole X session. 68 + * Simply refuse to open the device for X11 server processes as 69 + * workaround. 70 + */ 71 + if (current->comm[0] == 'X') 72 + return -EBUSY; 73 + 74 + return drm_open(inode, filp); 75 + } 76 + 77 + static const struct file_operations adp_fops = { 78 + .owner = THIS_MODULE, 79 + .open = adp_open, 80 + .release = drm_release, 81 + .unlocked_ioctl = drm_ioctl, 82 + .compat_ioctl = drm_compat_ioctl, 83 + .poll = drm_poll, 84 + .read = drm_read, 85 + .llseek = noop_llseek, 86 + .mmap = drm_gem_mmap, 87 + .fop_flags = FOP_UNSIGNED_OFFSET, 88 + DRM_GEM_DMA_UNMAPPED_AREA_FOPS 89 + }; 90 + 91 + static int adp_drm_gem_dumb_create(struct drm_file *file_priv, 92 + struct drm_device *drm, 93 + struct drm_mode_create_dumb *args) 94 + { 95 + args->height = ALIGN(args->height, 64); 96 + args->size = args->pitch * args->height; 97 + 98 + return drm_gem_dma_dumb_create_internal(file_priv, drm, args); 99 + } 100 + 101 + static const struct drm_driver adp_driver = { 102 + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 103 + .fops = &adp_fops, 104 + DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(adp_drm_gem_dumb_create), 105 + .name = "adp", 106 + .desc = "Apple Display Pipe DRM Driver", 107 + .major = 0, 108 + .minor = 1, 109 + }; 110 + 111 + struct adp_drv_private { 112 + struct drm_device drm; 113 + struct drm_crtc crtc; 114 + struct drm_encoder *encoder; 115 + struct drm_connector *connector; 116 + struct drm_bridge *next_bridge; 117 + void __iomem *be; 118 + void __iomem *fe; 119 + u32 *mask_buf; 120 + u64 mask_buf_size; 121 + dma_addr_t mask_iova; 122 + int be_irq; 123 + int fe_irq; 124 + spinlock_t irq_lock; 125 + struct drm_pending_vblank_event *event; 126 + }; 127 + 128 + #define to_adp(x) container_of(x, struct adp_drv_private, drm) 129 + 
#define crtc_to_adp(x) container_of(x, struct adp_drv_private, crtc) 130 + 131 + static int adp_plane_atomic_check(struct drm_plane *plane, 132 + struct drm_atomic_state *state) 133 + { 134 + struct drm_plane_state *new_plane_state; 135 + struct drm_crtc_state *crtc_state; 136 + 137 + new_plane_state = drm_atomic_get_new_plane_state(state, plane); 138 + 139 + if (!new_plane_state->crtc) 140 + return 0; 141 + 142 + crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc); 143 + if (IS_ERR(crtc_state)) 144 + return PTR_ERR(crtc_state); 145 + 146 + return drm_atomic_helper_check_plane_state(new_plane_state, 147 + crtc_state, 148 + DRM_PLANE_NO_SCALING, 149 + DRM_PLANE_NO_SCALING, 150 + true, true); 151 + } 152 + 153 + static void adp_plane_atomic_update(struct drm_plane *plane, 154 + struct drm_atomic_state *state) 155 + { 156 + struct adp_drv_private *adp; 157 + struct drm_rect src_rect; 158 + struct drm_gem_dma_object *obj; 159 + struct drm_framebuffer *fb; 160 + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); 161 + u32 src_pos, src_size, dst_pos, dst_size; 162 + 163 + if (!plane || !new_state) 164 + return; 165 + 166 + fb = new_state->fb; 167 + if (!fb) 168 + return; 169 + adp = to_adp(plane->dev); 170 + 171 + drm_rect_fp_to_int(&src_rect, &new_state->src); 172 + src_pos = src_rect.x1 << 16 | src_rect.y1; 173 + dst_pos = new_state->dst.x1 << 16 | new_state->dst.y1; 174 + src_size = drm_rect_width(&src_rect) << 16 | drm_rect_height(&src_rect); 175 + dst_size = drm_rect_width(&new_state->dst) << 16 | 176 + drm_rect_height(&new_state->dst); 177 + writel(src_pos, adp->be + ADBE_SRC_START); 178 + writel(src_size, adp->be + ADBE_SRC_SIZE); 179 + writel(dst_pos, adp->be + ADBE_DST_START); 180 + writel(dst_size, adp->be + ADBE_DST_SIZE); 181 + writel(fb->pitches[0], adp->be + ADBE_STRIDE); 182 + obj = drm_fb_dma_get_gem_obj(fb, 0); 183 + if (obj) 184 + writel(obj->dma_addr + fb->offsets[0], adp->be + ADBE_FB_BASE); 185 + 186 + 
writel(BIT(0), adp->be + ADBE_LAYER_EN1); 187 + writel(BIT(0), adp->be + ADBE_LAYER_EN2); 188 + writel(BIT(0), adp->be + ADBE_LAYER_EN3); 189 + writel(BIT(0), adp->be + ADBE_LAYER_EN4); 190 + writel(ADBE_SCALE_CTL_BYPASS, adp->be + ADBE_SCALE_CTL); 191 + writel(ADBE_LAYER_CTL_ENABLE | BIT(0), adp->be + ADBE_LAYER_CTL); 192 + writel(ADBE_PIX_FMT_XRGB32, adp->be + ADBE_PIX_FMT); 193 + } 194 + 195 + static void adp_plane_atomic_disable(struct drm_plane *plane, 196 + struct drm_atomic_state *state) 197 + { 198 + struct adp_drv_private *adp = to_adp(plane->dev); 199 + 200 + writel(0x0, adp->be + ADBE_LAYER_EN1); 201 + writel(0x0, adp->be + ADBE_LAYER_EN2); 202 + writel(0x0, adp->be + ADBE_LAYER_EN3); 203 + writel(0x0, adp->be + ADBE_LAYER_EN4); 204 + writel(ADBE_LAYER_CTL_ENABLE, adp->be + ADBE_LAYER_CTL); 205 + } 206 + 207 + static const struct drm_plane_helper_funcs adp_plane_helper_funcs = { 208 + .atomic_check = adp_plane_atomic_check, 209 + .atomic_update = adp_plane_atomic_update, 210 + .atomic_disable = adp_plane_atomic_disable, 211 + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS 212 + }; 213 + 214 + static const struct drm_plane_funcs adp_plane_funcs = { 215 + .update_plane = drm_atomic_helper_update_plane, 216 + .disable_plane = drm_atomic_helper_disable_plane, 217 + DRM_GEM_SHADOW_PLANE_FUNCS 218 + }; 219 + 220 + static const u32 plane_formats[] = { 221 + DRM_FORMAT_XRGB8888, 222 + }; 223 + 224 + #define ALL_CRTCS 1 225 + 226 + static struct drm_plane *adp_plane_new(struct adp_drv_private *adp) 227 + { 228 + struct drm_device *drm = &adp->drm; 229 + struct drm_plane *plane; 230 + 231 + plane = __drmm_universal_plane_alloc(drm, sizeof(struct drm_plane), 0, 232 + ALL_CRTCS, &adp_plane_funcs, 233 + plane_formats, ARRAY_SIZE(plane_formats), 234 + NULL, DRM_PLANE_TYPE_PRIMARY, "plane"); 235 + if (!plane) { 236 + drm_err(drm, "failed to allocate plane"); 237 + return ERR_PTR(-ENOMEM); 238 + } 239 + 240 + drm_plane_helper_add(plane, &adp_plane_helper_funcs); 241 + return plane; 
242 + } 243 + 244 + static void adp_enable_vblank(struct adp_drv_private *adp) 245 + { 246 + u32 cur_ctrl; 247 + 248 + writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS); 249 + 250 + cur_ctrl = readl(adp->fe + ADP_CTRL); 251 + writel(cur_ctrl | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); 252 + } 253 + 254 + static int adp_crtc_enable_vblank(struct drm_crtc *crtc) 255 + { 256 + struct drm_device *dev = crtc->dev; 257 + struct adp_drv_private *adp = to_adp(dev); 258 + 259 + adp_enable_vblank(adp); 260 + 261 + return 0; 262 + } 263 + 264 + static void adp_disable_vblank(struct adp_drv_private *adp) 265 + { 266 + u32 cur_ctrl; 267 + 268 + cur_ctrl = readl(adp->fe + ADP_CTRL); 269 + writel(cur_ctrl & ~ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); 270 + writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS); 271 + } 272 + 273 + static void adp_crtc_disable_vblank(struct drm_crtc *crtc) 274 + { 275 + struct drm_device *dev = crtc->dev; 276 + struct adp_drv_private *adp = to_adp(dev); 277 + 278 + adp_disable_vblank(adp); 279 + } 280 + 281 + static void adp_crtc_atomic_enable(struct drm_crtc *crtc, 282 + struct drm_atomic_state *state) 283 + { 284 + struct adp_drv_private *adp = crtc_to_adp(crtc); 285 + 286 + writel(BIT(0), adp->be + ADBE_BLEND_EN2); 287 + writel(BIT(4), adp->be + ADBE_BLEND_EN1); 288 + writel(BIT(0), adp->be + ADBE_BLEND_EN3); 289 + writel(BIT(0), adp->be + ADBE_BLEND_BYPASS); 290 + writel(BIT(0), adp->be + ADBE_BLEND_EN4); 291 + } 292 + 293 + static void adp_crtc_atomic_disable(struct drm_crtc *crtc, 294 + struct drm_atomic_state *state) 295 + { 296 + struct adp_drv_private *adp = crtc_to_adp(crtc); 297 + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); 298 + 299 + drm_atomic_helper_disable_planes_on_crtc(old_state, false); 300 + 301 + writel(0x0, adp->be + ADBE_BLEND_EN2); 302 + writel(0x0, adp->be + ADBE_BLEND_EN1); 303 + writel(0x0, adp->be + ADBE_BLEND_EN3); 304 + writel(0x0, adp->be + ADBE_BLEND_BYPASS); 305 + 
writel(0x0, adp->be + ADBE_BLEND_EN4); 306 + drm_crtc_vblank_off(crtc); 307 + } 308 + 309 + static void adp_crtc_atomic_flush(struct drm_crtc *crtc, 310 + struct drm_atomic_state *state) 311 + { 312 + u32 frame_num = 1; 313 + struct adp_drv_private *adp = crtc_to_adp(crtc); 314 + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc); 315 + u64 new_size = ALIGN(new_state->mode.hdisplay * 316 + new_state->mode.vdisplay * 4, PAGE_SIZE); 317 + 318 + if (new_size != adp->mask_buf_size) { 319 + if (adp->mask_buf) 320 + dma_free_coherent(crtc->dev->dev, adp->mask_buf_size, 321 + adp->mask_buf, adp->mask_iova); 322 + adp->mask_buf = NULL; 323 + if (new_size != 0) { 324 + adp->mask_buf = dma_alloc_coherent(crtc->dev->dev, new_size, 325 + &adp->mask_iova, GFP_KERNEL); 326 + memset(adp->mask_buf, 0xFF, new_size); 327 + writel(adp->mask_iova, adp->be + ADBE_MASK_BUF); 328 + } 329 + adp->mask_buf_size = new_size; 330 + } 331 + writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO); 332 + //FIXME: use adbe flush interrupt 333 + spin_lock_irq(&crtc->dev->event_lock); 334 + if (crtc->state->event) { 335 + drm_crtc_vblank_get(crtc); 336 + adp->event = crtc->state->event; 337 + } 338 + crtc->state->event = NULL; 339 + spin_unlock_irq(&crtc->dev->event_lock); 340 + } 341 + 342 + static const struct drm_crtc_funcs adp_crtc_funcs = { 343 + .destroy = drm_crtc_cleanup, 344 + .set_config = drm_atomic_helper_set_config, 345 + .page_flip = drm_atomic_helper_page_flip, 346 + .reset = drm_atomic_helper_crtc_reset, 347 + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 348 + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 349 + .enable_vblank = adp_crtc_enable_vblank, 350 + .disable_vblank = adp_crtc_disable_vblank, 351 + }; 352 + 353 + 354 + static const struct drm_crtc_helper_funcs adp_crtc_helper_funcs = { 355 + .atomic_enable = adp_crtc_atomic_enable, 356 + .atomic_disable = adp_crtc_atomic_disable, 357 + .atomic_flush = 
adp_crtc_atomic_flush, 358 + }; 359 + 360 + static int adp_setup_crtc(struct adp_drv_private *adp) 361 + { 362 + struct drm_device *drm = &adp->drm; 363 + struct drm_plane *primary; 364 + int ret; 365 + 366 + primary = adp_plane_new(adp); 367 + if (IS_ERR(primary)) 368 + return PTR_ERR(primary); 369 + 370 + ret = drm_crtc_init_with_planes(drm, &adp->crtc, primary, 371 + NULL, &adp_crtc_funcs, NULL); 372 + if (ret) 373 + return ret; 374 + 375 + drm_crtc_helper_add(&adp->crtc, &adp_crtc_helper_funcs); 376 + return 0; 377 + } 378 + 379 + static const struct drm_mode_config_funcs adp_mode_config_funcs = { 380 + .fb_create = drm_gem_fb_create_with_dirty, 381 + .atomic_check = drm_atomic_helper_check, 382 + .atomic_commit = drm_atomic_helper_commit, 383 + }; 384 + 385 + static int adp_setup_mode_config(struct adp_drv_private *adp) 386 + { 387 + struct drm_device *drm = &adp->drm; 388 + int ret; 389 + u32 size; 390 + 391 + ret = drmm_mode_config_init(drm); 392 + if (ret) 393 + return ret; 394 + 395 + /* 396 + * Query screen size restrict the frame buffer size to the screen size 397 + * aligned to the next multiple of 64. This is not necessary but can be 398 + * used as simple check for non-desktop devices. 399 + * Xorg's modesetting driver does not care about the connector 400 + * "non-desktop" property. The max frame buffer width or height can be 401 + * easily checked and a device can be reject if the max width/height is 402 + * smaller than 120 for example. 403 + * Any touchbar daemon is not limited by this small framebuffer size. 
404 + */ 405 + size = readl(adp->fe + ADP_SCREEN_SIZE); 406 + 407 + drm->mode_config.min_width = 32; 408 + drm->mode_config.min_height = 32; 409 + drm->mode_config.max_width = ALIGN(FIELD_GET(ADP_SCREEN_HSIZE, size), 64); 410 + drm->mode_config.max_height = ALIGN(FIELD_GET(ADP_SCREEN_VSIZE, size), 64); 411 + drm->mode_config.preferred_depth = 24; 412 + drm->mode_config.prefer_shadow = 0; 413 + drm->mode_config.funcs = &adp_mode_config_funcs; 414 + 415 + ret = adp_setup_crtc(adp); 416 + if (ret) { 417 + drm_err(drm, "failed to create crtc"); 418 + return ret; 419 + } 420 + 421 + adp->encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DSI, NULL); 422 + if (IS_ERR(adp->encoder)) { 423 + drm_err(drm, "failed to init encoder"); 424 + return PTR_ERR(adp->encoder); 425 + } 426 + adp->encoder->possible_crtcs = ALL_CRTCS; 427 + 428 + ret = drm_bridge_attach(adp->encoder, adp->next_bridge, NULL, 429 + DRM_BRIDGE_ATTACH_NO_CONNECTOR); 430 + if (ret) { 431 + drm_err(drm, "failed to init bridge chain"); 432 + return ret; 433 + } 434 + 435 + adp->connector = drm_bridge_connector_init(drm, adp->encoder); 436 + if (IS_ERR(adp->connector)) 437 + return PTR_ERR(adp->connector); 438 + 439 + drm_connector_attach_encoder(adp->connector, adp->encoder); 440 + 441 + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); 442 + if (ret < 0) { 443 + drm_err(drm, "failed to initialize vblank"); 444 + return ret; 445 + } 446 + 447 + drm_mode_config_reset(drm); 448 + 449 + return 0; 450 + } 451 + 452 + static int adp_parse_of(struct platform_device *pdev, struct adp_drv_private *adp) 453 + { 454 + struct device *dev = &pdev->dev; 455 + 456 + adp->be = devm_platform_ioremap_resource_byname(pdev, "be"); 457 + if (IS_ERR(adp->be)) { 458 + dev_err(dev, "failed to map display backend mmio"); 459 + return PTR_ERR(adp->be); 460 + } 461 + 462 + adp->fe = devm_platform_ioremap_resource_byname(pdev, "fe"); 463 + if (IS_ERR(adp->fe)) { 464 + dev_err(dev, "failed to map display pipe mmio"); 465 
+ return PTR_ERR(adp->fe); 466 + } 467 + 468 + adp->be_irq = platform_get_irq_byname(pdev, "be"); 469 + if (adp->be_irq < 0) 470 + return adp->be_irq; 471 + 472 + adp->fe_irq = platform_get_irq_byname(pdev, "fe"); 473 + if (adp->fe_irq < 0) 474 + return adp->fe_irq; 475 + 476 + return 0; 477 + } 478 + 479 + static irqreturn_t adp_fe_irq(int irq, void *arg) 480 + { 481 + struct adp_drv_private *adp = (struct adp_drv_private *)arg; 482 + u32 int_status; 483 + u32 int_ctl; 484 + 485 + spin_lock(&adp->irq_lock); 486 + 487 + int_status = readl(adp->fe + ADP_INT_STATUS); 488 + if (int_status & ADP_INT_STATUS_VBLANK) { 489 + drm_crtc_handle_vblank(&adp->crtc); 490 + spin_lock(&adp->crtc.dev->event_lock); 491 + if (adp->event) { 492 + int_ctl = readl(adp->fe + ADP_CTRL); 493 + if ((int_ctl & 0xF00) == 0x600) { 494 + drm_crtc_send_vblank_event(&adp->crtc, adp->event); 495 + adp->event = NULL; 496 + drm_crtc_vblank_put(&adp->crtc); 497 + } 498 + } 499 + spin_unlock(&adp->crtc.dev->event_lock); 500 + } 501 + 502 + writel(int_status, adp->fe + ADP_INT_STATUS); 503 + 504 + spin_unlock(&adp->irq_lock); 505 + 506 + return IRQ_HANDLED; 507 + } 508 + 509 + static int adp_drm_bind(struct device *dev) 510 + { 511 + struct drm_device *drm = dev_get_drvdata(dev); 512 + struct adp_drv_private *adp = to_adp(drm); 513 + int err; 514 + 515 + adp_disable_vblank(adp); 516 + writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); 517 + 518 + adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0); 519 + if (IS_ERR(adp->next_bridge)) { 520 + dev_err(dev, "failed to find next bridge"); 521 + return PTR_ERR(adp->next_bridge); 522 + } 523 + 524 + err = adp_setup_mode_config(adp); 525 + if (err < 0) 526 + return err; 527 + 528 + err = request_irq(adp->fe_irq, adp_fe_irq, 0, "adp-fe", adp); 529 + if (err) 530 + return err; 531 + 532 + err = drm_dev_register(&adp->drm, 0); 533 + if (err) 534 + return err; 535 + 536 + return 0; 537 + } 538 + 539 + static void 
adp_drm_unbind(struct device *dev) 540 + { 541 + struct drm_device *drm = dev_get_drvdata(dev); 542 + struct adp_drv_private *adp = to_adp(drm); 543 + 544 + drm_dev_unregister(drm); 545 + drm_atomic_helper_shutdown(drm); 546 + free_irq(adp->fe_irq, adp); 547 + } 548 + 549 + static const struct component_master_ops adp_master_ops = { 550 + .bind = adp_drm_bind, 551 + .unbind = adp_drm_unbind, 552 + }; 553 + 554 + static int compare_dev(struct device *dev, void *data) 555 + { 556 + return dev->of_node == data; 557 + } 558 + 559 + static int adp_probe(struct platform_device *pdev) 560 + { 561 + struct device_node *port; 562 + struct component_match *match = NULL; 563 + struct adp_drv_private *adp; 564 + int err; 565 + 566 + adp = devm_drm_dev_alloc(&pdev->dev, &adp_driver, struct adp_drv_private, drm); 567 + if (IS_ERR(adp)) 568 + return PTR_ERR(adp); 569 + 570 + spin_lock_init(&adp->irq_lock); 571 + 572 + dev_set_drvdata(&pdev->dev, &adp->drm); 573 + 574 + err = adp_parse_of(pdev, adp); 575 + if (err < 0) 576 + return err; 577 + 578 + port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0); 579 + if (!port) 580 + return -ENODEV; 581 + 582 + drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); 583 + of_node_put(port); 584 + 585 + return component_master_add_with_match(&pdev->dev, &adp_master_ops, match); 586 + } 587 + 588 + static void adp_remove(struct platform_device *pdev) 589 + { 590 + component_master_del(&pdev->dev, &adp_master_ops); 591 + dev_set_drvdata(&pdev->dev, NULL); 592 + } 593 + 594 + static const struct of_device_id adp_of_match[] = { 595 + { .compatible = "apple,h7-display-pipe", }, 596 + { }, 597 + }; 598 + MODULE_DEVICE_TABLE(of, adp_of_match); 599 + 600 + static struct platform_driver adp_platform_driver = { 601 + .driver = { 602 + .name = "adp", 603 + .of_match_table = adp_of_match, 604 + }, 605 + .probe = adp_probe, 606 + .remove = adp_remove, 607 + }; 608 + 609 + module_platform_driver(adp_platform_driver); 610 + 611 + 
/* Module metadata. */
MODULE_DESCRIPTION("Apple Display Pipe DRM driver");
MODULE_LICENSE("GPL");
+1 -1
drivers/gpu/drm/bridge/sii902x.c
··· 887 887 lanes[0] = 0; 888 888 } else if (num_lanes < 0) { 889 889 dev_err(dev, 890 - "%s: Error gettin \"sil,i2s-data-lanes\": %d\n", 890 + "%s: Error getting \"sil,i2s-data-lanes\": %d\n", 891 891 __func__, num_lanes); 892 892 return num_lanes; 893 893 }
+489
drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
··· 36 36 37 37 #define SCRAMB_POLL_DELAY_MS 3000 38 38 39 + /* 40 + * Unless otherwise noted, entries in this table are 100% optimization. 41 + * Values can be obtained from dw_hdmi_qp_compute_n() but that function is 42 + * slow so we pre-compute values we expect to see. 43 + * 44 + * The values for TMDS 25175, 25200, 27000, 54000, 74250 and 148500 kHz are 45 + * the recommended N values specified in the Audio chapter of the HDMI 46 + * specification. 47 + */ 48 + static const struct dw_hdmi_audio_tmds_n { 49 + unsigned long tmds; 50 + unsigned int n_32k; 51 + unsigned int n_44k1; 52 + unsigned int n_48k; 53 + } common_tmds_n_table[] = { 54 + { .tmds = 25175000, .n_32k = 4576, .n_44k1 = 7007, .n_48k = 6864, }, 55 + { .tmds = 25200000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, 56 + { .tmds = 27000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, 57 + { .tmds = 28320000, .n_32k = 4096, .n_44k1 = 5586, .n_48k = 6144, }, 58 + { .tmds = 30240000, .n_32k = 4096, .n_44k1 = 5642, .n_48k = 6144, }, 59 + { .tmds = 31500000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, }, 60 + { .tmds = 32000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, }, 61 + { .tmds = 33750000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, 62 + { .tmds = 36000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, }, 63 + { .tmds = 40000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, }, 64 + { .tmds = 49500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, }, 65 + { .tmds = 50000000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, }, 66 + { .tmds = 54000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, 67 + { .tmds = 65000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, }, 68 + { .tmds = 68250000, .n_32k = 4096, .n_44k1 = 5376, .n_48k = 6144, }, 69 + { .tmds = 71000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, }, 70 + { .tmds = 72000000, .n_32k = 4096, .n_44k1 = 5635, .n_48k = 6144, }, 71 + { .tmds = 73250000, .n_32k = 11648, .n_44k1 = 14112, .n_48k = 6144, }, 72 + { .tmds = 
74250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, 73 + { .tmds = 75000000, .n_32k = 4096, .n_44k1 = 5880, .n_48k = 6144, }, 74 + { .tmds = 78750000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, }, 75 + { .tmds = 78800000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, }, 76 + { .tmds = 79500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, }, 77 + { .tmds = 83500000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, }, 78 + { .tmds = 85500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, }, 79 + { .tmds = 88750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, }, 80 + { .tmds = 97750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, }, 81 + { .tmds = 101000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, }, 82 + { .tmds = 106500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, }, 83 + { .tmds = 108000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, }, 84 + { .tmds = 115500000, .n_32k = 4096, .n_44k1 = 5712, .n_48k = 6144, }, 85 + { .tmds = 119000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, }, 86 + { .tmds = 135000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, }, 87 + { .tmds = 146250000, .n_32k = 11648, .n_44k1 = 6272, .n_48k = 6144, }, 88 + { .tmds = 148500000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, 89 + { .tmds = 154000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, }, 90 + { .tmds = 162000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, }, 91 + 92 + /* For 297 MHz+ HDMI spec have some other rule for setting N */ 93 + { .tmds = 297000000, .n_32k = 3073, .n_44k1 = 4704, .n_48k = 5120, }, 94 + { .tmds = 594000000, .n_32k = 3073, .n_44k1 = 9408, .n_48k = 10240,}, 95 + 96 + /* End of table */ 97 + { .tmds = 0, .n_32k = 0, .n_44k1 = 0, .n_48k = 0, }, 98 + }; 99 + 100 + /* 101 + * These are the CTS values as recommended in the Audio chapter of the HDMI 102 + * specification. 
103 + */ 104 + static const struct dw_hdmi_audio_tmds_cts { 105 + unsigned long tmds; 106 + unsigned int cts_32k; 107 + unsigned int cts_44k1; 108 + unsigned int cts_48k; 109 + } common_tmds_cts_table[] = { 110 + { .tmds = 25175000, .cts_32k = 28125, .cts_44k1 = 31250, .cts_48k = 28125, }, 111 + { .tmds = 25200000, .cts_32k = 25200, .cts_44k1 = 28000, .cts_48k = 25200, }, 112 + { .tmds = 27000000, .cts_32k = 27000, .cts_44k1 = 30000, .cts_48k = 27000, }, 113 + { .tmds = 54000000, .cts_32k = 54000, .cts_44k1 = 60000, .cts_48k = 54000, }, 114 + { .tmds = 74250000, .cts_32k = 74250, .cts_44k1 = 82500, .cts_48k = 74250, }, 115 + { .tmds = 148500000, .cts_32k = 148500, .cts_44k1 = 165000, .cts_48k = 148500, }, 116 + 117 + /* End of table */ 118 + { .tmds = 0, .cts_32k = 0, .cts_44k1 = 0, .cts_48k = 0, }, 119 + }; 120 + 39 121 struct dw_hdmi_qp_i2c { 40 122 struct i2c_adapter adap; 41 123 ··· 142 60 } phy; 143 61 144 62 struct regmap *regm; 63 + 64 + unsigned long tmds_char_rate; 145 65 }; 146 66 147 67 static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val, ··· 165 81 unsigned int mask, unsigned int reg) 166 82 { 167 83 regmap_update_bits(hdmi->regm, reg, mask, data); 84 + } 85 + 86 + static struct dw_hdmi_qp *dw_hdmi_qp_from_bridge(struct drm_bridge *bridge) 87 + { 88 + return container_of(bridge, struct dw_hdmi_qp, bridge); 89 + } 90 + 91 + static void dw_hdmi_qp_set_cts_n(struct dw_hdmi_qp *hdmi, unsigned int cts, 92 + unsigned int n) 93 + { 94 + /* Set N */ 95 + dw_hdmi_qp_mod(hdmi, n, AUDPKT_ACR_N_VALUE, AUDPKT_ACR_CONTROL0); 96 + 97 + /* Set CTS */ 98 + if (cts) 99 + dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_EN, AUDPKT_ACR_CTS_OVR_EN_MSK, 100 + AUDPKT_ACR_CONTROL1); 101 + else 102 + dw_hdmi_qp_mod(hdmi, 0, AUDPKT_ACR_CTS_OVR_EN_MSK, 103 + AUDPKT_ACR_CONTROL1); 104 + 105 + dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_VAL(cts), AUDPKT_ACR_CTS_OVR_VAL_MSK, 106 + AUDPKT_ACR_CONTROL1); 107 + } 108 + 109 + static int dw_hdmi_qp_match_tmds_n_table(struct 
dw_hdmi_qp *hdmi, 110 + unsigned long pixel_clk, 111 + unsigned long freq) 112 + { 113 + const struct dw_hdmi_audio_tmds_n *tmds_n = NULL; 114 + int i; 115 + 116 + for (i = 0; common_tmds_n_table[i].tmds != 0; i++) { 117 + if (pixel_clk == common_tmds_n_table[i].tmds) { 118 + tmds_n = &common_tmds_n_table[i]; 119 + break; 120 + } 121 + } 122 + 123 + if (!tmds_n) 124 + return -ENOENT; 125 + 126 + switch (freq) { 127 + case 32000: 128 + return tmds_n->n_32k; 129 + case 44100: 130 + case 88200: 131 + case 176400: 132 + return (freq / 44100) * tmds_n->n_44k1; 133 + case 48000: 134 + case 96000: 135 + case 192000: 136 + return (freq / 48000) * tmds_n->n_48k; 137 + default: 138 + return -ENOENT; 139 + } 140 + } 141 + 142 + static u32 dw_hdmi_qp_audio_math_diff(unsigned int freq, unsigned int n, 143 + unsigned int pixel_clk) 144 + { 145 + u64 cts = mul_u32_u32(pixel_clk, n); 146 + 147 + return do_div(cts, 128 * freq); 148 + } 149 + 150 + static unsigned int dw_hdmi_qp_compute_n(struct dw_hdmi_qp *hdmi, 151 + unsigned long pixel_clk, 152 + unsigned long freq) 153 + { 154 + unsigned int min_n = DIV_ROUND_UP((128 * freq), 1500); 155 + unsigned int max_n = (128 * freq) / 300; 156 + unsigned int ideal_n = (128 * freq) / 1000; 157 + unsigned int best_n_distance = ideal_n; 158 + unsigned int best_n = 0; 159 + u64 best_diff = U64_MAX; 160 + int n; 161 + 162 + /* If the ideal N could satisfy the audio math, then just take it */ 163 + if (dw_hdmi_qp_audio_math_diff(freq, ideal_n, pixel_clk) == 0) 164 + return ideal_n; 165 + 166 + for (n = min_n; n <= max_n; n++) { 167 + u64 diff = dw_hdmi_qp_audio_math_diff(freq, n, pixel_clk); 168 + 169 + if (diff < best_diff || 170 + (diff == best_diff && abs(n - ideal_n) < best_n_distance)) { 171 + best_n = n; 172 + best_diff = diff; 173 + best_n_distance = abs(best_n - ideal_n); 174 + } 175 + 176 + /* 177 + * The best N already satisfy the audio math, and also be 178 + * the closest value to ideal N, so just cut the loop. 
179 + */ 180 + if (best_diff == 0 && (abs(n - ideal_n) > best_n_distance)) 181 + break; 182 + } 183 + 184 + return best_n; 185 + } 186 + 187 + static unsigned int dw_hdmi_qp_find_n(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk, 188 + unsigned long sample_rate) 189 + { 190 + int n = dw_hdmi_qp_match_tmds_n_table(hdmi, pixel_clk, sample_rate); 191 + 192 + if (n > 0) 193 + return n; 194 + 195 + dev_warn(hdmi->dev, "Rate %lu missing; compute N dynamically\n", 196 + pixel_clk); 197 + 198 + return dw_hdmi_qp_compute_n(hdmi, pixel_clk, sample_rate); 199 + } 200 + 201 + static unsigned int dw_hdmi_qp_find_cts(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk, 202 + unsigned long sample_rate) 203 + { 204 + const struct dw_hdmi_audio_tmds_cts *tmds_cts = NULL; 205 + int i; 206 + 207 + for (i = 0; common_tmds_cts_table[i].tmds != 0; i++) { 208 + if (pixel_clk == common_tmds_cts_table[i].tmds) { 209 + tmds_cts = &common_tmds_cts_table[i]; 210 + break; 211 + } 212 + } 213 + 214 + if (!tmds_cts) 215 + return 0; 216 + 217 + switch (sample_rate) { 218 + case 32000: 219 + return tmds_cts->cts_32k; 220 + case 44100: 221 + case 88200: 222 + case 176400: 223 + return tmds_cts->cts_44k1; 224 + case 48000: 225 + case 96000: 226 + case 192000: 227 + return tmds_cts->cts_48k; 228 + default: 229 + return -ENOENT; 230 + } 231 + } 232 + 233 + static void dw_hdmi_qp_set_audio_interface(struct dw_hdmi_qp *hdmi, 234 + struct hdmi_codec_daifmt *fmt, 235 + struct hdmi_codec_params *hparms) 236 + { 237 + u32 conf0 = 0; 238 + 239 + /* Reset the audio data path of the AVP */ 240 + dw_hdmi_qp_write(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWINIT_P, GLOBAL_SWRESET_REQUEST); 241 + 242 + /* Disable AUDS, ACR, AUDI */ 243 + dw_hdmi_qp_mod(hdmi, 0, 244 + PKTSCHED_ACR_TX_EN | PKTSCHED_AUDS_TX_EN | PKTSCHED_AUDI_TX_EN, 245 + PKTSCHED_PKT_EN); 246 + 247 + /* Clear the audio FIFO */ 248 + dw_hdmi_qp_write(hdmi, AUDIO_FIFO_CLR_P, AUDIO_INTERFACE_CONTROL0); 249 + 250 + /* Select I2S interface as the audio source */ 
251 + dw_hdmi_qp_mod(hdmi, AUD_IF_I2S, AUD_IF_SEL_MSK, AUDIO_INTERFACE_CONFIG0); 252 + 253 + /* Enable the active i2s lanes */ 254 + switch (hparms->channels) { 255 + case 7 ... 8: 256 + conf0 |= I2S_LINES_EN(3); 257 + fallthrough; 258 + case 5 ... 6: 259 + conf0 |= I2S_LINES_EN(2); 260 + fallthrough; 261 + case 3 ... 4: 262 + conf0 |= I2S_LINES_EN(1); 263 + fallthrough; 264 + default: 265 + conf0 |= I2S_LINES_EN(0); 266 + break; 267 + } 268 + 269 + dw_hdmi_qp_mod(hdmi, conf0, I2S_LINES_EN_MSK, AUDIO_INTERFACE_CONFIG0); 270 + 271 + /* 272 + * Enable bpcuv generated internally for L-PCM, or received 273 + * from stream for NLPCM/HBR. 274 + */ 275 + switch (fmt->bit_fmt) { 276 + case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE: 277 + conf0 = (hparms->channels == 8) ? AUD_HBR : AUD_ASP; 278 + conf0 |= I2S_BPCUV_RCV_EN; 279 + break; 280 + default: 281 + conf0 = AUD_ASP | I2S_BPCUV_RCV_DIS; 282 + break; 283 + } 284 + 285 + dw_hdmi_qp_mod(hdmi, conf0, I2S_BPCUV_RCV_MSK | AUD_FORMAT_MSK, 286 + AUDIO_INTERFACE_CONFIG0); 287 + 288 + /* Enable audio FIFO auto clear when overflow */ 289 + dw_hdmi_qp_mod(hdmi, AUD_FIFO_INIT_ON_OVF_EN, AUD_FIFO_INIT_ON_OVF_MSK, 290 + AUDIO_INTERFACE_CONFIG0); 291 + } 292 + 293 + /* 294 + * When transmitting IEC60958 linear PCM audio, these registers allow to 295 + * configure the channel status information of all the channel status 296 + * bits in the IEC60958 frame. For the moment this configuration is only 297 + * used when the I2S audio interface, General Purpose Audio (GPA), 298 + * or AHB audio DMA (AHBAUDDMA) interface is active 299 + * (for S/PDIF interface this information comes from the stream). 
300 + */ 301 + static void dw_hdmi_qp_set_channel_status(struct dw_hdmi_qp *hdmi, 302 + u8 *channel_status, bool ref2stream) 303 + { 304 + /* 305 + * AUDPKT_CHSTATUS_OVR0: { RSV, RSV, CS1, CS0 } 306 + * AUDPKT_CHSTATUS_OVR1: { CS6, CS5, CS4, CS3 } 307 + * 308 + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | 309 + * CS0: | Mode | d | c | b | a | 310 + * CS1: | Category Code | 311 + * CS2: | Channel Number | Source Number | 312 + * CS3: | Clock Accuracy | Sample Freq | 313 + * CS4: | Ori Sample Freq | Word Length | 314 + * CS5: | | CGMS-A | 315 + * CS6~CS23: Reserved 316 + * 317 + * a: use of channel status block 318 + * b: linear PCM identification: 0 for lpcm, 1 for nlpcm 319 + * c: copyright information 320 + * d: additional format information 321 + */ 322 + 323 + if (ref2stream) 324 + channel_status[0] |= IEC958_AES0_NONAUDIO; 325 + 326 + if ((dw_hdmi_qp_read(hdmi, AUDIO_INTERFACE_CONFIG0) & GENMASK(25, 24)) == AUD_HBR) { 327 + /* fixup cs for HBR */ 328 + channel_status[3] = (channel_status[3] & 0xf0) | IEC958_AES3_CON_FS_768000; 329 + channel_status[4] = (channel_status[4] & 0x0f) | IEC958_AES4_CON_ORIGFS_NOTID; 330 + } 331 + 332 + dw_hdmi_qp_write(hdmi, channel_status[0] | (channel_status[1] << 8), 333 + AUDPKT_CHSTATUS_OVR0); 334 + 335 + regmap_bulk_write(hdmi->regm, AUDPKT_CHSTATUS_OVR1, &channel_status[3], 1); 336 + 337 + if (ref2stream) 338 + dw_hdmi_qp_mod(hdmi, 0, 339 + AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK, 340 + AUDPKT_CONTROL0); 341 + else 342 + dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN, 343 + AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK, 344 + AUDPKT_CONTROL0); 345 + } 346 + 347 + static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long long tmds_char_rate, 348 + unsigned int sample_rate) 349 + { 350 + unsigned int n, cts; 351 + 352 + n = dw_hdmi_qp_find_n(hdmi, tmds_char_rate, sample_rate); 353 + cts = dw_hdmi_qp_find_cts(hdmi, tmds_char_rate, sample_rate); 354 + 355 + 
dw_hdmi_qp_set_cts_n(hdmi, cts, n); 356 + } 357 + 358 + static int dw_hdmi_qp_audio_enable(struct drm_connector *connector, 359 + struct drm_bridge *bridge) 360 + { 361 + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); 362 + 363 + if (hdmi->tmds_char_rate) 364 + dw_hdmi_qp_mod(hdmi, 0, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE); 365 + 366 + return 0; 367 + } 368 + 369 + static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector, 370 + struct drm_bridge *bridge, 371 + struct hdmi_codec_daifmt *fmt, 372 + struct hdmi_codec_params *hparms) 373 + { 374 + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); 375 + bool ref2stream = false; 376 + 377 + if (!hdmi->tmds_char_rate) 378 + return -ENODEV; 379 + 380 + if (fmt->bit_clk_provider | fmt->frame_clk_provider) { 381 + dev_err(hdmi->dev, "unsupported clock settings\n"); 382 + return -EINVAL; 383 + } 384 + 385 + if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE) 386 + ref2stream = true; 387 + 388 + dw_hdmi_qp_set_audio_interface(hdmi, fmt, hparms); 389 + dw_hdmi_qp_set_sample_rate(hdmi, hdmi->tmds_char_rate, hparms->sample_rate); 390 + dw_hdmi_qp_set_channel_status(hdmi, hparms->iec.status, ref2stream); 391 + drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector, &hparms->cea); 392 + 393 + return 0; 394 + } 395 + 396 + static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi) 397 + { 398 + /* 399 + * Keep ACR, AUDI, AUDS packet always on to make SINK device 400 + * active for better compatibility and user experience. 401 + * 402 + * This also fix POP sound on some SINK devices which wakeup 403 + * from suspend to active. 
404 + */ 405 + dw_hdmi_qp_mod(hdmi, I2S_BPCUV_RCV_DIS, I2S_BPCUV_RCV_MSK, 406 + AUDIO_INTERFACE_CONFIG0); 407 + dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN, 408 + AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK, 409 + AUDPKT_CONTROL0); 410 + 411 + dw_hdmi_qp_mod(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, 412 + AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE); 413 + } 414 + 415 + static void dw_hdmi_qp_audio_disable(struct drm_connector *connector, 416 + struct drm_bridge *bridge) 417 + { 418 + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); 419 + 420 + drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector); 421 + 422 + if (hdmi->tmds_char_rate) 423 + dw_hdmi_qp_audio_disable_regs(hdmi); 168 424 } 169 425 170 426 static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi, ··· 785 361 return 0; 786 362 } 787 363 364 + /* 365 + * Static values documented in the TRM 366 + * Different values are only used for debug purposes 367 + */ 368 + #define DW_HDMI_QP_AUDIO_INFOFRAME_HB1 0x1 369 + #define DW_HDMI_QP_AUDIO_INFOFRAME_HB2 0xa 370 + 371 + static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi, 372 + const u8 *buffer, size_t len) 373 + { 374 + /* 375 + * AUDI_CONTENTS0: { RSV, HB2, HB1, RSV } 376 + * AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 } 377 + * AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 } 378 + * 379 + * PB0: CheckSum 380 + * PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 | 381 + * PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 | 382 + * PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 | 383 + * PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 | 384 + * PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 | 385 + * PB6~PB10: Reserved 386 + * 387 + * AUDI_CONTENTS0 default value defined by HDMI specification, 388 + * and shall only be changed for debug purposes. 
389 + */ 390 + u32 header_bytes = (DW_HDMI_QP_AUDIO_INFOFRAME_HB1 << 8) | 391 + (DW_HDMI_QP_AUDIO_INFOFRAME_HB2 << 16); 392 + 393 + regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1); 394 + regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1); 395 + regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1); 396 + 397 + /* Enable ACR, AUDI, AMD */ 398 + dw_hdmi_qp_mod(hdmi, 399 + PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN, 400 + PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN, 401 + PKTSCHED_PKT_EN); 402 + 403 + /* Enable AUDS */ 404 + dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN); 405 + 406 + return 0; 407 + } 408 + 788 409 static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, 789 410 struct drm_atomic_state *state) 790 411 { ··· 850 381 dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", 851 382 __func__, conn_state->hdmi.tmds_char_rate); 852 383 op_mode = 0; 384 + hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; 853 385 } else { 854 386 dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__); 855 387 op_mode = OPMODE_DVI; ··· 868 398 struct drm_atomic_state *state) 869 399 { 870 400 struct dw_hdmi_qp *hdmi = bridge->driver_private; 401 + 402 + hdmi->tmds_char_rate = 0; 871 403 872 404 hdmi->phy.ops->disable(hdmi, hdmi->phy.data); 873 405 } ··· 926 454 dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); 927 455 break; 928 456 457 + case HDMI_INFOFRAME_TYPE_AUDIO: 458 + dw_hdmi_qp_mod(hdmi, 0, 459 + PKTSCHED_ACR_TX_EN | 460 + PKTSCHED_AUDS_TX_EN | 461 + PKTSCHED_AUDI_TX_EN, 462 + PKTSCHED_PKT_EN); 463 + break; 929 464 default: 930 465 dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); 931 466 } ··· 955 476 case HDMI_INFOFRAME_TYPE_DRM: 956 477 return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len); 957 478 479 + case HDMI_INFOFRAME_TYPE_AUDIO: 480 + return dw_hdmi_qp_config_audio_infoframe(hdmi, buffer, len); 481 + 958 482 default: 
959 483 dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); 960 484 return 0; ··· 975 493 .hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid, 976 494 .hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe, 977 495 .hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe, 496 + .hdmi_audio_startup = dw_hdmi_qp_audio_enable, 497 + .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable, 498 + .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare, 978 499 }; 979 500 980 501 static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) ··· 1086 601 hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi); 1087 602 if (IS_ERR(hdmi->bridge.ddc)) 1088 603 return ERR_CAST(hdmi->bridge.ddc); 604 + 605 + hdmi->bridge.hdmi_audio_max_i2s_playback_channels = 8; 606 + hdmi->bridge.hdmi_audio_dev = dev; 607 + hdmi->bridge.hdmi_audio_dai_port = 1; 1089 608 1090 609 ret = devm_drm_bridge_add(dev, &hdmi->bridge); 1091 610 if (ret)
+2
drivers/gpu/drm/bridge/ti-sn65dsi83.c
··· 561 561 REG_LVDS_FMT_HS_NEG_POLARITY : 0) | 562 562 (mode->flags & DRM_MODE_FLAG_NVSYNC ? 563 563 REG_LVDS_FMT_VS_NEG_POLARITY : 0); 564 + val |= bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW ? 565 + REG_LVDS_FMT_DE_NEG_POLARITY : 0; 564 566 565 567 /* Set up bits-per-pixel, 18bpp or 24bpp. */ 566 568 if (lvds_format_24bpp) {
+1 -1
drivers/gpu/drm/drm_fb_dma_helper.c
··· 178 178 dma_obj = drm_fb_dma_get_gem_obj(fb, 0); 179 179 180 180 /* Buffer should be accessible from the CPU */ 181 - if (dma_obj->base.import_attach) 181 + if (drm_gem_is_imported(&dma_obj->base)) 182 182 return -ENODEV; 183 183 184 184 /* Buffer should be already mapped to CPU */
+54
drivers/gpu/drm/drm_format_helper.c
··· 702 702 } 703 703 EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888); 704 704 705 + static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels) 706 + { 707 + u8 *dbuf8 = dbuf; 708 + const __le32 *sbuf32 = sbuf; 709 + unsigned int x; 710 + u32 pix; 711 + 712 + for (x = 0; x < pixels; x++) { 713 + pix = le32_to_cpu(sbuf32[x]); 714 + /* write red-green-blue to output in little endianness */ 715 + *dbuf8++ = (pix & 0x00ff0000) >> 16; 716 + *dbuf8++ = (pix & 0x0000ff00) >> 8; 717 + *dbuf8++ = (pix & 0x000000ff) >> 0; 718 + } 719 + } 720 + 721 + /** 722 + * drm_fb_xrgb8888_to_bgr888 - Convert XRGB8888 to BGR888 clip buffer 723 + * @dst: Array of BGR888 destination buffers 724 + * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines 725 + * within @dst; can be NULL if scanlines are stored next to each other. 726 + * @src: Array of XRGB8888 source buffers 727 + * @fb: DRM framebuffer 728 + * @clip: Clip rectangle area to copy 729 + * @state: Transform and conversion state 730 + * 731 + * This function copies parts of a framebuffer to display memory and converts the 732 + * color format during the process. Destination and framebuffer formats must match. The 733 + * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at 734 + * least as many entries as there are planes in @fb's format. Each entry stores the 735 + * value for the format's respective color plane at the same index. 736 + * 737 + * This function does not apply clipping on @dst (i.e. the destination is at the 738 + * top-left corner). 739 + * 740 + * Drivers can use this function for BGR888 devices that don't natively 741 + * support XRGB8888. 
742 + */ 743 + void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch, 744 + const struct iosys_map *src, const struct drm_framebuffer *fb, 745 + const struct drm_rect *clip, struct drm_format_conv_state *state) 746 + { 747 + static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 748 + 3, 749 + }; 750 + 751 + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, 752 + drm_fb_xrgb8888_to_bgr888_line); 753 + } 754 + EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888); 755 + 705 756 static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels) 706 757 { 707 758 __le32 *dbuf32 = dbuf; ··· 1154 1103 return 0; 1155 1104 } else if (dst_format == DRM_FORMAT_RGB888) { 1156 1105 drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); 1106 + return 0; 1107 + } else if (dst_format == DRM_FORMAT_BGR888) { 1108 + drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state); 1157 1109 return 0; 1158 1110 } else if (dst_format == DRM_FORMAT_ARGB8888) { 1159 1111 drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
+2 -2
drivers/gpu/drm/drm_gem.c
··· 348 348 return -ENOENT; 349 349 350 350 /* Don't allow imported objects to be mapped */ 351 - if (obj->import_attach) { 351 + if (drm_gem_is_imported(obj)) { 352 352 ret = -EINVAL; 353 353 goto out; 354 354 } ··· 1178 1178 drm_vma_node_start(&obj->vma_node)); 1179 1179 drm_printf_indent(p, indent, "size=%zu\n", obj->size); 1180 1180 drm_printf_indent(p, indent, "imported=%s\n", 1181 - str_yes_no(obj->import_attach)); 1181 + str_yes_no(drm_gem_is_imported(obj))); 1182 1182 1183 1183 if (obj->funcs->print_info) 1184 1184 obj->funcs->print_info(p, indent, obj);
+2 -2
drivers/gpu/drm/drm_gem_dma_helper.c
··· 228 228 struct drm_gem_object *gem_obj = &dma_obj->base; 229 229 struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr); 230 230 231 - if (gem_obj->import_attach) { 231 + if (drm_gem_is_imported(gem_obj)) { 232 232 if (dma_obj->vaddr) 233 - dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map); 233 + dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map); 234 234 drm_prime_gem_destroy(gem_obj, dma_obj->sgt); 235 235 } else if (dma_obj->vaddr) { 236 236 if (dma_obj->map_noncoherent)
+4 -8
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 419 419 static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir, 420 420 unsigned int num_planes) 421 421 { 422 - struct dma_buf_attachment *import_attach; 423 422 struct drm_gem_object *obj; 424 423 int ret; 425 424 ··· 427 428 obj = drm_gem_fb_get_obj(fb, num_planes); 428 429 if (!obj) 429 430 continue; 430 - import_attach = obj->import_attach; 431 - if (!import_attach) 431 + if (!drm_gem_is_imported(obj)) 432 432 continue; 433 - ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir); 433 + ret = dma_buf_end_cpu_access(obj->dma_buf, dir); 434 434 if (ret) 435 435 drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n", 436 436 ret, num_planes, dir); ··· 452 454 */ 453 455 int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir) 454 456 { 455 - struct dma_buf_attachment *import_attach; 456 457 struct drm_gem_object *obj; 457 458 unsigned int i; 458 459 int ret; ··· 462 465 ret = -EINVAL; 463 466 goto err___drm_gem_fb_end_cpu_access; 464 467 } 465 - import_attach = obj->import_attach; 466 - if (!import_attach) 468 + if (!drm_gem_is_imported(obj)) 467 469 continue; 468 - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir); 470 + ret = dma_buf_begin_cpu_access(obj->dma_buf, dir); 469 471 if (ret) 470 472 goto err___drm_gem_fb_end_cpu_access; 471 473 }
+15 -15
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 160 160 { 161 161 struct drm_gem_object *obj = &shmem->base; 162 162 163 - if (obj->import_attach) { 163 + if (drm_gem_is_imported(obj)) { 164 164 drm_prime_gem_destroy(obj, shmem->sgt); 165 165 } else { 166 166 dma_resv_lock(shmem->base.resv, NULL); ··· 255 255 256 256 dma_resv_assert_held(shmem->base.resv); 257 257 258 - drm_WARN_ON(shmem->base.dev, shmem->base.import_attach); 258 + drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base)); 259 259 260 260 ret = drm_gem_shmem_get_pages(shmem); 261 261 ··· 286 286 struct drm_gem_object *obj = &shmem->base; 287 287 int ret; 288 288 289 - drm_WARN_ON(obj->dev, obj->import_attach); 289 + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); 290 290 291 291 ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); 292 292 if (ret) ··· 309 309 { 310 310 struct drm_gem_object *obj = &shmem->base; 311 311 312 - drm_WARN_ON(obj->dev, obj->import_attach); 312 + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); 313 313 314 314 dma_resv_lock(shmem->base.resv, NULL); 315 315 drm_gem_shmem_unpin_locked(shmem); ··· 338 338 struct drm_gem_object *obj = &shmem->base; 339 339 int ret = 0; 340 340 341 - if (obj->import_attach) { 342 - ret = dma_buf_vmap(obj->import_attach->dmabuf, map); 341 + if (drm_gem_is_imported(obj)) { 342 + ret = dma_buf_vmap(obj->dma_buf, map); 343 343 if (!ret) { 344 344 if (drm_WARN_ON(obj->dev, map->is_iomem)) { 345 - dma_buf_vunmap(obj->import_attach->dmabuf, map); 345 + dma_buf_vunmap(obj->dma_buf, map); 346 346 return -EIO; 347 347 } 348 348 } ··· 378 378 return 0; 379 379 380 380 err_put_pages: 381 - if (!obj->import_attach) 381 + if (!drm_gem_is_imported(obj)) 382 382 drm_gem_shmem_put_pages(shmem); 383 383 err_zero_use: 384 384 shmem->vmap_use_count = 0; ··· 404 404 { 405 405 struct drm_gem_object *obj = &shmem->base; 406 406 407 - if (obj->import_attach) { 408 - dma_buf_vunmap(obj->import_attach->dmabuf, map); 407 + if (drm_gem_is_imported(obj)) { 408 + dma_buf_vunmap(obj->dma_buf, map); 409 
409 } else { 410 410 dma_resv_assert_held(shmem->base.resv); 411 411 ··· 566 566 struct drm_gem_object *obj = vma->vm_private_data; 567 567 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 568 568 569 - drm_WARN_ON(obj->dev, obj->import_attach); 569 + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); 570 570 571 571 dma_resv_lock(shmem->base.resv, NULL); 572 572 ··· 618 618 struct drm_gem_object *obj = &shmem->base; 619 619 int ret; 620 620 621 - if (obj->import_attach) { 621 + if (drm_gem_is_imported(obj)) { 622 622 /* Reset both vm_ops and vm_private_data, so we don't end up with 623 623 * vm_ops pointing to our implementation if the dma-buf backend 624 624 * doesn't set those fields. ··· 663 663 void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, 664 664 struct drm_printer *p, unsigned int indent) 665 665 { 666 - if (shmem->base.import_attach) 666 + if (drm_gem_is_imported(&shmem->base)) 667 667 return; 668 668 669 669 drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); ··· 690 690 { 691 691 struct drm_gem_object *obj = &shmem->base; 692 692 693 - drm_WARN_ON(obj->dev, obj->import_attach); 693 + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); 694 694 695 695 return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT); 696 696 } ··· 705 705 if (shmem->sgt) 706 706 return shmem->sgt; 707 707 708 - drm_WARN_ON(obj->dev, obj->import_attach); 708 + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); 709 709 710 710 ret = drm_gem_shmem_get_pages(shmem); 711 711 if (ret)
+1 -1
drivers/gpu/drm/drm_mipi_dbi.c
··· 218 218 switch (fb->format->format) { 219 219 case DRM_FORMAT_RGB565: 220 220 if (swap) 221 - drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach, 221 + drm_fb_swab(&dst_map, NULL, src, fb, clip, !drm_gem_is_imported(gem), 222 222 fmtcnv_state); 223 223 else 224 224 drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
+6 -6
drivers/gpu/drm/drm_mipi_dsi.c
··· 162 162 u32 reg; 163 163 164 164 if (of_alias_from_compatible(node, info.type, sizeof(info.type)) < 0) { 165 - drm_err(host, "modalias failure on %pOF\n", node); 165 + dev_err(host->dev, "modalias failure on %pOF\n", node); 166 166 return ERR_PTR(-EINVAL); 167 167 } 168 168 169 169 ret = of_property_read_u32(node, "reg", &reg); 170 170 if (ret) { 171 - drm_err(host, "device node %pOF has no valid reg property: %d\n", 171 + dev_err(host->dev, "device node %pOF has no valid reg property: %d\n", 172 172 node, ret); 173 173 return ERR_PTR(-EINVAL); 174 174 } ··· 206 206 int ret; 207 207 208 208 if (!info) { 209 - drm_err(host, "invalid mipi_dsi_device_info pointer\n"); 209 + dev_err(host->dev, "invalid mipi_dsi_device_info pointer\n"); 210 210 return ERR_PTR(-EINVAL); 211 211 } 212 212 213 213 if (info->channel > 3) { 214 - drm_err(host, "invalid virtual channel: %u\n", info->channel); 214 + dev_err(host->dev, "invalid virtual channel: %u\n", info->channel); 215 215 return ERR_PTR(-EINVAL); 216 216 } 217 217 218 218 dsi = mipi_dsi_device_alloc(host); 219 219 if (IS_ERR(dsi)) { 220 - drm_err(host, "failed to allocate DSI device %ld\n", 220 + dev_err(host->dev, "failed to allocate DSI device %ld\n", 221 221 PTR_ERR(dsi)); 222 222 return dsi; 223 223 } ··· 228 228 229 229 ret = mipi_dsi_device_add(dsi); 230 230 if (ret) { 231 - drm_err(host, "failed to add DSI device %d\n", ret); 231 + dev_err(host->dev, "failed to add DSI device %d\n", ret); 232 232 kfree(dsi); 233 233 return ERR_PTR(ret); 234 234 }
+1 -7
drivers/gpu/drm/drm_prime.c
··· 453 453 } 454 454 455 455 mutex_lock(&dev->object_name_lock); 456 - /* re-export the original imported object */ 457 - if (obj->import_attach) { 458 - dmabuf = obj->import_attach->dmabuf; 459 - get_dma_buf(dmabuf); 460 - goto out_have_obj; 461 - } 462 - 456 + /* re-export the original imported/exported object */ 463 457 if (obj->dma_buf) { 464 458 get_dma_buf(obj->dma_buf); 465 459 dmabuf = obj->dma_buf;
+1 -1
drivers/gpu/drm/gma500/cdv_device.c
··· 215 215 * Bonus Launch to work around the issue, by degrading 216 216 * performance. 217 217 */ 218 - CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108); 218 + CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108); 219 219 } 220 220 221 221 /**
+1 -2
drivers/gpu/drm/gma500/cdv_intel_dp.c
··· 855 855 856 856 memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); 857 857 intel_dp->adapter.owner = THIS_MODULE; 858 - strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 859 - intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 858 + strscpy(intel_dp->adapter.name, name); 860 859 intel_dp->adapter.algo_data = &intel_dp->algo; 861 860 intel_dp->adapter.dev.parent = connector->base.kdev; 862 861
+21 -11
drivers/gpu/drm/nouveau/include/nvif/ioctl.h
··· 3 3 #define __NVIF_IOCTL_H__ 4 4 5 5 struct nvif_ioctl_v0 { 6 - __u8 version; 6 + /* New members MUST be added within the struct_group() macro below. */ 7 + struct_group_tagged(nvif_ioctl_v0_hdr, __hdr, 8 + __u8 version; 7 9 #define NVIF_IOCTL_V0_SCLASS 0x01 8 10 #define NVIF_IOCTL_V0_NEW 0x02 9 11 #define NVIF_IOCTL_V0_DEL 0x03 10 12 #define NVIF_IOCTL_V0_MTHD 0x04 11 13 #define NVIF_IOCTL_V0_MAP 0x07 12 14 #define NVIF_IOCTL_V0_UNMAP 0x08 13 - __u8 type; 14 - __u8 pad02[4]; 15 + __u8 type; 16 + __u8 pad02[4]; 15 17 #define NVIF_IOCTL_V0_OWNER_NVIF 0x00 16 18 #define NVIF_IOCTL_V0_OWNER_ANY 0xff 17 - __u8 owner; 19 + __u8 owner; 18 20 #define NVIF_IOCTL_V0_ROUTE_NVIF 0x00 19 21 #define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff 20 - __u8 route; 21 - __u64 token; 22 - __u64 object; 22 + __u8 route; 23 + __u64 token; 24 + __u64 object; 25 + ); 23 26 __u8 data[]; /* ioctl data (below) */ 24 27 }; 28 + static_assert(offsetof(struct nvif_ioctl_v0, data) == sizeof(struct nvif_ioctl_v0_hdr), 29 + "struct member likely outside of struct_group()"); 25 30 26 31 struct nvif_ioctl_sclass_v0 { 27 32 /* nvif_ioctl ... */ ··· 56 51 }; 57 52 58 53 struct nvif_ioctl_mthd_v0 { 59 - /* nvif_ioctl ... */ 60 - __u8 version; 61 - __u8 method; 62 - __u8 pad02[6]; 54 + /* New members MUST be added within the struct_group() macro below. */ 55 + struct_group_tagged(nvif_ioctl_mthd_v0_hdr, __hdr, 56 + /* nvif_ioctl ... */ 57 + __u8 version; 58 + __u8 method; 59 + __u8 pad02[6]; 60 + ); 63 61 __u8 data[]; /* method data (class.h) */ 64 62 }; 63 + static_assert(offsetof(struct nvif_ioctl_mthd_v0, data) == sizeof(struct nvif_ioctl_mthd_v0_hdr), 64 + "struct member likely outside of struct_group()"); 65 65 66 66 struct nvif_ioctl_map_v0 { 67 67 /* nvif_ioctl ... */
+2 -2
drivers/gpu/drm/nouveau/nouveau_svm.c
··· 79 79 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a) 80 80 81 81 struct nouveau_pfnmap_args { 82 - struct nvif_ioctl_v0 i; 83 - struct nvif_ioctl_mthd_v0 m; 82 + struct nvif_ioctl_v0_hdr i; 83 + struct nvif_ioctl_mthd_v0_hdr m; 84 84 struct nvif_vmm_pfnmap_v0 p; 85 85 }; 86 86
+6 -6
drivers/gpu/drm/nouveau/nvif/object.c
··· 57 57 nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass) 58 58 { 59 59 struct { 60 - struct nvif_ioctl_v0 ioctl; 60 + struct nvif_ioctl_v0_hdr ioctl; 61 61 struct nvif_ioctl_sclass_v0 sclass; 62 62 } *args = NULL; 63 63 int ret, cnt = 0, i; ··· 101 101 nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size) 102 102 { 103 103 struct { 104 - struct nvif_ioctl_v0 ioctl; 104 + struct nvif_ioctl_v0_hdr ioctl; 105 105 struct nvif_ioctl_mthd_v0 mthd; 106 106 } *args; 107 107 u32 args_size; ··· 135 135 nvif_object_unmap_handle(struct nvif_object *object) 136 136 { 137 137 struct { 138 - struct nvif_ioctl_v0 ioctl; 138 + struct nvif_ioctl_v0_hdr ioctl; 139 139 struct nvif_ioctl_unmap unmap; 140 140 } args = { 141 141 .ioctl.type = NVIF_IOCTL_V0_UNMAP, ··· 149 149 u64 *handle, u64 *length) 150 150 { 151 151 struct { 152 - struct nvif_ioctl_v0 ioctl; 152 + struct nvif_ioctl_v0_hdr ioctl; 153 153 struct nvif_ioctl_map_v0 map; 154 154 } *args; 155 155 u32 argn = sizeof(*args) + argc; ··· 211 211 nvif_object_dtor(struct nvif_object *object) 212 212 { 213 213 struct { 214 - struct nvif_ioctl_v0 ioctl; 214 + struct nvif_ioctl_v0_hdr ioctl; 215 215 struct nvif_ioctl_del del; 216 216 } args = { 217 217 .ioctl.type = NVIF_IOCTL_V0_DEL, ··· 230 230 s32 oclass, void *data, u32 size, struct nvif_object *object) 231 231 { 232 232 struct { 233 - struct nvif_ioctl_v0 ioctl; 233 + struct nvif_ioctl_v0_hdr ioctl; 234 234 struct nvif_ioctl_new_v0 new; 235 235 } *args; 236 236 int ret = 0;
+2
drivers/gpu/drm/panel/Kconfig
··· 1020 1020 depends on OF 1021 1021 depends on DRM_MIPI_DSI 1022 1022 depends on BACKLIGHT_CLASS_DEVICE 1023 + select DRM_DISPLAY_DSC_HELPER 1024 + select DRM_DISPLAY_HELPER 1023 1025 help 1024 1026 Say Y here if you want to enable support for Visionox RM692E5 amoled 1025 1027 display panels, such as the one found in the Nothing Phone (1)
+4 -5
drivers/gpu/drm/panthor/panthor_fw.c
··· 636 636 u32 ehdr) 637 637 { 638 638 struct panthor_fw_build_info_hdr hdr; 639 - char header[9]; 640 - const char git_sha_header[sizeof(header)] = "git_sha: "; 639 + static const char git_sha_header[] = "git_sha: "; 640 + const int header_len = sizeof(git_sha_header) - 1; 641 641 int ret; 642 642 643 643 ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr)); ··· 651 651 return 0; 652 652 } 653 653 654 - if (memcmp(git_sha_header, fw->data + hdr.meta_start, 655 - sizeof(git_sha_header))) { 654 + if (memcmp(git_sha_header, fw->data + hdr.meta_start, header_len)) { 656 655 /* Not the expected header, this isn't metadata we understand */ 657 656 return 0; 658 657 } ··· 664 665 } 665 666 666 667 drm_info(&ptdev->base, "Firmware git sha: %s\n", 667 - fw->data + hdr.meta_start + sizeof(git_sha_header)); 668 + fw->data + hdr.meta_start + header_len); 668 669 669 670 return 0; 670 671 }
+3 -3
drivers/gpu/drm/panthor/panthor_fw.h
··· 102 102 #define CS_STATUS_BLOCKED_REASON_SB_WAIT 1 103 103 #define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT 2 104 104 #define CS_STATUS_BLOCKED_REASON_SYNC_WAIT 3 105 - #define CS_STATUS_BLOCKED_REASON_DEFERRED 5 106 - #define CS_STATUS_BLOCKED_REASON_RES 6 107 - #define CS_STATUS_BLOCKED_REASON_FLUSH 7 105 + #define CS_STATUS_BLOCKED_REASON_DEFERRED 4 106 + #define CS_STATUS_BLOCKED_REASON_RESOURCE 5 107 + #define CS_STATUS_BLOCKED_REASON_FLUSH 6 108 108 #define CS_STATUS_BLOCKED_REASON_MASK GENMASK(3, 0) 109 109 u32 status_blocked_reason; 110 110 u32 status_wait_sync_value_hi;
+30 -32
drivers/gpu/drm/panthor/panthor_heap.c
··· 97 97 98 98 /** @gpu_contexts: Buffer object containing the GPU heap contexts. */ 99 99 struct panthor_kernel_bo *gpu_contexts; 100 + 101 + /** @size: Size of all chunks across all heaps in the pool. */ 102 + atomic_t size; 100 103 }; 101 104 102 105 static int panthor_heap_ctx_stride(struct panthor_device *ptdev) ··· 121 118 panthor_get_heap_ctx_offset(pool, id); 122 119 } 123 120 124 - static void panthor_free_heap_chunk(struct panthor_vm *vm, 121 + static void panthor_free_heap_chunk(struct panthor_heap_pool *pool, 125 122 struct panthor_heap *heap, 126 123 struct panthor_heap_chunk *chunk) 127 124 { ··· 130 127 heap->chunk_count--; 131 128 mutex_unlock(&heap->lock); 132 129 130 + atomic_sub(heap->chunk_size, &pool->size); 131 + 133 132 panthor_kernel_bo_destroy(chunk->bo); 134 133 kfree(chunk); 135 134 } 136 135 137 - static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, 138 - struct panthor_vm *vm, 136 + static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool, 139 137 struct panthor_heap *heap, 140 138 bool initial_chunk) 141 139 { ··· 148 144 if (!chunk) 149 145 return -ENOMEM; 150 146 151 - chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size, 147 + chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size, 152 148 DRM_PANTHOR_BO_NO_MMAP, 153 149 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC, 154 150 PANTHOR_VM_KERNEL_AUTO_VA); ··· 184 180 heap->chunk_count++; 185 181 mutex_unlock(&heap->lock); 186 182 183 + atomic_add(heap->chunk_size, &pool->size); 184 + 187 185 return 0; 188 186 189 187 err_destroy_bo: ··· 197 191 return ret; 198 192 } 199 193 200 - static void panthor_free_heap_chunks(struct panthor_vm *vm, 194 + static void panthor_free_heap_chunks(struct panthor_heap_pool *pool, 201 195 struct panthor_heap *heap) 202 196 { 203 197 struct panthor_heap_chunk *chunk, *tmp; 204 198 205 199 list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) 206 - panthor_free_heap_chunk(vm, heap, chunk); 200 + 
panthor_free_heap_chunk(pool, heap, chunk); 207 201 } 208 202 209 - static int panthor_alloc_heap_chunks(struct panthor_device *ptdev, 210 - struct panthor_vm *vm, 203 + static int panthor_alloc_heap_chunks(struct panthor_heap_pool *pool, 211 204 struct panthor_heap *heap, 212 205 u32 chunk_count) 213 206 { ··· 214 209 u32 i; 215 210 216 211 for (i = 0; i < chunk_count; i++) { 217 - ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true); 212 + ret = panthor_alloc_heap_chunk(pool, heap, true); 218 213 if (ret) 219 214 return ret; 220 215 } ··· 231 226 if (!heap) 232 227 return -EINVAL; 233 228 234 - panthor_free_heap_chunks(pool->vm, heap); 229 + panthor_free_heap_chunks(pool, heap); 235 230 mutex_destroy(&heap->lock); 236 231 kfree(heap); 237 232 return 0; ··· 313 308 heap->max_chunks = max_chunks; 314 309 heap->target_in_flight = target_in_flight; 315 310 316 - ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap, 317 - initial_chunk_count); 311 + ret = panthor_alloc_heap_chunks(pool, heap, initial_chunk_count); 318 312 if (ret) 319 313 goto err_free_heap; 320 314 ··· 346 342 return id; 347 343 348 344 err_free_heap: 349 - panthor_free_heap_chunks(pool->vm, heap); 345 + panthor_free_heap_chunks(pool, heap); 350 346 mutex_destroy(&heap->lock); 351 347 kfree(heap); 352 348 ··· 393 389 removed = chunk; 394 390 list_del(&chunk->node); 395 391 heap->chunk_count--; 392 + atomic_sub(heap->chunk_size, &pool->size); 396 393 break; 397 394 } 398 395 } ··· 471 466 * further jobs in this queue fail immediately instead of having to 472 467 * wait for the job timeout. 
473 468 */ 474 - ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false); 469 + ret = panthor_alloc_heap_chunk(pool, heap, false); 475 470 if (ret) 476 471 goto out_unlock; 477 472 ··· 565 560 if (ret) 566 561 goto err_destroy_pool; 567 562 563 + atomic_add(pool->gpu_contexts->obj->size, &pool->size); 564 + 568 565 return pool; 569 566 570 567 err_destroy_pool: ··· 601 594 xa_for_each(&pool->xa, i, heap) 602 595 drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i)); 603 596 604 - if (!IS_ERR_OR_NULL(pool->gpu_contexts)) 597 + if (!IS_ERR_OR_NULL(pool->gpu_contexts)) { 598 + atomic_sub(pool->gpu_contexts->obj->size, &pool->size); 605 599 panthor_kernel_bo_destroy(pool->gpu_contexts); 600 + } 606 601 607 602 /* Reflects the fact the pool has been destroyed. */ 608 603 pool->vm = NULL; ··· 614 605 } 615 606 616 607 /** 617 - * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool 618 - * @pool: Pool whose total chunk size to calculate. 608 + * panthor_heap_pool_size() - Get a heap pool's total size 609 + * @pool: Pool whose total chunks size to return 619 610 * 620 - * This function adds the size of all heap chunks across all heaps in the 621 - * argument pool. It also adds the size of the gpu contexts kernel bo. 622 - * It is meant to be used by fdinfo for displaying the size of internal 623 - * driver BO's that aren't exposed to userspace through a GEM handle. 
611 + * Returns the aggregated size of all chunks for all heaps in the pool 624 612 * 625 613 */ 626 614 size_t panthor_heap_pool_size(struct panthor_heap_pool *pool) 627 615 { 628 - struct panthor_heap *heap; 629 - unsigned long i; 630 - size_t size = 0; 616 + if (!pool) 617 + return 0; 631 618 632 - down_read(&pool->lock); 633 - xa_for_each(&pool->xa, i, heap) 634 - size += heap->chunk_size * heap->chunk_count; 635 - up_read(&pool->lock); 636 - 637 - size += pool->gpu_contexts->obj->size; 638 - 639 - return size; 619 + return atomic_read(&pool->size); 640 620 }
+1 -7
drivers/gpu/drm/panthor/panthor_mmu.c
··· 1963 1963 1964 1964 xa_lock(&pfile->vms->xa); 1965 1965 xa_for_each(&pfile->vms->xa, i, vm) { 1966 - size_t size = 0; 1967 - 1968 - mutex_lock(&vm->heaps.lock); 1969 - if (vm->heaps.pool) 1970 - size = panthor_heap_pool_size(vm->heaps.pool); 1971 - mutex_unlock(&vm->heaps.lock); 1972 - 1966 + size_t size = panthor_heap_pool_size(vm->heaps.pool); 1973 1967 stats->resident += size; 1974 1968 if (vm->as.id >= 0) 1975 1969 stats->active += size;
+12 -14
drivers/gpu/drm/panthor/panthor_sched.c
··· 9 9 #include <drm/panthor_drm.h> 10 10 11 11 #include <linux/build_bug.h> 12 + #include <linux/cleanup.h> 12 13 #include <linux/clk.h> 13 14 #include <linux/delay.h> 14 15 #include <linux/dma-mapping.h> ··· 632 631 struct panthor_gpu_usage data; 633 632 634 633 /** 635 - * @lock: Mutex to govern concurrent access from drm file's fdinfo callback 636 - * and job post-completion processing function 634 + * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo 635 + * callback and job post-completion processing function 637 636 */ 638 - struct mutex lock; 637 + spinlock_t lock; 639 638 640 639 /** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */ 641 640 size_t kbo_sizes; ··· 910 909 struct panthor_group, 911 910 release_work); 912 911 u32 i; 913 - 914 - mutex_destroy(&group->fdinfo.lock); 915 912 916 913 for (i = 0; i < group->queue_count; i++) 917 914 group_free_queue(group, group->queues[i]); ··· 2860 2861 struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap; 2861 2862 struct panthor_job_profiling_data *data = &slots[job->profiling.slot]; 2862 2863 2863 - mutex_lock(&group->fdinfo.lock); 2864 - if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) 2865 - fdinfo->cycles += data->cycles.after - data->cycles.before; 2866 - if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) 2867 - fdinfo->time += data->time.after - data->time.before; 2868 - mutex_unlock(&group->fdinfo.lock); 2864 + scoped_guard(spinlock, &group->fdinfo.lock) { 2865 + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) 2866 + fdinfo->cycles += data->cycles.after - data->cycles.before; 2867 + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) 2868 + fdinfo->time += data->time.after - data->time.before; 2869 + } 2869 2870 } 2870 2871 2871 2872 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) ··· 2879 2880 2880 2881 xa_lock(&gpool->xa); 2881 2882 xa_for_each(&gpool->xa, i, group) { 
2882 - mutex_lock(&group->fdinfo.lock); 2883 + guard(spinlock)(&group->fdinfo.lock); 2883 2884 pfile->stats.cycles += group->fdinfo.data.cycles; 2884 2885 pfile->stats.time += group->fdinfo.data.time; 2885 2886 group->fdinfo.data.cycles = 0; 2886 2887 group->fdinfo.data.time = 0; 2887 - mutex_unlock(&group->fdinfo.lock); 2888 2888 } 2889 2889 xa_unlock(&gpool->xa); 2890 2890 } ··· 3535 3537 mutex_unlock(&sched->reset.lock); 3536 3538 3537 3539 add_group_kbo_sizes(group->ptdev, group); 3538 - mutex_init(&group->fdinfo.lock); 3540 + spin_lock_init(&group->fdinfo.lock); 3539 3541 3540 3542 return gid; 3541 3543
+77 -37
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
··· 32 32 33 33 #include "rockchip_drm_drv.h" 34 34 35 - #define RK3288_GRF_SOC_CON6 0x25c 36 - #define RK3288_EDP_LCDC_SEL BIT(5) 37 - #define RK3399_GRF_SOC_CON20 0x6250 38 - #define RK3399_EDP_LCDC_SEL BIT(5) 39 - 40 - #define HIWORD_UPDATE(val, mask) (val | (mask) << 16) 41 - 42 35 #define PSR_WAIT_LINE_FLAG_TIMEOUT_MS 100 36 + 37 + #define GRF_REG_FIELD(_reg, _lsb, _msb) { \ 38 + .reg = _reg, \ 39 + .lsb = _lsb, \ 40 + .msb = _msb, \ 41 + .valid = true, \ 42 + } 43 + 44 + struct rockchip_grf_reg_field { 45 + u32 reg; 46 + u32 lsb; 47 + u32 msb; 48 + bool valid; 49 + }; 43 50 44 51 /** 45 52 * struct rockchip_dp_chip_data - splite the grf setting of kind of chips 46 - * @lcdsel_grf_reg: grf register offset of lcdc select 47 - * @lcdsel_big: reg value of selecting vop big for eDP 48 - * @lcdsel_lit: reg value of selecting vop little for eDP 53 + * @lcdc_sel: grf register field of lcdc_sel 49 54 * @chip_type: specific chip type 55 + * @reg: register base address 50 56 */ 51 57 struct rockchip_dp_chip_data { 52 - u32 lcdsel_grf_reg; 53 - u32 lcdsel_big; 54 - u32 lcdsel_lit; 58 + const struct rockchip_grf_reg_field lcdc_sel; 55 59 u32 chip_type; 60 + u32 reg; 56 61 }; 57 62 58 63 struct rockchip_dp_device { ··· 87 82 static struct rockchip_dp_device *pdata_encoder_to_dp(struct analogix_dp_plat_data *plat_data) 88 83 { 89 84 return container_of(plat_data, struct rockchip_dp_device, plat_data); 85 + } 86 + 87 + static int rockchip_grf_write(struct regmap *grf, u32 reg, u32 mask, u32 val) 88 + { 89 + return regmap_write(grf, reg, (mask << 16) | (val & mask)); 90 + } 91 + 92 + static int rockchip_grf_field_write(struct regmap *grf, 93 + const struct rockchip_grf_reg_field *field, 94 + u32 val) 95 + { 96 + u32 mask; 97 + 98 + if (!field->valid) 99 + return 0; 100 + 101 + mask = GENMASK(field->msb, field->lsb); 102 + val <<= field->lsb; 103 + 104 + return rockchip_grf_write(grf, field->reg, mask, val); 90 105 } 91 106 92 107 static int rockchip_dp_pre_init(struct 
rockchip_dp_device *dp) ··· 206 181 struct drm_crtc *crtc; 207 182 struct drm_crtc_state *old_crtc_state; 208 183 int ret; 209 - u32 val; 210 184 211 185 crtc = rockchip_dp_drm_get_new_crtc(encoder, state); 212 186 if (!crtc) ··· 216 192 if (old_crtc_state && old_crtc_state->self_refresh_active) 217 193 return; 218 194 219 - ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); 220 - if (ret < 0) 221 - return; 222 - 223 - if (ret) 224 - val = dp->data->lcdsel_lit; 225 - else 226 - val = dp->data->lcdsel_big; 227 - 228 - DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG"); 229 - 230 195 ret = clk_prepare_enable(dp->grfclk); 231 196 if (ret < 0) { 232 197 DRM_DEV_ERROR(dp->dev, "failed to enable grfclk %d\n", ret); 233 198 return; 234 199 } 235 200 236 - ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val); 201 + ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); 202 + if (ret < 0) 203 + return; 204 + 205 + DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? 
"LIT" : "BIG"); 206 + 207 + ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret); 237 208 if (ret != 0) 238 209 DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret); 239 210 ··· 398 379 const struct rockchip_dp_chip_data *dp_data; 399 380 struct drm_panel *panel = NULL; 400 381 struct rockchip_dp_device *dp; 382 + struct resource *res; 383 + int i; 401 384 int ret; 402 385 403 386 dp_data = of_device_get_match_data(dev); ··· 414 393 if (!dp) 415 394 return -ENOMEM; 416 395 396 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 397 + 398 + i = 0; 399 + while (dp_data[i].reg) { 400 + if (dp_data[i].reg == res->start) { 401 + dp->data = &dp_data[i]; 402 + break; 403 + } 404 + 405 + i++; 406 + } 407 + 408 + if (!dp->data) 409 + return dev_err_probe(dev, -EINVAL, "no chip-data for %s node\n", 410 + dev->of_node->name); 411 + 417 412 dp->dev = dev; 418 413 dp->adp = ERR_PTR(-ENODEV); 419 - dp->data = dp_data; 420 414 dp->plat_data.panel = panel; 421 415 dp->plat_data.dev_type = dp->data->chip_type; 422 416 dp->plat_data.power_on = rockchip_dp_poweron; ··· 483 447 static DEFINE_RUNTIME_DEV_PM_OPS(rockchip_dp_pm_ops, rockchip_dp_suspend, 484 448 rockchip_dp_resume, NULL); 485 449 486 - static const struct rockchip_dp_chip_data rk3399_edp = { 487 - .lcdsel_grf_reg = RK3399_GRF_SOC_CON20, 488 - .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL), 489 - .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL), 490 - .chip_type = RK3399_EDP, 450 + static const struct rockchip_dp_chip_data rk3399_edp[] = { 451 + { 452 + .lcdc_sel = GRF_REG_FIELD(0x6250, 5, 5), 453 + .chip_type = RK3399_EDP, 454 + .reg = 0xff970000, 455 + }, 456 + { /* sentinel */ } 491 457 }; 492 458 493 - static const struct rockchip_dp_chip_data rk3288_dp = { 494 - .lcdsel_grf_reg = RK3288_GRF_SOC_CON6, 495 - .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL), 496 - .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL), 497 - .chip_type = RK3288_DP, 459 + 
static const struct rockchip_dp_chip_data rk3288_dp[] = { 460 + { 461 + .lcdc_sel = GRF_REG_FIELD(0x025c, 5, 5), 462 + .chip_type = RK3288_DP, 463 + .reg = 0xff970000, 464 + }, 465 + { /* sentinel */ } 498 466 }; 499 467 500 468 static const struct of_device_id rockchip_dp_dt_ids[] = {
+8 -8
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
··· 203 203 204 204 hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); 205 205 if (IS_ERR(hdmi->regmap)) { 206 - drm_err(hdmi, "Unable to get rockchip,grf\n"); 206 + dev_err(hdmi->dev, "Unable to get rockchip,grf\n"); 207 207 return PTR_ERR(hdmi->regmap); 208 208 } 209 209 ··· 214 214 if (IS_ERR(hdmi->ref_clk)) { 215 215 ret = PTR_ERR(hdmi->ref_clk); 216 216 if (ret != -EPROBE_DEFER) 217 - drm_err(hdmi, "failed to get reference clock\n"); 217 + dev_err(hdmi->dev, "failed to get reference clock\n"); 218 218 return ret; 219 219 } 220 220 ··· 222 222 if (IS_ERR(hdmi->grf_clk)) { 223 223 ret = PTR_ERR(hdmi->grf_clk); 224 224 if (ret != -EPROBE_DEFER) 225 - drm_err(hdmi, "failed to get grf clock\n"); 225 + dev_err(hdmi->dev, "failed to get grf clock\n"); 226 226 return ret; 227 227 } 228 228 ··· 302 302 303 303 ret = clk_prepare_enable(hdmi->grf_clk); 304 304 if (ret < 0) { 305 - drm_err(hdmi, "failed to enable grfclk %d\n", ret); 305 + dev_err(hdmi->dev, "failed to enable grfclk %d\n", ret); 306 306 return; 307 307 } 308 308 309 309 ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val); 310 310 if (ret != 0) 311 - drm_err(hdmi, "Could not write to GRF: %d\n", ret); 311 + dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret); 312 312 313 313 clk_disable_unprepare(hdmi->grf_clk); 314 - drm_dbg(hdmi, "vop %s output to hdmi\n", ret ? "LIT" : "BIG"); 314 + dev_dbg(hdmi->dev, "vop %s output to hdmi\n", ret ? "LIT" : "BIG"); 315 315 } 316 316 317 317 static int ··· 574 574 ret = rockchip_hdmi_parse_dt(hdmi); 575 575 if (ret) { 576 576 if (ret != -EPROBE_DEFER) 577 - drm_err(hdmi, "Unable to parse OF data\n"); 577 + dev_err(hdmi->dev, "Unable to parse OF data\n"); 578 578 return ret; 579 579 } 580 580 ··· 582 582 if (IS_ERR(hdmi->phy)) { 583 583 ret = PTR_ERR(hdmi->phy); 584 584 if (ret != -EPROBE_DEFER) 585 - drm_err(hdmi, "failed to get phy\n"); 585 + dev_err(hdmi->dev, "failed to get phy\n"); 586 586 return ret; 587 587 } 588 588
+8 -8
drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
··· 242 242 if (drm) { 243 243 changed = drm_helper_hpd_irq_event(drm); 244 244 if (changed) 245 - drm_dbg(hdmi, "connector status changed\n"); 245 + dev_dbg(hdmi->dev, "connector status changed\n"); 246 246 } 247 247 } 248 248 ··· 472 472 } 473 473 } 474 474 if (hdmi->port_id < 0) { 475 - drm_err(hdmi, "Failed to match HDMI port ID\n"); 475 + dev_err(hdmi->dev, "Failed to match HDMI port ID\n"); 476 476 return hdmi->port_id; 477 477 } 478 478 ··· 496 496 hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, 497 497 "rockchip,grf"); 498 498 if (IS_ERR(hdmi->regmap)) { 499 - drm_err(hdmi, "Unable to get rockchip,grf\n"); 499 + dev_err(hdmi->dev, "Unable to get rockchip,grf\n"); 500 500 return PTR_ERR(hdmi->regmap); 501 501 } 502 502 503 503 hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, 504 504 "rockchip,vo-grf"); 505 505 if (IS_ERR(hdmi->vo_regmap)) { 506 - drm_err(hdmi, "Unable to get rockchip,vo-grf\n"); 506 + dev_err(hdmi->dev, "Unable to get rockchip,vo-grf\n"); 507 507 return PTR_ERR(hdmi->vo_regmap); 508 508 } 509 509 510 510 ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks); 511 511 if (ret < 0) { 512 - drm_err(hdmi, "Failed to get clocks: %d\n", ret); 512 + dev_err(hdmi->dev, "Failed to get clocks: %d\n", ret); 513 513 return ret; 514 514 } 515 515 ··· 517 517 GPIOD_OUT_HIGH); 518 518 if (IS_ERR(hdmi->enable_gpio)) { 519 519 ret = PTR_ERR(hdmi->enable_gpio); 520 - drm_err(hdmi, "Failed to request enable GPIO: %d\n", ret); 520 + dev_err(hdmi->dev, "Failed to request enable GPIO: %d\n", ret); 521 521 return ret; 522 522 } 523 523 ··· 525 525 if (IS_ERR(hdmi->phy)) { 526 526 ret = PTR_ERR(hdmi->phy); 527 527 if (ret != -EPROBE_DEFER) 528 - drm_err(hdmi, "failed to get phy: %d\n", ret); 528 + dev_err(hdmi->dev, "failed to get phy: %d\n", ret); 529 529 return ret; 530 530 } 531 531 ··· 564 564 connector = drm_bridge_connector_init(drm, encoder); 565 565 if (IS_ERR(connector)) { 566 566 ret = PTR_ERR(connector); 567 - drm_err(hdmi, 
"failed to init bridge connector: %d\n", ret); 567 + dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret); 568 568 return ret; 569 569 } 570 570
+4 -2
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 484 484 485 485 static void rockchip_drm_platform_shutdown(struct platform_device *pdev) 486 486 { 487 - struct drm_device *drm = platform_get_drvdata(pdev); 487 + if (component_master_is_bound(&pdev->dev, &rockchip_drm_ops)) { 488 + struct drm_device *drm = platform_get_drvdata(pdev); 488 489 489 - drm_atomic_helper_shutdown(drm); 490 + drm_atomic_helper_shutdown(drm); 491 + } 490 492 } 491 493 492 494 static const struct of_device_id rockchip_drm_dt_ids[] = {
+291 -1283
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
··· 33 33 #include <drm/drm_vblank.h> 34 34 35 35 #include <uapi/linux/videodev2.h> 36 - #include <dt-bindings/soc/rockchip,vop2.h> 37 36 38 37 #include "rockchip_drm_gem.h" 39 38 #include "rockchip_drm_vop2.h" ··· 101 102 VOP2_AFBC_FMT_INVALID = -1, 102 103 }; 103 104 104 - union vop2_alpha_ctrl { 105 - u32 val; 106 - struct { 107 - /* [0:1] */ 108 - u32 color_mode:1; 109 - u32 alpha_mode:1; 110 - /* [2:3] */ 111 - u32 blend_mode:2; 112 - u32 alpha_cal_mode:1; 113 - /* [5:7] */ 114 - u32 factor_mode:3; 115 - /* [8:9] */ 116 - u32 alpha_en:1; 117 - u32 src_dst_swap:1; 118 - u32 reserved:6; 119 - /* [16:23] */ 120 - u32 glb_alpha:8; 121 - } bits; 122 - }; 123 - 124 - struct vop2_alpha { 125 - union vop2_alpha_ctrl src_color_ctrl; 126 - union vop2_alpha_ctrl dst_color_ctrl; 127 - union vop2_alpha_ctrl src_alpha_ctrl; 128 - union vop2_alpha_ctrl dst_alpha_ctrl; 129 - }; 130 - 131 - struct vop2_alpha_config { 132 - bool src_premulti_en; 133 - bool dst_premulti_en; 134 - bool src_pixel_alpha_en; 135 - bool dst_pixel_alpha_en; 136 - u16 src_glb_alpha_value; 137 - u16 dst_glb_alpha_value; 138 - }; 139 - 140 - struct vop2_win { 141 - struct vop2 *vop2; 142 - struct drm_plane base; 143 - const struct vop2_win_data *data; 144 - struct regmap_field *reg[VOP2_WIN_MAX_REG]; 145 - 146 - /** 147 - * @win_id: graphic window id, a cluster may be split into two 148 - * graphics windows. 
149 - */ 150 - u8 win_id; 151 - u8 delay; 152 - u32 offset; 153 - 154 - enum drm_plane_type type; 155 - }; 156 - 157 - struct vop2_video_port { 158 - struct drm_crtc crtc; 159 - struct vop2 *vop2; 160 - struct clk *dclk; 161 - struct clk *dclk_src; 162 - unsigned int id; 163 - const struct vop2_video_port_data *data; 164 - 165 - struct completion dsp_hold_completion; 166 - 167 - /** 168 - * @win_mask: Bitmask of windows attached to the video port; 169 - */ 170 - u32 win_mask; 171 - 172 - struct vop2_win *primary_plane; 173 - struct drm_pending_vblank_event *event; 174 - 175 - unsigned int nlayers; 176 - }; 177 - 178 - struct vop2 { 179 - struct device *dev; 180 - struct drm_device *drm; 181 - struct vop2_video_port vps[ROCKCHIP_MAX_CRTC]; 182 - 183 - const struct vop2_data *data; 184 - /* 185 - * Number of windows that are registered as plane, may be less than the 186 - * total number of hardware windows. 187 - */ 188 - u32 registered_num_wins; 189 - 190 - struct resource *res; 191 - void __iomem *regs; 192 - struct regmap *map; 193 - 194 - struct regmap *sys_grf; 195 - struct regmap *vop_grf; 196 - struct regmap *vo1_grf; 197 - struct regmap *sys_pmu; 198 - 199 - /* physical map length of vop2 register */ 200 - u32 len; 201 - 202 - void __iomem *lut_regs; 203 - 204 - /* protects crtc enable/disable */ 205 - struct mutex vop2_lock; 206 - 207 - int irq; 208 - 209 - /* 210 - * Some global resources are shared between all video ports(crtcs), so 211 - * we need a ref counter here. 
212 - */ 213 - unsigned int enable_count; 214 - struct clk *hclk; 215 - struct clk *aclk; 216 - struct clk *pclk; 217 - struct clk *pll_hdmiphy0; 218 - 219 - /* optional internal rgb encoder */ 220 - struct rockchip_rgb *rgb; 221 - 222 - /* must be put at the end of the struct */ 223 - struct vop2_win win[]; 224 - }; 225 - 226 105 #define VOP2_MAX_DCLK_RATE 600000000 227 - 228 - #define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \ 229 - (x) == ROCKCHIP_VOP2_EP_HDMI1) 230 - 231 - #define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \ 232 - (x) == ROCKCHIP_VOP2_EP_DP1) 233 - 234 - #define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \ 235 - (x) == ROCKCHIP_VOP2_EP_EDP1) 236 - 237 - #define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \ 238 - (x) == ROCKCHIP_VOP2_EP_MIPI1) 239 - 240 - #define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \ 241 - (x) == ROCKCHIP_VOP2_EP_LVDS1) 242 - 243 - #define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0) 244 106 245 107 /* 246 108 * bus-format types. 
··· 136 276 137 277 static const struct regmap_config vop2_regmap_config; 138 278 139 - static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc) 140 - { 141 - return container_of(crtc, struct vop2_video_port, crtc); 142 - } 143 - 144 - static struct vop2_win *to_vop2_win(struct drm_plane *p) 145 - { 146 - return container_of(p, struct vop2_win, base); 147 - } 148 - 149 279 static void vop2_lock(struct vop2 *vop2) 150 280 { 151 281 mutex_lock(&vop2->vop2_lock); ··· 144 294 static void vop2_unlock(struct vop2 *vop2) 145 295 { 146 296 mutex_unlock(&vop2->vop2_lock); 147 - } 148 - 149 - static void vop2_writel(struct vop2 *vop2, u32 offset, u32 v) 150 - { 151 - regmap_write(vop2->map, offset, v); 152 - } 153 - 154 - static void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v) 155 - { 156 - regmap_write(vp->vop2->map, vp->data->offset + offset, v); 157 - } 158 - 159 - static u32 vop2_readl(struct vop2 *vop2, u32 offset) 160 - { 161 - u32 val; 162 - 163 - regmap_read(vop2->map, offset, &val); 164 - 165 - return val; 166 - } 167 - 168 - static u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset) 169 - { 170 - u32 val; 171 - 172 - regmap_read(vp->vop2->map, vp->data->offset + offset, &val); 173 - 174 - return val; 175 - } 176 - 177 - static void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v) 178 - { 179 - regmap_field_write(win->reg[reg], v); 180 - } 181 - 182 - static bool vop2_cluster_window(const struct vop2_win *win) 183 - { 184 - return win->data->feature & WIN_FEATURE_CLUSTER; 185 297 } 186 298 187 299 /* ··· 355 543 356 544 static bool vop2_output_rg_swap(struct vop2 *vop2, u32 bus_format) 357 545 { 358 - if (vop2->data->soc_id == 3588) { 546 + if (vop2->version == VOP_VERSION_RK3588) { 359 547 if (bus_format == MEDIA_BUS_FMT_YUV8_1X24 || 360 548 bus_format == MEDIA_BUS_FMT_YUV10_1X30) 361 549 return true; ··· 408 596 if (modifier == DRM_FORMAT_MOD_INVALID) 409 597 return false; 410 598 411 - if 
(vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) { 599 + if (vop2->version == VOP_VERSION_RK3568) { 412 600 if (vop2_cluster_window(win)) { 413 601 if (modifier == DRM_FORMAT_MOD_LINEAR) { 414 602 drm_dbg_kms(vop2->drm, ··· 419 607 } 420 608 421 609 if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) { 422 - if (vop2->data->soc_id == 3588) { 610 + if (vop2->version == VOP_VERSION_RK3588) { 423 611 if (!rockchip_afbc(plane, modifier)) { 424 612 drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n"); 425 613 return false; ··· 818 1006 static void vop2_enable(struct vop2 *vop2) 819 1007 { 820 1008 int ret; 1009 + u32 version; 821 1010 822 1011 ret = pm_runtime_resume_and_get(vop2->dev); 823 1012 if (ret < 0) { ··· 838 1025 return; 839 1026 } 840 1027 1028 + version = vop2_readl(vop2, RK3568_VERSION_INFO); 1029 + if (version != vop2->version) { 1030 + drm_err(vop2->drm, "Hardware version(0x%08x) mismatch\n", version); 1031 + return; 1032 + } 1033 + 1034 + /* 1035 + * rk3566 share the same vop version with rk3568, so 1036 + * we need to use soc_id for identification here. 1037 + */ 841 1038 if (vop2->data->soc_id == 3566) 842 1039 vop2_writel(vop2, RK3568_OTP_WIN_EN, 1); 843 1040 844 - if (vop2->data->soc_id == 3588) 1041 + if (vop2->version == VOP_VERSION_RK3588) 845 1042 rk3588_vop2_power_domain_enable_all(vop2); 846 1043 847 1044 vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN); ··· 932 1109 933 1110 static inline bool vop2_supports_seamless_gamma_lut_update(struct vop2 *vop2) 934 1111 { 935 - return (vop2->data->soc_id != 3566 && vop2->data->soc_id != 3568); 1112 + return vop2->version != VOP_VERSION_RK3568; 936 1113 } 937 1114 938 1115 static bool vop2_gamma_lut_in_use(struct vop2 *vop2, struct vop2_video_port *vp) ··· 1274 1451 &fb->format->format, 1275 1452 afbc_en ? 
"AFBC" : "", &yrgb_mst); 1276 1453 1277 - if (vop2->data->soc_id > 3568) { 1454 + if (vop2->version > VOP_VERSION_RK3568) { 1278 1455 vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id); 1279 1456 vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id); 1280 1457 vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id); 1281 1458 } 1459 + 1460 + if (vop2->version >= VOP_VERSION_RK3576) 1461 + vop2_win_write(win, VOP2_WIN_VP_SEL, vp->id); 1282 1462 1283 1463 if (vop2_cluster_window(win)) 1284 1464 vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en); ··· 1337 1511 * this bit is gating disable, we should write 1 to 1338 1512 * disable gating when enable afbc. 1339 1513 */ 1340 - if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568) 1514 + if (vop2->version == VOP_VERSION_RK3568) 1341 1515 vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0); 1342 1516 else 1343 1517 vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1); ··· 1347 1521 else 1348 1522 vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0); 1349 1523 1524 + if (vop2->version >= VOP_VERSION_RK3576) { 1525 + vop2_win_write(win, VOP2_WIN_AFBC_PLD_OFFSET_EN, 1); 1526 + vop2_win_write(win, VOP2_WIN_AFBC_PLD_OFFSET, yrgb_mst); 1527 + } 1528 + 1350 1529 transform_offset = vop2_afbc_transform_offset(pstate, half_block_en); 1351 1530 vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst); 1352 1531 vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info); 1353 - vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, transform_offset); 1532 + vop2_win_write(win, VOP2_WIN_TRANSFORM_OFFSET, transform_offset); 1354 1533 vop2_win_write(win, VOP2_WIN_AFBC_PIC_OFFSET, ((src->x1 >> 16) | src->y1)); 1355 1534 vop2_win_write(win, VOP2_WIN_AFBC_DSP_OFFSET, (dest->x1 | (dest->y1 << 16))); 1356 1535 vop2_win_write(win, VOP2_WIN_AFBC_PIC_VIR_WIDTH, stride); ··· 1366 1535 } else { 1367 1536 if (vop2_cluster_window(win)) { 1368 1537 vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0); 1369 - 
vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0); 1538 + vop2_win_write(win, VOP2_WIN_TRANSFORM_OFFSET, 0); 1370 1539 } 1371 1540 1372 1541 vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4)); ··· 1385 1554 1386 1555 rb_swap = vop2_win_rb_swap(fb->format->format); 1387 1556 vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap); 1388 - if (!vop2_cluster_window(win)) { 1389 - uv_swap = vop2_win_uv_swap(fb->format->format); 1390 - vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap); 1391 - } 1557 + uv_swap = vop2_win_uv_swap(fb->format->format); 1558 + vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap); 1392 1559 1393 1560 if (fb->format->is_yuv) { 1394 1561 vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4)); ··· 1560 1731 static void vop2_post_config(struct drm_crtc *crtc) 1561 1732 { 1562 1733 struct vop2_video_port *vp = to_vop2_video_port(crtc); 1734 + struct vop2 *vop2 = vp->vop2; 1563 1735 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 1564 1736 u16 vtotal = mode->crtc_vtotal; 1565 1737 u16 hdisplay = mode->crtc_hdisplay; ··· 1571 1741 u32 top_margin = 100, bottom_margin = 100; 1572 1742 u16 hsize = hdisplay * (left_margin + right_margin) / 200; 1573 1743 u16 vsize = vdisplay * (top_margin + bottom_margin) / 200; 1574 - u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; 1575 1744 u16 hact_end, vact_end; 1576 1745 u32 val; 1577 - u32 bg_dly; 1578 - u32 pre_scan_dly; 1579 1746 1580 - bg_dly = vp->data->pre_scan_max_dly[3]; 1581 - vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id), 1582 - FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly)); 1583 - 1584 - pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len; 1585 - vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly); 1747 + vop2->ops->setup_bg_dly(vp); 1586 1748 1587 1749 vsize = rounddown(vsize, 2); 1588 1750 hsize = rounddown(hsize, 2); ··· 1608 1786 } 1609 1787 1610 1788 vop2_vp_write(vp, RK3568_VP_DSP_BG, 0); 1611 - } 1612 - 1613 - 
static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) 1614 - { 1615 - struct vop2 *vop2 = vp->vop2; 1616 - struct drm_crtc *crtc = &vp->crtc; 1617 - u32 die, dip; 1618 - 1619 - die = vop2_readl(vop2, RK3568_DSP_IF_EN); 1620 - dip = vop2_readl(vop2, RK3568_DSP_IF_POL); 1621 - 1622 - switch (id) { 1623 - case ROCKCHIP_VOP2_EP_RGB0: 1624 - die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX; 1625 - die |= RK3568_SYS_DSP_INFACE_EN_RGB | 1626 - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id); 1627 - dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; 1628 - dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); 1629 - if (polflags & POLFLAG_DCLK_INV) 1630 - regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3)); 1631 - else 1632 - regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16)); 1633 - break; 1634 - case ROCKCHIP_VOP2_EP_HDMI0: 1635 - die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX; 1636 - die |= RK3568_SYS_DSP_INFACE_EN_HDMI | 1637 - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id); 1638 - dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL; 1639 - dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags); 1640 - break; 1641 - case ROCKCHIP_VOP2_EP_EDP0: 1642 - die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX; 1643 - die |= RK3568_SYS_DSP_INFACE_EN_EDP | 1644 - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id); 1645 - dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL; 1646 - dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags); 1647 - break; 1648 - case ROCKCHIP_VOP2_EP_MIPI0: 1649 - die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX; 1650 - die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 | 1651 - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id); 1652 - dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL; 1653 - dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags); 1654 - break; 1655 - case ROCKCHIP_VOP2_EP_MIPI1: 1656 - die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX; 1657 - die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 | 1658 - 
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); 1659 - dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL; 1660 - dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags); 1661 - break; 1662 - case ROCKCHIP_VOP2_EP_LVDS0: 1663 - die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX; 1664 - die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 | 1665 - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id); 1666 - dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; 1667 - dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); 1668 - break; 1669 - case ROCKCHIP_VOP2_EP_LVDS1: 1670 - die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX; 1671 - die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 | 1672 - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id); 1673 - dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; 1674 - dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); 1675 - break; 1676 - default: 1677 - drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); 1678 - return 0; 1679 - } 1680 - 1681 - dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD; 1682 - 1683 - vop2_writel(vop2, RK3568_DSP_IF_EN, die); 1684 - vop2_writel(vop2, RK3568_DSP_IF_POL, dip); 1685 - 1686 - return crtc->state->adjusted_mode.crtc_clock * 1000LL; 1687 - } 1688 - 1689 - /* 1690 - * calc the dclk on rk3588 1691 - * the available div of dclk is 1, 2, 4 1692 - */ 1693 - static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk) 1694 - { 1695 - if (child_clk * 4 <= max_dclk) 1696 - return child_clk * 4; 1697 - else if (child_clk * 2 <= max_dclk) 1698 - return child_clk * 2; 1699 - else if (child_clk <= max_dclk) 1700 - return child_clk; 1701 - else 1702 - return 0; 1703 - } 1704 - 1705 - /* 1706 - * 4 pixclk/cycle on rk3588 1707 - * RGB/eDP/HDMI: if_pixclk >= dclk_core 1708 - * DP: dp_pixclk = dclk_out <= dclk_core 1709 - * DSI: mipi_pixclk <= dclk_out <= dclk_core 1710 - */ 1711 - static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id, 1712 - int *dclk_core_div, int *dclk_out_div, 
1713 - int *if_pixclk_div, int *if_dclk_div) 1714 - { 1715 - struct vop2 *vop2 = vp->vop2; 1716 - struct drm_crtc *crtc = &vp->crtc; 1717 - struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; 1718 - struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state); 1719 - int output_mode = vcstate->output_mode; 1720 - unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */ 1721 - unsigned long dclk_core_rate = v_pixclk >> 2; 1722 - unsigned long dclk_rate = v_pixclk; 1723 - unsigned long dclk_out_rate; 1724 - unsigned long if_pixclk_rate; 1725 - int K = 1; 1726 - 1727 - if (vop2_output_if_is_hdmi(id)) { 1728 - /* 1729 - * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate 1730 - * K = 1: dclk_core = hdmie_edp_dclk > if_pixclk_rate 1731 - */ 1732 - if (output_mode == ROCKCHIP_OUT_MODE_YUV420) { 1733 - dclk_rate = dclk_rate >> 1; 1734 - K = 2; 1735 - } 1736 - 1737 - /* 1738 - * if_pixclk_rate = (dclk_core_rate << 1) / K; 1739 - * if_dclk_rate = dclk_core_rate / K; 1740 - * *if_pixclk_div = dclk_rate / if_pixclk_rate; 1741 - * *if_dclk_div = dclk_rate / if_dclk_rate; 1742 - */ 1743 - *if_pixclk_div = 2; 1744 - *if_dclk_div = 4; 1745 - } else if (vop2_output_if_is_edp(id)) { 1746 - /* 1747 - * edp_pixclk = edp_dclk > dclk_core 1748 - */ 1749 - if_pixclk_rate = v_pixclk / K; 1750 - dclk_rate = if_pixclk_rate * K; 1751 - /* 1752 - * *if_pixclk_div = dclk_rate / if_pixclk_rate; 1753 - * *if_dclk_div = *if_pixclk_div; 1754 - */ 1755 - *if_pixclk_div = K; 1756 - *if_dclk_div = K; 1757 - } else if (vop2_output_if_is_dp(id)) { 1758 - if (output_mode == ROCKCHIP_OUT_MODE_YUV420) 1759 - dclk_out_rate = v_pixclk >> 3; 1760 - else 1761 - dclk_out_rate = v_pixclk >> 2; 1762 - 1763 - dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000); 1764 - if (!dclk_rate) { 1765 - drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n", 1766 - dclk_out_rate); 1767 - return 0; 1768 - } 1769 - *dclk_out_div = dclk_rate / 
dclk_out_rate; 1770 - } else if (vop2_output_if_is_mipi(id)) { 1771 - if_pixclk_rate = dclk_core_rate / K; 1772 - /* 1773 - * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4 1774 - */ 1775 - dclk_out_rate = if_pixclk_rate; 1776 - /* 1777 - * dclk_rate = N * dclk_core_rate N = (1,2,4 ), 1778 - * we get a little factor here 1779 - */ 1780 - dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000); 1781 - if (!dclk_rate) { 1782 - drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n", 1783 - dclk_out_rate); 1784 - return 0; 1785 - } 1786 - *dclk_out_div = dclk_rate / dclk_out_rate; 1787 - /* 1788 - * mipi pixclk == dclk_out 1789 - */ 1790 - *if_pixclk_div = 1; 1791 - } else if (vop2_output_if_is_dpi(id)) { 1792 - dclk_rate = v_pixclk; 1793 - } 1794 - 1795 - *dclk_core_div = dclk_rate / dclk_core_rate; 1796 - *if_pixclk_div = ilog2(*if_pixclk_div); 1797 - *if_dclk_div = ilog2(*if_dclk_div); 1798 - *dclk_core_div = ilog2(*dclk_core_div); 1799 - *dclk_out_div = ilog2(*dclk_out_div); 1800 - 1801 - drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n", 1802 - dclk_rate, *if_pixclk_div, *if_dclk_div); 1803 - 1804 - return dclk_rate; 1805 - } 1806 - 1807 - /* 1808 - * MIPI port mux on rk3588: 1809 - * 0: Video Port2 1810 - * 1: Video Port3 1811 - * 3: Video Port 1(MIPI1 only) 1812 - */ 1813 - static u32 rk3588_get_mipi_port_mux(int vp_id) 1814 - { 1815 - if (vp_id == 1) 1816 - return 3; 1817 - else if (vp_id == 3) 1818 - return 1; 1819 - else 1820 - return 0; 1821 - } 1822 - 1823 - static u32 rk3588_get_hdmi_pol(u32 flags) 1824 - { 1825 - u32 val; 1826 - 1827 - val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0; 1828 - val |= (flags & DRM_MODE_FLAG_NVSYNC) ? 
BIT(VSYNC_POSITIVE) : 0; 1829 - 1830 - return val; 1831 - } 1832 - 1833 - static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) 1834 - { 1835 - struct vop2 *vop2 = vp->vop2; 1836 - int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div; 1837 - unsigned long clock; 1838 - u32 die, dip, div, vp_clk_div, val; 1839 - 1840 - clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div, 1841 - &if_pixclk_div, &if_dclk_div); 1842 - if (!clock) 1843 - return 0; 1844 - 1845 - vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div); 1846 - vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div); 1847 - 1848 - die = vop2_readl(vop2, RK3568_DSP_IF_EN); 1849 - dip = vop2_readl(vop2, RK3568_DSP_IF_POL); 1850 - div = vop2_readl(vop2, RK3568_DSP_IF_CTRL); 1851 - 1852 - switch (id) { 1853 - case ROCKCHIP_VOP2_EP_HDMI0: 1854 - div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV; 1855 - div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV; 1856 - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); 1857 - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); 1858 - die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX; 1859 - die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 | 1860 - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id); 1861 - val = rk3588_get_hdmi_pol(polflags); 1862 - regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1)); 1863 - regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5)); 1864 - break; 1865 - case ROCKCHIP_VOP2_EP_HDMI1: 1866 - div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV; 1867 - div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV; 1868 - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div); 1869 - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div); 1870 - die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX; 1871 - die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 | 1872 - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id); 1873 - val = 
rk3588_get_hdmi_pol(polflags); 1874 - regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4)); 1875 - regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7)); 1876 - break; 1877 - case ROCKCHIP_VOP2_EP_EDP0: 1878 - div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV; 1879 - div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV; 1880 - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); 1881 - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); 1882 - die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX; 1883 - die |= RK3588_SYS_DSP_INFACE_EN_EDP0 | 1884 - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id); 1885 - regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0)); 1886 - break; 1887 - case ROCKCHIP_VOP2_EP_EDP1: 1888 - div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV; 1889 - div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV; 1890 - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); 1891 - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); 1892 - die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX; 1893 - die |= RK3588_SYS_DSP_INFACE_EN_EDP1 | 1894 - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id); 1895 - regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3)); 1896 - break; 1897 - case ROCKCHIP_VOP2_EP_MIPI0: 1898 - div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV; 1899 - div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div); 1900 - die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX; 1901 - val = rk3588_get_mipi_port_mux(vp->id); 1902 - die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 | 1903 - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val); 1904 - break; 1905 - case ROCKCHIP_VOP2_EP_MIPI1: 1906 - div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV; 1907 - div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div); 1908 - die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; 1909 - val = rk3588_get_mipi_port_mux(vp->id); 1910 - die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | 1911 - 
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val); 1912 - break; 1913 - case ROCKCHIP_VOP2_EP_DP0: 1914 - die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX; 1915 - die |= RK3588_SYS_DSP_INFACE_EN_DP0 | 1916 - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id); 1917 - dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL; 1918 - dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags); 1919 - break; 1920 - case ROCKCHIP_VOP2_EP_DP1: 1921 - die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; 1922 - die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | 1923 - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); 1924 - dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL; 1925 - dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags); 1926 - break; 1927 - default: 1928 - drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); 1929 - return 0; 1930 - } 1931 - 1932 - dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD; 1933 - 1934 - vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div); 1935 - vop2_writel(vop2, RK3568_DSP_IF_EN, die); 1936 - vop2_writel(vop2, RK3568_DSP_IF_CTRL, div); 1937 - vop2_writel(vop2, RK3568_DSP_IF_POL, dip); 1938 - 1939 - return clock; 1940 - } 1941 - 1942 - static unsigned long vop2_set_intf_mux(struct vop2_video_port *vp, int ep_id, u32 polflags) 1943 - { 1944 - struct vop2 *vop2 = vp->vop2; 1945 - 1946 - if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568) 1947 - return rk3568_set_intf_mux(vp, ep_id, polflags); 1948 - else if (vop2->data->soc_id == 3588) 1949 - return rk3588_set_intf_mux(vp, ep_id, polflags); 1950 - else 1951 - return 0; 1952 1789 } 1953 1790 1954 1791 static int us_to_vertical_line(struct drm_display_mode *mode, int us) ··· 1682 2201 * process multi(1/2/4/8) pixels per cycle, so the dclk feed by the 1683 2202 * system cru may be the 1/2 or 1/4 of mode->clock. 
1684 2203 */ 1685 - clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags); 2204 + clock = vop2->ops->setup_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags); 1686 2205 } 1687 2206 1688 2207 if (!clock) { ··· 1751 2270 * Switch to HDMI PHY PLL as DCLK source for display modes up 1752 2271 * to 4K@60Hz, if available, otherwise keep using the system CRU. 1753 2272 */ 1754 - if (vop2->pll_hdmiphy0 && clock <= VOP2_MAX_DCLK_RATE) { 2273 + if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) { 1755 2274 drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { 1756 2275 struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); 1757 2276 1758 2277 if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { 2278 + if (!vop2->pll_hdmiphy0) 2279 + break; 2280 + 1759 2281 if (!vp->dclk_src) 1760 2282 vp->dclk_src = clk_get_parent(vp->dclk); 1761 2283 ··· 1766 2282 if (ret < 0) 1767 2283 drm_warn(vop2->drm, 1768 2284 "Could not switch to HDMI0 PHY PLL: %d\n", ret); 2285 + break; 2286 + } 2287 + 2288 + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) { 2289 + if (!vop2->pll_hdmiphy1) 2290 + break; 2291 + 2292 + if (!vp->dclk_src) 2293 + vp->dclk_src = clk_get_parent(vp->dclk); 2294 + 2295 + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1); 2296 + if (ret < 0) 2297 + drm_warn(vop2->drm, 2298 + "Could not switch to HDMI1 PHY PLL: %d\n", ret); 1769 2299 break; 1770 2300 } 1771 2301 } ··· 1849 2351 return 0; 1850 2352 } 1851 2353 1852 - static bool is_opaque(u16 alpha) 1853 - { 1854 - return (alpha >> 8) == 0xff; 1855 - } 1856 - 1857 - static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config, 1858 - struct vop2_alpha *alpha) 1859 - { 1860 - int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1; 1861 - int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1; 1862 - int src_color_mode = alpha_config->src_premulti_en ? 
1863 - ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL; 1864 - int dst_color_mode = alpha_config->dst_premulti_en ? 1865 - ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL; 1866 - 1867 - alpha->src_color_ctrl.val = 0; 1868 - alpha->dst_color_ctrl.val = 0; 1869 - alpha->src_alpha_ctrl.val = 0; 1870 - alpha->dst_alpha_ctrl.val = 0; 1871 - 1872 - if (!alpha_config->src_pixel_alpha_en) 1873 - alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL; 1874 - else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en) 1875 - alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX; 1876 - else 1877 - alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL; 1878 - 1879 - alpha->src_color_ctrl.bits.alpha_en = 1; 1880 - 1881 - if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) { 1882 - alpha->src_color_ctrl.bits.color_mode = src_color_mode; 1883 - alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL; 1884 - } else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) { 1885 - alpha->src_color_ctrl.bits.color_mode = src_color_mode; 1886 - alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE; 1887 - } else { 1888 - alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL; 1889 - alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL; 1890 - } 1891 - alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8; 1892 - alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; 1893 - alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; 1894 - 1895 - alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; 1896 - alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; 1897 - alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL; 1898 - alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8; 1899 - alpha->dst_color_ctrl.bits.color_mode = dst_color_mode; 1900 - alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; 1901 - 1902 - alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; 1903 - 
alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode; 1904 - alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; 1905 - alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE; 1906 - 1907 - alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; 1908 - if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en) 1909 - alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX; 1910 - else 1911 - alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL; 1912 - alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION; 1913 - alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; 1914 - } 1915 - 1916 - static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id) 1917 - { 1918 - struct vop2_video_port *vp; 1919 - int used_layer = 0; 1920 - int i; 1921 - 1922 - for (i = 0; i < port_id; i++) { 1923 - vp = &vop2->vps[i]; 1924 - used_layer += hweight32(vp->win_mask); 1925 - } 1926 - 1927 - return used_layer; 1928 - } 1929 - 1930 - static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win) 1931 - { 1932 - struct vop2_alpha_config alpha_config; 1933 - struct vop2_alpha alpha; 1934 - struct drm_plane_state *bottom_win_pstate; 1935 - bool src_pixel_alpha_en = false; 1936 - u16 src_glb_alpha_val, dst_glb_alpha_val; 1937 - bool premulti_en = false; 1938 - bool swap = false; 1939 - u32 offset = 0; 1940 - 1941 - /* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */ 1942 - bottom_win_pstate = main_win->base.state; 1943 - src_glb_alpha_val = 0; 1944 - dst_glb_alpha_val = main_win->base.state->alpha; 1945 - 1946 - if (!bottom_win_pstate->fb) 1947 - return; 1948 - 1949 - alpha_config.src_premulti_en = premulti_en; 1950 - alpha_config.dst_premulti_en = false; 1951 - alpha_config.src_pixel_alpha_en = src_pixel_alpha_en; 1952 - alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ 1953 - alpha_config.src_glb_alpha_value = src_glb_alpha_val; 1954 - 
alpha_config.dst_glb_alpha_value = dst_glb_alpha_val; 1955 - vop2_parse_alpha(&alpha_config, &alpha); 1956 - 1957 - alpha.src_color_ctrl.bits.src_dst_swap = swap; 1958 - 1959 - switch (main_win->data->phys_id) { 1960 - case ROCKCHIP_VOP2_CLUSTER0: 1961 - offset = 0x0; 1962 - break; 1963 - case ROCKCHIP_VOP2_CLUSTER1: 1964 - offset = 0x10; 1965 - break; 1966 - case ROCKCHIP_VOP2_CLUSTER2: 1967 - offset = 0x20; 1968 - break; 1969 - case ROCKCHIP_VOP2_CLUSTER3: 1970 - offset = 0x30; 1971 - break; 1972 - } 1973 - 1974 - vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset, 1975 - alpha.src_color_ctrl.val); 1976 - vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset, 1977 - alpha.dst_color_ctrl.val); 1978 - vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL + offset, 1979 - alpha.src_alpha_ctrl.val); 1980 - vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL + offset, 1981 - alpha.dst_alpha_ctrl.val); 1982 - } 1983 - 1984 - static void vop2_setup_alpha(struct vop2_video_port *vp) 1985 - { 1986 - struct vop2 *vop2 = vp->vop2; 1987 - struct drm_framebuffer *fb; 1988 - struct vop2_alpha_config alpha_config; 1989 - struct vop2_alpha alpha; 1990 - struct drm_plane *plane; 1991 - int pixel_alpha_en; 1992 - int premulti_en, gpremulti_en = 0; 1993 - int mixer_id; 1994 - u32 offset; 1995 - bool bottom_layer_alpha_en = false; 1996 - u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE; 1997 - 1998 - mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); 1999 - alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ 2000 - 2001 - drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { 2002 - struct vop2_win *win = to_vop2_win(plane); 2003 - 2004 - if (plane->state->normalized_zpos == 0 && 2005 - !is_opaque(plane->state->alpha) && 2006 - !vop2_cluster_window(win)) { 2007 - /* 2008 - * If bottom layer have global alpha effect [except cluster layer, 2009 - * because cluster have deal with bottom layer global alpha value 2010 - * at cluster 
mix], bottom layer mix need deal with global alpha. 2011 - */ 2012 - bottom_layer_alpha_en = true; 2013 - dst_global_alpha = plane->state->alpha; 2014 - } 2015 - } 2016 - 2017 - drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { 2018 - struct vop2_win *win = to_vop2_win(plane); 2019 - int zpos = plane->state->normalized_zpos; 2020 - 2021 - /* 2022 - * Need to configure alpha from second layer. 2023 - */ 2024 - if (zpos == 0) 2025 - continue; 2026 - 2027 - if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) 2028 - premulti_en = 1; 2029 - else 2030 - premulti_en = 0; 2031 - 2032 - plane = &win->base; 2033 - fb = plane->state->fb; 2034 - 2035 - pixel_alpha_en = fb->format->has_alpha; 2036 - 2037 - alpha_config.src_premulti_en = premulti_en; 2038 - 2039 - if (bottom_layer_alpha_en && zpos == 1) { 2040 - gpremulti_en = premulti_en; 2041 - /* Cd = Cs + (1 - As) * Cd * Agd */ 2042 - alpha_config.dst_premulti_en = false; 2043 - alpha_config.src_pixel_alpha_en = pixel_alpha_en; 2044 - alpha_config.src_glb_alpha_value = plane->state->alpha; 2045 - alpha_config.dst_glb_alpha_value = dst_global_alpha; 2046 - } else if (vop2_cluster_window(win)) { 2047 - /* Mix output data only have pixel alpha */ 2048 - alpha_config.dst_premulti_en = true; 2049 - alpha_config.src_pixel_alpha_en = true; 2050 - alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 2051 - alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 2052 - } else { 2053 - /* Cd = Cs + (1 - As) * Cd */ 2054 - alpha_config.dst_premulti_en = true; 2055 - alpha_config.src_pixel_alpha_en = pixel_alpha_en; 2056 - alpha_config.src_glb_alpha_value = plane->state->alpha; 2057 - alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 2058 - } 2059 - 2060 - vop2_parse_alpha(&alpha_config, &alpha); 2061 - 2062 - offset = (mixer_id + zpos - 1) * 0x10; 2063 - vop2_writel(vop2, RK3568_MIX0_SRC_COLOR_CTRL + offset, 2064 - alpha.src_color_ctrl.val); 2065 - vop2_writel(vop2, RK3568_MIX0_DST_COLOR_CTRL + offset, 
2066 - alpha.dst_color_ctrl.val); 2067 - vop2_writel(vop2, RK3568_MIX0_SRC_ALPHA_CTRL + offset, 2068 - alpha.src_alpha_ctrl.val); 2069 - vop2_writel(vop2, RK3568_MIX0_DST_ALPHA_CTRL + offset, 2070 - alpha.dst_alpha_ctrl.val); 2071 - } 2072 - 2073 - if (vp->id == 0) { 2074 - if (bottom_layer_alpha_en) { 2075 - /* Transfer pixel alpha to hdr mix */ 2076 - alpha_config.src_premulti_en = gpremulti_en; 2077 - alpha_config.dst_premulti_en = true; 2078 - alpha_config.src_pixel_alpha_en = true; 2079 - alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 2080 - alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 2081 - vop2_parse_alpha(&alpha_config, &alpha); 2082 - 2083 - vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, 2084 - alpha.src_color_ctrl.val); 2085 - vop2_writel(vop2, RK3568_HDR0_DST_COLOR_CTRL, 2086 - alpha.dst_color_ctrl.val); 2087 - vop2_writel(vop2, RK3568_HDR0_SRC_ALPHA_CTRL, 2088 - alpha.src_alpha_ctrl.val); 2089 - vop2_writel(vop2, RK3568_HDR0_DST_ALPHA_CTRL, 2090 - alpha.dst_alpha_ctrl.val); 2091 - } else { 2092 - vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, 0); 2093 - } 2094 - } 2095 - } 2096 - 2097 - static void vop2_setup_layer_mixer(struct vop2_video_port *vp) 2098 - { 2099 - struct vop2 *vop2 = vp->vop2; 2100 - struct drm_plane *plane; 2101 - u32 layer_sel = 0; 2102 - u32 port_sel; 2103 - u8 layer_id; 2104 - u8 old_layer_id; 2105 - u8 layer_sel_id; 2106 - unsigned int ofs; 2107 - u32 ovl_ctrl; 2108 - int i; 2109 - struct vop2_video_port *vp0 = &vop2->vps[0]; 2110 - struct vop2_video_port *vp1 = &vop2->vps[1]; 2111 - struct vop2_video_port *vp2 = &vop2->vps[2]; 2112 - struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); 2113 - 2114 - ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL); 2115 - ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD; 2116 - if (vcstate->yuv_overlay) 2117 - ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id); 2118 - else 2119 - ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id); 2120 - 2121 - 
vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl); 2122 - 2123 - port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL); 2124 - port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT; 2125 - 2126 - if (vp0->nlayers) 2127 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 2128 - vp0->nlayers - 1); 2129 - else 2130 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8); 2131 - 2132 - if (vp1->nlayers) 2133 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 2134 - (vp0->nlayers + vp1->nlayers - 1)); 2135 - else 2136 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8); 2137 - 2138 - if (vp2->nlayers) 2139 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 2140 - (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1)); 2141 - else 2142 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8); 2143 - 2144 - layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); 2145 - 2146 - ofs = 0; 2147 - for (i = 0; i < vp->id; i++) 2148 - ofs += vop2->vps[i].nlayers; 2149 - 2150 - drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { 2151 - struct vop2_win *win = to_vop2_win(plane); 2152 - struct vop2_win *old_win; 2153 - 2154 - layer_id = (u8)(plane->state->normalized_zpos + ofs); 2155 - 2156 - /* 2157 - * Find the layer this win bind in old state. 
2158 - */ 2159 - for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) { 2160 - layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf; 2161 - if (layer_sel_id == win->data->layer_sel_id) 2162 - break; 2163 - } 2164 - 2165 - /* 2166 - * Find the win bind to this layer in old state 2167 - */ 2168 - for (i = 0; i < vop2->data->win_size; i++) { 2169 - old_win = &vop2->win[i]; 2170 - layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf; 2171 - if (layer_sel_id == old_win->data->layer_sel_id) 2172 - break; 2173 - } 2174 - 2175 - switch (win->data->phys_id) { 2176 - case ROCKCHIP_VOP2_CLUSTER0: 2177 - port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0; 2178 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id); 2179 - break; 2180 - case ROCKCHIP_VOP2_CLUSTER1: 2181 - port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1; 2182 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id); 2183 - break; 2184 - case ROCKCHIP_VOP2_CLUSTER2: 2185 - port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2; 2186 - port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id); 2187 - break; 2188 - case ROCKCHIP_VOP2_CLUSTER3: 2189 - port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3; 2190 - port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id); 2191 - break; 2192 - case ROCKCHIP_VOP2_ESMART0: 2193 - port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0; 2194 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id); 2195 - break; 2196 - case ROCKCHIP_VOP2_ESMART1: 2197 - port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1; 2198 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id); 2199 - break; 2200 - case ROCKCHIP_VOP2_ESMART2: 2201 - port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2; 2202 - port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id); 2203 - break; 2204 - case ROCKCHIP_VOP2_ESMART3: 2205 - port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3; 2206 - port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id); 2207 - break; 2208 - case ROCKCHIP_VOP2_SMART0: 2209 - port_sel &= 
~RK3568_OVL_PORT_SEL__SMART0; 2210 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id); 2211 - break; 2212 - case ROCKCHIP_VOP2_SMART1: 2213 - port_sel &= ~RK3568_OVL_PORT_SEL__SMART1; 2214 - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id); 2215 - break; 2216 - } 2217 - 2218 - layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7); 2219 - layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id); 2220 - /* 2221 - * When we bind a window from layerM to layerN, we also need to move the old 2222 - * window on layerN to layerM to avoid one window selected by two or more layers. 2223 - */ 2224 - layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7); 2225 - layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id); 2226 - } 2227 - 2228 - vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel); 2229 - vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); 2230 - } 2231 - 2232 - static void vop2_setup_dly_for_windows(struct vop2 *vop2) 2233 - { 2234 - struct vop2_win *win; 2235 - int i = 0; 2236 - u32 cdly = 0, sdly = 0; 2237 - 2238 - for (i = 0; i < vop2->data->win_size; i++) { 2239 - u32 dly; 2240 - 2241 - win = &vop2->win[i]; 2242 - dly = win->delay; 2243 - 2244 - switch (win->data->phys_id) { 2245 - case ROCKCHIP_VOP2_CLUSTER0: 2246 - cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly); 2247 - cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly); 2248 - break; 2249 - case ROCKCHIP_VOP2_CLUSTER1: 2250 - cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly); 2251 - cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly); 2252 - break; 2253 - case ROCKCHIP_VOP2_ESMART0: 2254 - sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly); 2255 - break; 2256 - case ROCKCHIP_VOP2_ESMART1: 2257 - sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly); 2258 - break; 2259 - case ROCKCHIP_VOP2_SMART0: 2260 - case ROCKCHIP_VOP2_ESMART2: 2261 - sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly); 
2262 - break; 2263 - case ROCKCHIP_VOP2_SMART1: 2264 - case ROCKCHIP_VOP2_ESMART3: 2265 - sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly); 2266 - break; 2267 - } 2268 - } 2269 - 2270 - vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly); 2271 - vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly); 2272 - } 2273 - 2274 2354 static void vop2_crtc_atomic_begin(struct drm_crtc *crtc, 2275 2355 struct drm_atomic_state *state) 2276 2356 { 2277 2357 struct vop2_video_port *vp = to_vop2_video_port(crtc); 2278 2358 struct vop2 *vop2 = vp->vop2; 2279 - struct drm_plane *plane; 2280 2359 2281 - vp->win_mask = 0; 2282 - 2283 - drm_atomic_crtc_for_each_plane(plane, crtc) { 2284 - struct vop2_win *win = to_vop2_win(plane); 2285 - 2286 - win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT]; 2287 - 2288 - vp->win_mask |= BIT(win->data->phys_id); 2289 - 2290 - if (vop2_cluster_window(win)) 2291 - vop2_setup_cluster_alpha(vop2, win); 2292 - } 2293 - 2294 - if (!vp->win_mask) 2295 - return; 2296 - 2297 - vop2_setup_layer_mixer(vp); 2298 - vop2_setup_alpha(vp); 2299 - vop2_setup_dly_for_windows(vop2); 2360 + vop2->ops->setup_overlay(vp); 2300 2361 } 2301 2362 2302 2363 static void vop2_crtc_atomic_flush(struct drm_crtc *crtc, ··· 2167 3110 .late_register = vop2_crtc_late_register, 2168 3111 }; 2169 3112 3113 + static irqreturn_t rk3576_vp_isr(int irq, void *data) 3114 + { 3115 + struct vop2_video_port *vp = data; 3116 + struct vop2 *vop2 = vp->vop2; 3117 + struct drm_crtc *crtc = &vp->crtc; 3118 + uint32_t irqs; 3119 + int ret = IRQ_NONE; 3120 + 3121 + if (!pm_runtime_get_if_in_use(vop2->dev)) 3122 + return IRQ_NONE; 3123 + 3124 + irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id)); 3125 + vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs); 3126 + 3127 + if (irqs & VP_INT_DSP_HOLD_VALID) { 3128 + complete(&vp->dsp_hold_completion); 3129 + ret = IRQ_HANDLED; 3130 + } 3131 + 3132 + if (irqs & VP_INT_FS_FIELD) { 3133 + drm_crtc_handle_vblank(crtc); 3134 + 
spin_lock(&crtc->dev->event_lock); 3135 + if (vp->event) { 3136 + u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE); 3137 + 3138 + if (!(val & BIT(vp->id))) { 3139 + drm_crtc_send_vblank_event(crtc, vp->event); 3140 + vp->event = NULL; 3141 + drm_crtc_vblank_put(crtc); 3142 + } 3143 + } 3144 + spin_unlock(&crtc->dev->event_lock); 3145 + 3146 + ret = IRQ_HANDLED; 3147 + } 3148 + 3149 + if (irqs & VP_INT_POST_BUF_EMPTY) { 3150 + drm_err_ratelimited(vop2->drm, "POST_BUF_EMPTY irq err at vp%d\n", vp->id); 3151 + ret = IRQ_HANDLED; 3152 + } 3153 + 3154 + pm_runtime_put(vop2->dev); 3155 + 3156 + return ret; 3157 + } 3158 + 2170 3159 static irqreturn_t vop2_isr(int irq, void *data) 2171 3160 { 2172 3161 struct vop2 *vop2 = data; ··· 2228 3125 if (!pm_runtime_get_if_in_use(vop2->dev)) 2229 3126 return IRQ_NONE; 2230 3127 2231 - for (i = 0; i < vop2_data->nr_vps; i++) { 2232 - struct vop2_video_port *vp = &vop2->vps[i]; 2233 - struct drm_crtc *crtc = &vp->crtc; 2234 - u32 irqs; 3128 + if (vop2->version < VOP_VERSION_RK3576) { 3129 + for (i = 0; i < vop2_data->nr_vps; i++) { 3130 + struct vop2_video_port *vp = &vop2->vps[i]; 3131 + struct drm_crtc *crtc = &vp->crtc; 3132 + u32 irqs; 2235 3133 2236 - irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id)); 2237 - vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs); 3134 + irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id)); 3135 + vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs); 2238 3136 2239 - if (irqs & VP_INT_DSP_HOLD_VALID) { 2240 - complete(&vp->dsp_hold_completion); 2241 - ret = IRQ_HANDLED; 2242 - } 2243 - 2244 - if (irqs & VP_INT_FS_FIELD) { 2245 - drm_crtc_handle_vblank(crtc); 2246 - spin_lock(&crtc->dev->event_lock); 2247 - if (vp->event) { 2248 - u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE); 2249 - 2250 - if (!(val & BIT(vp->id))) { 2251 - drm_crtc_send_vblank_event(crtc, vp->event); 2252 - vp->event = NULL; 2253 - drm_crtc_vblank_put(crtc); 2254 - } 3137 + if (irqs & 
VP_INT_DSP_HOLD_VALID) { 3138 + complete(&vp->dsp_hold_completion); 3139 + ret = IRQ_HANDLED; 2255 3140 } 2256 - spin_unlock(&crtc->dev->event_lock); 2257 3141 2258 - ret = IRQ_HANDLED; 2259 - } 3142 + if (irqs & VP_INT_FS_FIELD) { 3143 + drm_crtc_handle_vblank(crtc); 3144 + spin_lock(&crtc->dev->event_lock); 3145 + if (vp->event) { 3146 + u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE); 2260 3147 2261 - if (irqs & VP_INT_POST_BUF_EMPTY) { 2262 - drm_err_ratelimited(vop2->drm, 2263 - "POST_BUF_EMPTY irq err at vp%d\n", 2264 - vp->id); 2265 - ret = IRQ_HANDLED; 3148 + if (!(val & BIT(vp->id))) { 3149 + drm_crtc_send_vblank_event(crtc, vp->event); 3150 + vp->event = NULL; 3151 + drm_crtc_vblank_put(crtc); 3152 + } 3153 + } 3154 + spin_unlock(&crtc->dev->event_lock); 3155 + 3156 + ret = IRQ_HANDLED; 3157 + } 3158 + 3159 + if (irqs & VP_INT_POST_BUF_EMPTY) { 3160 + drm_err_ratelimited(vop2->drm, 3161 + "POST_BUF_EMPTY irq err at vp%d\n", 3162 + vp->id); 3163 + ret = IRQ_HANDLED; 3164 + } 2266 3165 } 2267 3166 } 2268 3167 ··· 2318 3213 return 0; 2319 3214 } 2320 3215 2321 - static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2) 3216 + /* 3217 + * On RK3566 these windows don't have an independent 3218 + * framebuffer. They can only share/mirror the framebuffer 3219 + * with smart0, esmart0 and cluster0 respectively. 3220 + * And RK3566 share the same vop version with Rk3568, so we 3221 + * need to use soc_id for identification here. 
3222 + */ 3223 + static bool vop2_is_mirror_win(struct vop2_win *win) 2322 3224 { 2323 - int i; 3225 + struct vop2 *vop2 = win->vop2; 2324 3226 2325 - for (i = 0; i < vop2->data->nr_vps; i++) { 2326 - struct vop2_video_port *vp = &vop2->vps[i]; 2327 - 2328 - if (!vp->crtc.port) 2329 - continue; 2330 - if (vp->primary_plane) 2331 - continue; 2332 - 2333 - return vp; 3227 + if (vop2->data->soc_id == 3566) { 3228 + switch (win->data->phys_id) { 3229 + case ROCKCHIP_VOP2_SMART1: 3230 + case ROCKCHIP_VOP2_ESMART1: 3231 + case ROCKCHIP_VOP2_CLUSTER1: 3232 + return true; 3233 + default: 3234 + return false; 3235 + } 3236 + } else { 3237 + return false; 2334 3238 } 2335 - 2336 - return NULL; 2337 3239 } 2338 3240 2339 3241 static int vop2_create_crtcs(struct vop2 *vop2) ··· 2351 3239 struct drm_plane *plane; 2352 3240 struct device_node *port; 2353 3241 struct vop2_video_port *vp; 2354 - int i, nvp, nvps = 0; 3242 + struct vop2_win *win; 3243 + u32 possible_crtcs; 3244 + int i, j, nvp, nvps = 0; 2355 3245 int ret; 2356 3246 2357 3247 for (i = 0; i < vop2_data->nr_vps; i++) { ··· 2369 3255 2370 3256 snprintf(dclk_name, sizeof(dclk_name), "dclk_vp%d", vp->id); 2371 3257 vp->dclk = devm_clk_get(vop2->dev, dclk_name); 2372 - if (IS_ERR(vp->dclk)) { 2373 - drm_err(vop2->drm, "failed to get %s\n", dclk_name); 2374 - return PTR_ERR(vp->dclk); 2375 - } 3258 + if (IS_ERR(vp->dclk)) 3259 + return dev_err_probe(drm->dev, PTR_ERR(vp->dclk), 3260 + "failed to get %s\n", dclk_name); 2376 3261 2377 3262 np = of_graph_get_remote_node(dev->of_node, i, -1); 2378 3263 if (!np) { ··· 2381 3268 of_node_put(np); 2382 3269 2383 3270 port = of_graph_get_port_by_id(dev->of_node, i); 2384 - if (!port) { 2385 - drm_err(vop2->drm, "no port node found for video_port%d\n", i); 2386 - return -ENOENT; 2387 - } 2388 - 3271 + if (!port) 3272 + return dev_err_probe(drm->dev, -ENOENT, 3273 + "no port node found for video_port%d\n", i); 2389 3274 vp->crtc.port = port; 2390 3275 nvps++; 2391 3276 } 2392 3277 
2393 3278 nvp = 0; 2394 - for (i = 0; i < vop2->registered_num_wins; i++) { 2395 - struct vop2_win *win = &vop2->win[i]; 2396 - u32 possible_crtcs = 0; 3279 + /* Register a primary plane for every crtc */ 3280 + for (i = 0; i < vop2_data->nr_vps; i++) { 3281 + vp = &vop2->vps[i]; 2397 3282 2398 - if (vop2->data->soc_id == 3566) { 2399 - /* 2400 - * On RK3566 these windows don't have an independent 2401 - * framebuffer. They share the framebuffer with smart0, 2402 - * esmart0 and cluster0 respectively. 2403 - */ 2404 - switch (win->data->phys_id) { 2405 - case ROCKCHIP_VOP2_SMART1: 2406 - case ROCKCHIP_VOP2_ESMART1: 2407 - case ROCKCHIP_VOP2_CLUSTER1: 3283 + if (!vp->crtc.port) 3284 + continue; 3285 + 3286 + for (j = 0; j < vop2->registered_num_wins; j++) { 3287 + win = &vop2->win[j]; 3288 + 3289 + /* Aready registered as primary plane */ 3290 + if (win->base.type == DRM_PLANE_TYPE_PRIMARY) 2408 3291 continue; 2409 - } 2410 - } 2411 3292 2412 - if (win->type == DRM_PLANE_TYPE_PRIMARY) { 2413 - vp = find_vp_without_primary(vop2); 2414 - if (vp) { 3293 + /* If this win can not attached to this VP */ 3294 + if (!(win->data->possible_vp_mask & BIT(vp->id))) 3295 + continue; 3296 + 3297 + if (vop2_is_mirror_win(win)) 3298 + continue; 3299 + 3300 + if (win->type == DRM_PLANE_TYPE_PRIMARY) { 2415 3301 possible_crtcs = BIT(nvp); 2416 3302 vp->primary_plane = win; 3303 + ret = vop2_plane_init(vop2, win, possible_crtcs); 3304 + if (ret) 3305 + return dev_err_probe(drm->dev, ret, 3306 + "failed to init primary plane %s\n", 3307 + win->data->name); 2417 3308 nvp++; 2418 - } else { 2419 - /* change the unused primary window to overlay window */ 2420 - win->type = DRM_PLANE_TYPE_OVERLAY; 3309 + break; 2421 3310 } 2422 3311 } 3312 + } 2423 3313 2424 - if (win->type == DRM_PLANE_TYPE_OVERLAY) 2425 - possible_crtcs = (1 << nvps) - 1; 3314 + /* Register all unused window as overlay plane */ 3315 + for (i = 0; i < vop2->registered_num_wins; i++) { 3316 + win = &vop2->win[i]; 3317 + 
3318 + /* Aready registered as primary plane */ 3319 + if (win->base.type == DRM_PLANE_TYPE_PRIMARY) 3320 + continue; 3321 + 3322 + if (vop2_is_mirror_win(win)) 3323 + continue; 3324 + 3325 + win->type = DRM_PLANE_TYPE_OVERLAY; 3326 + 3327 + possible_crtcs = 0; 3328 + nvp = 0; 3329 + for (j = 0; j < vop2_data->nr_vps; j++) { 3330 + vp = &vop2->vps[j]; 3331 + 3332 + if (!vp->crtc.port) 3333 + continue; 3334 + 3335 + if (win->data->possible_vp_mask & BIT(vp->id)) 3336 + possible_crtcs |= BIT(nvp); 3337 + nvp++; 3338 + } 2426 3339 2427 3340 ret = vop2_plane_init(vop2, win, possible_crtcs); 2428 - if (ret) { 2429 - drm_err(vop2->drm, "failed to init plane %s: %d\n", 2430 - win->data->name, ret); 2431 - return ret; 2432 - } 3341 + if (ret) 3342 + return dev_err_probe(drm->dev, ret, "failed to init overlay plane %s\n", 3343 + win->data->name); 2433 3344 } 2434 3345 2435 3346 for (i = 0; i < vop2_data->nr_vps; i++) { ··· 2467 3330 ret = drm_crtc_init_with_planes(drm, &vp->crtc, plane, NULL, 2468 3331 &vop2_crtc_funcs, 2469 3332 "video_port%d", vp->id); 2470 - if (ret) { 2471 - drm_err(vop2->drm, "crtc init for video_port%d failed\n", i); 2472 - return ret; 2473 - } 3333 + if (ret) 3334 + return dev_err_probe(drm->dev, ret, 3335 + "crtc init for video_port%d failed\n", i); 2474 3336 2475 3337 drm_crtc_helper_add(&vp->crtc, &vop2_crtc_helper_funcs); 2476 3338 if (vop2->lut_regs) { ··· 2536 3400 return -ENOENT; 2537 3401 } 2538 3402 2539 - static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = { 2540 - [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0), 2541 - [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5), 2542 - [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14), 2543 - [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18), 2544 - [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31), 2545 - [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31), 2546 - [VOP2_WIN_DSP_ST] = 
REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31), 2547 - [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31), 2548 - [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31), 2549 - [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19), 2550 - [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15), 2551 - [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31), 2552 - [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8), 2553 - [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9), 2554 - [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11), 2555 - [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3), 2556 - [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8), 2557 - /* RK3588 only, reserved bit on rk3568*/ 2558 - [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13), 2559 - 2560 - /* Scale */ 2561 - [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15), 2562 - [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31), 2563 - [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15), 2564 - [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13), 2565 - [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3), 2566 - [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28), 2567 - [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29), 2568 - 2569 - /* cluster regs */ 2570 - [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1), 2571 - [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0), 2572 - [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7), 2573 - 2574 - /* afbc regs */ 2575 - [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6), 2576 - [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9), 2577 - [VOP2_WIN_AFBC_UV_SWAP] = 
REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10), 2578 - [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4), 2579 - [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7), 2580 - [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8), 2581 - [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31), 2582 - [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31), 2583 - [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15), 2584 - [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31), 2585 - [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31), 2586 - [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31), 2587 - [VOP2_WIN_AFBC_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET, 0, 31), 2588 - [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0), 2589 - [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1), 2590 - [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2), 2591 - [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3), 2592 - [VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff }, 2593 - [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff }, 2594 - [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff }, 2595 - [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff }, 2596 - [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff }, 2597 - [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, 2598 - [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, 2599 - [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff }, 2600 - [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, 2601 - [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff }, 2602 - [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, 2603 - [VOP2_WIN_VSD_CBCR_GT2] = { .reg 
= 0xffffffff }, 2604 - [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff }, 2605 - }; 2606 - 2607 - static int vop2_cluster_init(struct vop2_win *win) 3403 + static int vop2_regmap_init(struct vop2_win *win, const struct reg_field *regs, 3404 + int nr_regs) 2608 3405 { 2609 3406 struct vop2 *vop2 = win->vop2; 2610 - struct reg_field *cluster_regs; 2611 - int ret, i; 3407 + int i; 2612 3408 2613 - cluster_regs = kmemdup(vop2_cluster_regs, sizeof(vop2_cluster_regs), 2614 - GFP_KERNEL); 2615 - if (!cluster_regs) 2616 - return -ENOMEM; 3409 + for (i = 0; i < nr_regs; i++) { 3410 + const struct reg_field field = { 3411 + .reg = (regs[i].reg != 0xffffffff) ? 3412 + regs[i].reg + win->offset : regs[i].reg, 3413 + .lsb = regs[i].lsb, 3414 + .msb = regs[i].msb 3415 + }; 2617 3416 2618 - for (i = 0; i < ARRAY_SIZE(vop2_cluster_regs); i++) 2619 - if (cluster_regs[i].reg != 0xffffffff) 2620 - cluster_regs[i].reg += win->offset; 3417 + win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field); 3418 + if (IS_ERR(win->reg[i])) 3419 + return PTR_ERR(win->reg[i]); 3420 + } 2621 3421 2622 - ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg, 2623 - cluster_regs, 2624 - ARRAY_SIZE(vop2_cluster_regs)); 2625 - 2626 - kfree(cluster_regs); 2627 - 2628 - return ret; 2629 - }; 2630 - 2631 - static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = { 2632 - [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0), 2633 - [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5), 2634 - [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12), 2635 - [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14), 2636 - [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16), 2637 - [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31), 2638 - [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31), 2639 - [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28), 2640 - [VOP2_WIN_YRGB_MST] = 
REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31), 2641 - [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31), 2642 - [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17), 2643 - [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15), 2644 - [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31), 2645 - [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0), 2646 - [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1), 2647 - [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3), 2648 - [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31), 2649 - [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29), 2650 - [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31), 2651 - [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8), 2652 - [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16), 2653 - /* RK3588 only, reserved register on rk3568 */ 2654 - [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1), 2655 - 2656 - /* Scale */ 2657 - [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15), 2658 - [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31), 2659 - [VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15), 2660 - [VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31), 2661 - [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1), 2662 - [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3), 2663 - [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5), 2664 - [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7), 2665 - [VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9), 2666 - [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11), 2667 - [VOP2_WIN_CBCR_VER_SCL_MODE] = 
REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13), 2668 - [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15), 2669 - [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17), 2670 - [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8), 2671 - [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9), 2672 - [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10), 2673 - [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11), 2674 - [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff }, 2675 - [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff }, 2676 - [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff }, 2677 - [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff }, 2678 - [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff }, 2679 - [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff }, 2680 - [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff }, 2681 - [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff }, 2682 - [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff }, 2683 - [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff }, 2684 - [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff }, 2685 - [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff }, 2686 - [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff }, 2687 - [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff }, 2688 - [VOP2_WIN_AFBC_TRANSFORM_OFFSET] = { .reg = 0xffffffff }, 2689 - [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff }, 2690 - [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff }, 2691 - [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff }, 2692 - [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff }, 2693 - }; 2694 - 2695 - static int vop2_esmart_init(struct vop2_win *win) 2696 - { 2697 - struct vop2 *vop2 = win->vop2; 2698 - struct reg_field *esmart_regs; 2699 - int ret, i; 2700 - 2701 - esmart_regs = kmemdup(vop2_esmart_regs, sizeof(vop2_esmart_regs), 2702 - GFP_KERNEL); 2703 - if (!esmart_regs) 2704 - return -ENOMEM; 2705 - 2706 - for (i = 0; i 
< ARRAY_SIZE(vop2_esmart_regs); i++) 2707 - if (esmart_regs[i].reg != 0xffffffff) 2708 - esmart_regs[i].reg += win->offset; 2709 - 2710 - ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg, 2711 - esmart_regs, 2712 - ARRAY_SIZE(vop2_esmart_regs)); 2713 - 2714 - kfree(esmart_regs); 2715 - 2716 - return ret; 3422 + return 0; 2717 3423 }; 2718 3424 2719 3425 static int vop2_win_init(struct vop2 *vop2) ··· 2574 3596 win->win_id = i; 2575 3597 win->vop2 = vop2; 2576 3598 if (vop2_cluster_window(win)) 2577 - ret = vop2_cluster_init(win); 3599 + ret = vop2_regmap_init(win, vop2->data->cluster_reg, 3600 + vop2->data->nr_cluster_regs); 2578 3601 else 2579 - ret = vop2_esmart_init(win); 3602 + ret = vop2_regmap_init(win, vop2->data->smart_reg, 3603 + vop2->data->nr_smart_regs); 2580 3604 if (ret) 2581 3605 return ret; 2582 3606 } ··· 2635 3655 2636 3656 vop2->dev = dev; 2637 3657 vop2->data = vop2_data; 3658 + vop2->ops = vop2_data->ops; 3659 + vop2->version = vop2_data->version; 2638 3660 vop2->drm = drm; 2639 3661 2640 3662 dev_set_drvdata(dev, vop2); 2641 3663 2642 3664 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vop"); 2643 - if (!res) { 2644 - drm_err(vop2->drm, "failed to get vop2 register byname\n"); 2645 - return -EINVAL; 2646 - } 3665 + if (!res) 3666 + return dev_err_probe(drm->dev, -EINVAL, 3667 + "failed to get vop2 register byname\n"); 2647 3668 2648 3669 vop2->res = res; 2649 3670 vop2->regs = devm_ioremap_resource(dev, res); ··· 2669 3688 if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_GRF) { 2670 3689 vop2->sys_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); 2671 3690 if (IS_ERR(vop2->sys_grf)) 2672 - return dev_err_probe(dev, PTR_ERR(vop2->sys_grf), "cannot get sys_grf"); 3691 + return dev_err_probe(drm->dev, PTR_ERR(vop2->sys_grf), 3692 + "cannot get sys_grf\n"); 2673 3693 } 2674 3694 2675 3695 if (vop2_data->feature & VOP2_FEATURE_HAS_VOP_GRF) { 2676 3696 vop2->vop_grf = 
syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vop-grf"); 2677 3697 if (IS_ERR(vop2->vop_grf)) 2678 - return dev_err_probe(dev, PTR_ERR(vop2->vop_grf), "cannot get vop_grf"); 3698 + return dev_err_probe(drm->dev, PTR_ERR(vop2->vop_grf), 3699 + "cannot get vop_grf\n"); 2679 3700 } 2680 3701 2681 3702 if (vop2_data->feature & VOP2_FEATURE_HAS_VO1_GRF) { 2682 3703 vop2->vo1_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vo1-grf"); 2683 3704 if (IS_ERR(vop2->vo1_grf)) 2684 - return dev_err_probe(dev, PTR_ERR(vop2->vo1_grf), "cannot get vo1_grf"); 3705 + return dev_err_probe(drm->dev, PTR_ERR(vop2->vo1_grf), 3706 + "cannot get vo1_grf\n"); 2685 3707 } 2686 3708 2687 3709 if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_PMU) { 2688 3710 vop2->sys_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,pmu"); 2689 3711 if (IS_ERR(vop2->sys_pmu)) 2690 - return dev_err_probe(dev, PTR_ERR(vop2->sys_pmu), "cannot get sys_pmu"); 3712 + return dev_err_probe(drm->dev, PTR_ERR(vop2->sys_pmu), 3713 + "cannot get sys_pmu\n"); 2691 3714 } 2692 3715 2693 3716 vop2->hclk = devm_clk_get(vop2->dev, "hclk"); 2694 - if (IS_ERR(vop2->hclk)) { 2695 - drm_err(vop2->drm, "failed to get hclk source\n"); 2696 - return PTR_ERR(vop2->hclk); 2697 - } 3717 + if (IS_ERR(vop2->hclk)) 3718 + return dev_err_probe(drm->dev, PTR_ERR(vop2->hclk), 3719 + "failed to get hclk source\n"); 2698 3720 2699 3721 vop2->aclk = devm_clk_get(vop2->dev, "aclk"); 2700 - if (IS_ERR(vop2->aclk)) { 2701 - drm_err(vop2->drm, "failed to get aclk source\n"); 2702 - return PTR_ERR(vop2->aclk); 2703 - } 3722 + if (IS_ERR(vop2->aclk)) 3723 + return dev_err_probe(drm->dev, PTR_ERR(vop2->aclk), 3724 + "failed to get aclk source\n"); 2704 3725 2705 3726 vop2->pclk = devm_clk_get_optional(vop2->dev, "pclk_vop"); 2706 - if (IS_ERR(vop2->pclk)) { 2707 - drm_err(vop2->drm, "failed to get pclk source\n"); 2708 - return PTR_ERR(vop2->pclk); 2709 - } 3727 + if (IS_ERR(vop2->pclk)) 3728 + return 
dev_err_probe(drm->dev, PTR_ERR(vop2->pclk), 3729 + "failed to get pclk source\n"); 2710 3730 2711 3731 vop2->pll_hdmiphy0 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy0"); 2712 - if (IS_ERR(vop2->pll_hdmiphy0)) { 2713 - drm_err(vop2->drm, "failed to get pll_hdmiphy0\n"); 2714 - return PTR_ERR(vop2->pll_hdmiphy0); 2715 - } 3732 + if (IS_ERR(vop2->pll_hdmiphy0)) 3733 + return dev_err_probe(drm->dev, PTR_ERR(vop2->pll_hdmiphy0), 3734 + "failed to get pll_hdmiphy0\n"); 3735 + 3736 + vop2->pll_hdmiphy1 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy1"); 3737 + if (IS_ERR(vop2->pll_hdmiphy1)) 3738 + return dev_err_probe(drm->dev, PTR_ERR(vop2->pll_hdmiphy1), 3739 + "failed to get pll_hdmiphy1\n"); 2716 3740 2717 3741 vop2->irq = platform_get_irq(pdev, 0); 2718 - if (vop2->irq < 0) { 2719 - drm_err(vop2->drm, "cannot find irq for vop2\n"); 2720 - return vop2->irq; 2721 - } 3742 + if (vop2->irq < 0) 3743 + return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n"); 2722 3744 2723 3745 mutex_init(&vop2->vop2_lock); 2724 3746 ··· 2732 3748 ret = vop2_create_crtcs(vop2); 2733 3749 if (ret) 2734 3750 return ret; 3751 + 3752 + if (vop2->version >= VOP_VERSION_RK3576) { 3753 + struct drm_crtc *crtc; 3754 + 3755 + drm_for_each_crtc(crtc, drm) { 3756 + struct vop2_video_port *vp = to_vop2_video_port(crtc); 3757 + int vp_irq; 3758 + const char *irq_name = devm_kasprintf(dev, GFP_KERNEL, "vp%d", vp->id); 3759 + 3760 + if (!irq_name) 3761 + return -ENOMEM; 3762 + 3763 + vp_irq = platform_get_irq_byname(pdev, irq_name); 3764 + if (vp_irq < 0) 3765 + return dev_err_probe(drm->dev, vp_irq, 3766 + "cannot find irq for vop2 vp%d\n", vp->id); 3767 + 3768 + ret = devm_request_irq(dev, vp_irq, rk3576_vp_isr, IRQF_SHARED, irq_name, 3769 + vp); 3770 + if (ret) 3771 + dev_err_probe(drm->dev, ret, 3772 + "request irq for vop2 vp%d failed\n", vp->id); 3773 + } 3774 + } 2735 3775 2736 3776 ret = vop2_find_rgb_encoder(vop2); 2737 3777 if (ret >= 0) {
+274 -4
drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
··· 9 9 10 10 #include <linux/regmap.h> 11 11 #include <drm/drm_modes.h> 12 + #include <dt-bindings/soc/rockchip,vop2.h> 12 13 #include "rockchip_drm_drv.h" 13 14 #include "rockchip_drm_vop.h" 15 + 16 + #define VOP2_VERSION(major, minor, build) ((major) << 24 | (minor) << 16 | (build)) 17 + 18 + /* The VOP version of new SoC is bigger than the old */ 19 + #define VOP_VERSION_RK3568 VOP2_VERSION(0x40, 0x15, 0x8023) 20 + #define VOP_VERSION_RK3588 VOP2_VERSION(0x40, 0x17, 0x6786) 21 + #define VOP_VERSION_RK3528 VOP2_VERSION(0x50, 0x17, 0x1263) 22 + #define VOP_VERSION_RK3562 VOP2_VERSION(0x50, 0x17, 0x4350) 23 + #define VOP_VERSION_RK3576 VOP2_VERSION(0x50, 0x19, 0x9765) 14 24 15 25 #define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0) 16 26 ··· 42 32 VOP2_DLY_MODE_HISO_S, /** HDR in SDR out mode, as a SDR window */ 43 33 VOP2_DLY_MODE_HIHO_H, /** HDR in HDR out mode, as a HDR window */ 44 34 VOP2_DLY_MODE_MAX, 35 + }; 36 + 37 + enum vop2_dly_module { 38 + VOP2_DLY_WIN, /** Win delay cycle for this VP */ 39 + VOP2_DLY_LAYER_MIX, /** Layer Mix delay cycle for this VP */ 40 + VOP2_DLY_HDR_MIX, /** HDR delay cycle for this VP */ 41 + VOP2_DLY_MAX, 45 42 }; 46 43 47 44 enum vop2_scale_up_mode { ··· 74 57 #define VOP2_PD_DSC_8K BIT(5) 75 58 #define VOP2_PD_DSC_4K BIT(6) 76 59 #define VOP2_PD_ESMART BIT(7) 60 + 61 + #define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \ 62 + (x) == ROCKCHIP_VOP2_EP_HDMI1) 63 + 64 + #define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \ 65 + (x) == ROCKCHIP_VOP2_EP_DP1) 66 + 67 + #define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \ 68 + (x) == ROCKCHIP_VOP2_EP_EDP1) 69 + 70 + #define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \ 71 + (x) == ROCKCHIP_VOP2_EP_MIPI1) 72 + 73 + #define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \ 74 + (x) == ROCKCHIP_VOP2_EP_LVDS1) 75 + 76 + #define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0) 77 77 78 78 enum vop2_win_regs { 79 79 
VOP2_WIN_ENABLE, ··· 147 113 VOP2_WIN_AFBC_UV_SWAP, 148 114 VOP2_WIN_AFBC_AUTO_GATING_EN, 149 115 VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 116 + VOP2_WIN_AFBC_PLD_OFFSET_EN, 150 117 VOP2_WIN_AFBC_PIC_VIR_WIDTH, 151 118 VOP2_WIN_AFBC_TILE_NUM, 152 119 VOP2_WIN_AFBC_PIC_OFFSET, 153 120 VOP2_WIN_AFBC_PIC_SIZE, 154 121 VOP2_WIN_AFBC_DSP_OFFSET, 155 - VOP2_WIN_AFBC_TRANSFORM_OFFSET, 122 + VOP2_WIN_AFBC_PLD_OFFSET, 123 + VOP2_WIN_TRANSFORM_OFFSET, 156 124 VOP2_WIN_AFBC_HDR_PTR, 157 125 VOP2_WIN_AFBC_HALF_BLOCK_EN, 158 126 VOP2_WIN_AFBC_ROTATE_270, 159 127 VOP2_WIN_AFBC_ROTATE_90, 128 + 129 + VOP2_WIN_VP_SEL, 130 + VOP2_WIN_DLY_NUM, 131 + 160 132 VOP2_WIN_MAX_REG, 161 133 }; 162 134 ··· 180 140 unsigned int phys_id; 181 141 182 142 u32 base; 143 + u32 possible_vp_mask; 183 144 enum drm_plane_type type; 184 145 185 146 u32 nformats; ··· 189 148 const unsigned int supported_rotations; 190 149 191 150 /** 192 - * @layer_sel_id: defined by register OVERLAY_LAYER_SEL of VOP2 151 + * @layer_sel_id: defined by register OVERLAY_LAYER_SEL or PORTn_LAYER_SEL 193 152 */ 194 - unsigned int layer_sel_id; 153 + unsigned int layer_sel_id[ROCKCHIP_MAX_CRTC]; 195 154 uint64_t feature; 196 155 197 156 uint8_t axi_bus_id; ··· 203 162 const u8 dly[VOP2_DLY_MODE_MAX]; 204 163 }; 205 164 165 + struct vop2_win { 166 + struct vop2 *vop2; 167 + struct drm_plane base; 168 + const struct vop2_win_data *data; 169 + struct regmap_field *reg[VOP2_WIN_MAX_REG]; 170 + 171 + /** 172 + * @win_id: graphic window id, a cluster may be split into two 173 + * graphics windows. 
174 + */ 175 + u8 win_id; 176 + u8 delay; 177 + u32 offset; 178 + 179 + enum drm_plane_type type; 180 + }; 181 + 206 182 struct vop2_video_port_data { 207 183 unsigned int id; 208 184 u32 feature; ··· 228 170 struct vop_rect max_output; 229 171 const u8 pre_scan_max_dly[4]; 230 172 unsigned int offset; 173 + /** 174 + * @pixel_rate: pixel per cycle 175 + */ 176 + u8 pixel_rate; 177 + }; 178 + 179 + struct vop2_video_port { 180 + struct drm_crtc crtc; 181 + struct vop2 *vop2; 182 + struct clk *dclk; 183 + struct clk *dclk_src; 184 + unsigned int id; 185 + const struct vop2_video_port_data *data; 186 + 187 + struct completion dsp_hold_completion; 188 + 189 + /** 190 + * @win_mask: Bitmask of windows attached to the video port; 191 + */ 192 + u32 win_mask; 193 + 194 + struct vop2_win *primary_plane; 195 + struct drm_pending_vblank_event *event; 196 + 197 + unsigned int nlayers; 198 + }; 199 + 200 + /** 201 + * struct vop2_ops - helper operations for vop2 hardware 202 + * 203 + * These hooks are used by the common part of the vop2 driver to 204 + * implement the proper behaviour of different variants. 
205 + */ 206 + struct vop2_ops { 207 + unsigned long (*setup_intf_mux)(struct vop2_video_port *vp, int ep_id, u32 polflags); 208 + void (*setup_bg_dly)(struct vop2_video_port *vp); 209 + void (*setup_overlay)(struct vop2_video_port *vp); 231 210 }; 232 211 233 212 struct vop2_data { 234 213 u8 nr_vps; 235 214 u64 feature; 215 + u32 version; 216 + const struct vop2_ops *ops; 236 217 const struct vop2_win_data *win; 237 218 const struct vop2_video_port_data *vp; 219 + const struct reg_field *cluster_reg; 220 + const struct reg_field *smart_reg; 238 221 const struct vop2_regs_dump *regs_dump; 239 222 struct vop_rect max_input; 240 223 struct vop_rect max_output; 241 224 225 + unsigned int nr_cluster_regs; 226 + unsigned int nr_smart_regs; 242 227 unsigned int win_size; 243 228 unsigned int regs_dump_size; 244 229 unsigned int soc_id; 230 + }; 231 + 232 + struct vop2 { 233 + u32 version; 234 + struct device *dev; 235 + struct drm_device *drm; 236 + struct vop2_video_port vps[ROCKCHIP_MAX_CRTC]; 237 + 238 + const struct vop2_data *data; 239 + const struct vop2_ops *ops; 240 + /* 241 + * Number of windows that are registered as plane, may be less than the 242 + * total number of hardware windows. 243 + */ 244 + u32 registered_num_wins; 245 + 246 + struct resource *res; 247 + void __iomem *regs; 248 + struct regmap *map; 249 + 250 + struct regmap *sys_grf; 251 + struct regmap *vop_grf; 252 + struct regmap *vo1_grf; 253 + struct regmap *sys_pmu; 254 + 255 + /* physical map length of vop2 register */ 256 + u32 len; 257 + 258 + void __iomem *lut_regs; 259 + 260 + /* protects crtc enable/disable */ 261 + struct mutex vop2_lock; 262 + 263 + int irq; 264 + 265 + /* 266 + * Some global resources are shared between all video ports(crtcs), so 267 + * we need a ref counter here. 
268 + */ 269 + unsigned int enable_count; 270 + struct clk *hclk; 271 + struct clk *aclk; 272 + struct clk *pclk; 273 + struct clk *pll_hdmiphy0; 274 + struct clk *pll_hdmiphy1; 275 + 276 + /* optional internal rgb encoder */ 277 + struct rockchip_rgb *rgb; 278 + 279 + /* must be put at the end of the struct */ 280 + struct vop2_win win[]; 245 281 }; 246 282 247 283 /* interrupt define */ ··· 392 240 #define RK3568_REG_CFG_DONE 0x000 393 241 #define RK3568_VERSION_INFO 0x004 394 242 #define RK3568_SYS_AUTO_GATING_CTRL 0x008 243 + #define RK3576_SYS_MMU_CTRL_IMD 0x020 395 244 #define RK3568_SYS_AXI_LUT_CTRL 0x024 396 245 #define RK3568_DSP_IF_EN 0x028 246 + #define RK3576_SYS_PORT_CTRL_IMD 0x028 397 247 #define RK3568_DSP_IF_CTRL 0x02c 398 248 #define RK3568_DSP_IF_POL 0x030 249 + #define RK3576_SYS_CLUSTER_PD_CTRL_IMD 0x030 399 250 #define RK3588_SYS_PD_CTRL 0x034 400 251 #define RK3568_WB_CTRL 0x40 401 252 #define RK3568_WB_XSCAL_FACTOR 0x44 ··· 418 263 #define RK3568_VP_INT_CLR(vp) (0xA4 + (vp) * 0x10) 419 264 #define RK3568_VP_INT_STATUS(vp) (0xA8 + (vp) * 0x10) 420 265 #define RK3568_VP_INT_RAW_STATUS(vp) (0xAC + (vp) * 0x10) 266 + #define RK3576_WB_CTRL 0x100 267 + #define RK3576_WB_XSCAL_FACTOR 0x104 268 + #define RK3576_WB_YRGB_MST 0x108 269 + #define RK3576_WB_CBR_MST 0x10C 270 + #define RK3576_WB_VIR_STRIDE 0x110 271 + #define RK3576_WB_TIMEOUT_CTRL 0x114 272 + #define RK3576_MIPI0_IF_CTRL 0x180 273 + #define RK3576_HDMI0_IF_CTRL 0x184 274 + #define RK3576_EDP0_IF_CTRL 0x188 275 + #define RK3576_DP0_IF_CTRL 0x18C 276 + #define RK3576_RGB_IF_CTRL 0x194 277 + #define RK3576_DP1_IF_CTRL 0x1A4 278 + #define RK3576_DP2_IF_CTRL 0x1B0 279 + 280 + /* Extra OVL register definition */ 281 + #define RK3576_SYS_EXTRA_ALPHA_CTRL 0x500 282 + #define RK3576_CLUSTER0_MIX_SRC_COLOR_CTRL 0x530 283 + #define RK3576_CLUSTER0_MIX_DST_COLOR_CTRL 0x534 284 + #define RK3576_CLUSTER0_MIX_SRC_ALPHA_CTRL 0x538 285 + #define RK3576_CLUSTER0_MIX_DST_ALPHA_CTRL 0x53c 286 + #define 
RK3576_CLUSTER1_MIX_SRC_COLOR_CTRL 0x540 287 + #define RK3576_CLUSTER1_MIX_DST_COLOR_CTRL 0x544 288 + #define RK3576_CLUSTER1_MIX_SRC_ALPHA_CTRL 0x548 289 + #define RK3576_CLUSTER1_MIX_DST_ALPHA_CTRL 0x54c 290 + 291 + /* OVL registers for Video Port definition */ 292 + #define RK3576_OVL_CTRL(vp) (0x600 + (vp) * 0x100) 293 + #define RK3576_OVL_LAYER_SEL(vp) (0x604 + (vp) * 0x100) 294 + #define RK3576_OVL_MIX0_SRC_COLOR_CTRL(vp) (0x620 + (vp) * 0x100) 295 + #define RK3576_OVL_MIX0_DST_COLOR_CTRL(vp) (0x624 + (vp) * 0x100) 296 + #define RK3576_OVL_MIX0_SRC_ALPHA_CTRL(vp) (0x628 + (vp) * 0x100) 297 + #define RK3576_OVL_MIX0_DST_ALPHA_CTRL(vp) (0x62C + (vp) * 0x100) 298 + #define RK3576_OVL_MIX1_SRC_COLOR_CTRL(vp) (0x630 + (vp) * 0x100) 299 + #define RK3576_OVL_MIX1_DST_COLOR_CTRL(vp) (0x634 + (vp) * 0x100) 300 + #define RK3576_OVL_MIX1_SRC_ALPHA_CTRL(vp) (0x638 + (vp) * 0x100) 301 + #define RK3576_OVL_MIX1_DST_ALPHA_CTRL(vp) (0x63C + (vp) * 0x100) 302 + #define RK3576_OVL_MIX2_SRC_COLOR_CTRL(vp) (0x640 + (vp) * 0x100) 303 + #define RK3576_OVL_MIX2_DST_COLOR_CTRL(vp) (0x644 + (vp) * 0x100) 304 + #define RK3576_OVL_MIX2_SRC_ALPHA_CTRL(vp) (0x648 + (vp) * 0x100) 305 + #define RK3576_OVL_MIX2_DST_ALPHA_CTRL(vp) (0x64C + (vp) * 0x100) 306 + #define RK3576_EXTRA_OVL_SRC_COLOR_CTRL(vp) (0x650 + (vp) * 0x100) 307 + #define RK3576_EXTRA_OVL_DST_COLOR_CTRL(vp) (0x654 + (vp) * 0x100) 308 + #define RK3576_EXTRA_OVL_SRC_ALPHA_CTRL(vp) (0x658 + (vp) * 0x100) 309 + #define RK3576_EXTRA_OVL_DST_ALPHA_CTRL(vp) (0x65C + (vp) * 0x100) 310 + #define RK3576_OVL_HDR_SRC_COLOR_CTRL(vp) (0x660 + (vp) * 0x100) 311 + #define RK3576_OVL_HDR_DST_COLOR_CTRL(vp) (0x664 + (vp) * 0x100) 312 + #define RK3576_OVL_HDR_SRC_ALPHA_CTRL(vp) (0x668 + (vp) * 0x100) 313 + #define RK3576_OVL_HDR_DST_ALPHA_CTRL(vp) (0x66C + (vp) * 0x100) 314 + #define RK3576_OVL_BG_MIX_CTRL(vp) (0x670 + (vp) * 0x100) 421 315 422 316 /* Video Port registers definition */ 423 317 #define RK3568_VP0_CTRL_BASE 0x0C00 ··· 539 335 
#define RK3568_CLUSTER_WIN_DSP_INFO 0x24 540 336 #define RK3568_CLUSTER_WIN_DSP_ST 0x28 541 337 #define RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB 0x30 542 - #define RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET 0x3C 338 + #define RK3568_CLUSTER_WIN_TRANSFORM_OFFSET 0x3C 543 339 #define RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL 0x50 544 340 #define RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE 0x54 545 341 #define RK3568_CLUSTER_WIN_AFBCD_HDR_PTR 0x58 ··· 549 345 #define RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET 0x68 550 346 #define RK3568_CLUSTER_WIN_AFBCD_CTRL 0x6C 551 347 348 + #define RK3576_CLUSTER_WIN_AFBCD_PLD_PTR_OFFSET 0x78 349 + 552 350 #define RK3568_CLUSTER_CTRL 0x100 351 + #define RK3576_CLUSTER_PORT_SEL_IMD 0x1F4 352 + #define RK3576_CLUSTER_DLY_NUM 0x1F8 553 353 554 354 /* (E)smart register definition, offset relative to window base */ 555 355 #define RK3568_SMART_CTRL0 0x00 ··· 604 396 #define RK3568_SMART_REGION3_SCL_FACTOR_CBR 0xC8 605 397 #define RK3568_SMART_REGION3_SCL_OFFSET 0xCC 606 398 #define RK3568_SMART_COLOR_KEY_CTRL 0xD0 399 + #define RK3576_SMART_ALPHA_MAP 0xD8 400 + #define RK3576_SMART_PORT_SEL_IMD 0xF4 401 + #define RK3576_SMART_DLY_NUM 0xF8 607 402 608 403 /* HDR register definition */ 609 404 #define RK3568_HDR_LUT_CTRL 0x2000 ··· 755 544 756 545 #define POLFLAG_DCLK_INV BIT(3) 757 546 547 + #define RK3576_OVL_CTRL__YUV_MODE BIT(0) 548 + #define RK3576_OVL_BG_MIX_CTRL__BG_DLY GENMASK(31, 24) 549 + 550 + #define RK3576_DSP_IF_CFG_DONE_IMD BIT(31) 551 + #define RK3576_DSP_IF_DCLK_SEL_OUT BIT(21) 552 + #define RK3576_DSP_IF_PCLK_DIV BIT(20) 553 + #define RK3576_DSP_IF_PIN_POL GENMASK(5, 4) 554 + #define RK3576_DSP_IF_MUX GENMASK(3, 2) 555 + #define RK3576_DSP_IF_CLK_OUT_EN BIT(1) 556 + #define RK3576_DSP_IF_EN BIT(0) 557 + 758 558 enum vop2_layer_phy_id { 759 559 ROCKCHIP_VOP2_CLUSTER0 = 0, 760 560 ROCKCHIP_VOP2_CLUSTER1, ··· 781 559 }; 782 560 783 561 extern const struct component_ops vop2_component_ops; 562 + 563 + static inline void vop2_writel(struct vop2 
*vop2, u32 offset, u32 v) 564 + { 565 + regmap_write(vop2->map, offset, v); 566 + } 567 + 568 + static inline void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v) 569 + { 570 + regmap_write(vp->vop2->map, vp->data->offset + offset, v); 571 + } 572 + 573 + static inline u32 vop2_readl(struct vop2 *vop2, u32 offset) 574 + { 575 + u32 val; 576 + 577 + regmap_read(vop2->map, offset, &val); 578 + 579 + return val; 580 + } 581 + 582 + static inline u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset) 583 + { 584 + u32 val; 585 + 586 + regmap_read(vp->vop2->map, vp->data->offset + offset, &val); 587 + 588 + return val; 589 + } 590 + 591 + static inline void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v) 592 + { 593 + regmap_field_write(win->reg[reg], v); 594 + } 595 + 596 + static inline bool vop2_cluster_window(const struct vop2_win *win) 597 + { 598 + return win->data->feature & WIN_FEATURE_CLUSTER; 599 + } 600 + 601 + static inline struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc) 602 + { 603 + return container_of(crtc, struct vop2_video_port, crtc); 604 + } 605 + 606 + static inline struct vop2_win *to_vop2_win(struct drm_plane *p) 607 + { 608 + return container_of(p, struct vop2_win, base); 609 + } 784 610 785 611 #endif /* _ROCKCHIP_DRM_VOP2_H */
+29 -51
drivers/gpu/drm/rockchip/rockchip_lvds.c
··· 448 448 static int rk3288_lvds_probe(struct platform_device *pdev, 449 449 struct rockchip_lvds *lvds) 450 450 { 451 - int ret; 452 - 453 451 lvds->regs = devm_platform_ioremap_resource(pdev, 0); 454 452 if (IS_ERR(lvds->regs)) 455 453 return PTR_ERR(lvds->regs); 456 454 457 - lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds"); 458 - if (IS_ERR(lvds->pclk)) { 459 - DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n"); 460 - return PTR_ERR(lvds->pclk); 461 - } 455 + lvds->pclk = devm_clk_get_prepared(lvds->dev, "pclk_lvds"); 456 + if (IS_ERR(lvds->pclk)) 457 + return dev_err_probe(lvds->dev, PTR_ERR(lvds->pclk), 458 + "could not get or prepare pclk_lvds\n"); 462 459 463 460 lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins), 464 461 GFP_KERNEL); ··· 464 467 465 468 lvds->pins->p = devm_pinctrl_get(lvds->dev); 466 469 if (IS_ERR(lvds->pins->p)) { 467 - DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n"); 470 + dev_warn(lvds->dev, "no pinctrl handle\n"); 468 471 devm_kfree(lvds->dev, lvds->pins); 469 472 lvds->pins = NULL; 470 473 } else { 471 474 lvds->pins->default_state = 472 475 pinctrl_lookup_state(lvds->pins->p, "lcdc"); 473 476 if (IS_ERR(lvds->pins->default_state)) { 474 - DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n"); 477 + dev_warn(lvds->dev, "no default pinctrl state\n"); 475 478 devm_kfree(lvds->dev, lvds->pins); 476 479 lvds->pins = NULL; 477 480 } 478 - } 479 - 480 - ret = clk_prepare(lvds->pclk); 481 - if (ret < 0) { 482 - DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n"); 483 - return ret; 484 481 } 485 482 486 483 return 0; ··· 546 555 547 556 lvds->drm_dev = drm_dev; 548 557 port = of_graph_get_port_by_id(dev->of_node, 1); 549 - if (!port) { 550 - DRM_DEV_ERROR(dev, 551 - "can't found port point, please init lvds panel port!\n"); 552 - return -EINVAL; 553 - } 558 + if (!port) 559 + return dev_err_probe(dev, -EINVAL, 560 + "can't found port point, please init lvds panel port!\n"); 561 + 554 562 for_each_child_of_node(port, 
endpoint) { 555 563 child_count++; 556 564 of_property_read_u32(endpoint, "reg", &endpoint_id); ··· 561 571 } 562 572 } 563 573 if (!child_count) { 564 - DRM_DEV_ERROR(dev, "lvds port does not have any children\n"); 565 - ret = -EINVAL; 574 + ret = dev_err_probe(dev, -EINVAL, "lvds port does not have any children\n"); 566 575 goto err_put_port; 567 576 } else if (ret) { 568 577 dev_err_probe(dev, ret, "failed to find panel and bridge node\n"); ··· 578 589 lvds->output = rockchip_lvds_name_to_output(name); 579 590 580 591 if (lvds->output < 0) { 581 - DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name); 582 - ret = lvds->output; 592 + ret = dev_err_probe(dev, lvds->output, "invalid output type [%s]\n", name); 583 593 goto err_put_remote; 584 594 } 585 595 ··· 589 601 lvds->format = rockchip_lvds_name_to_format(name); 590 602 591 603 if (lvds->format < 0) { 592 - DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name); 593 - ret = lvds->format; 604 + ret = dev_err_probe(dev, lvds->format, 605 + "invalid data-mapping format [%s]\n", name); 594 606 goto err_put_remote; 595 607 } 596 608 ··· 600 612 601 613 ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS); 602 614 if (ret < 0) { 603 - DRM_DEV_ERROR(drm_dev->dev, 604 - "failed to initialize encoder: %d\n", ret); 615 + drm_err(drm_dev, 616 + "failed to initialize encoder: %d\n", ret); 605 617 goto err_put_remote; 606 618 } 607 619 ··· 614 626 &rockchip_lvds_connector_funcs, 615 627 DRM_MODE_CONNECTOR_LVDS); 616 628 if (ret < 0) { 617 - DRM_DEV_ERROR(drm_dev->dev, 618 - "failed to initialize connector: %d\n", ret); 629 + drm_err(drm_dev, 630 + "failed to initialize connector: %d\n", ret); 619 631 goto err_free_encoder; 620 632 } 621 633 ··· 629 641 630 642 connector = drm_bridge_connector_init(lvds->drm_dev, encoder); 631 643 if (IS_ERR(connector)) { 632 - DRM_DEV_ERROR(drm_dev->dev, 633 - "failed to initialize bridge connector: %pe\n", 634 - connector); 644 + drm_err(drm_dev, 645 + "failed 
to initialize bridge connector: %pe\n", 646 + connector); 635 647 ret = PTR_ERR(connector); 636 648 goto err_free_encoder; 637 649 } ··· 639 651 640 652 ret = drm_connector_attach_encoder(connector, encoder); 641 653 if (ret < 0) { 642 - DRM_DEV_ERROR(drm_dev->dev, 643 - "failed to attach encoder: %d\n", ret); 654 + drm_err(drm_dev, "failed to attach encoder: %d\n", ret); 644 655 goto err_free_connector; 645 656 } 646 657 ··· 701 714 702 715 lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node, 703 716 "rockchip,grf"); 704 - if (IS_ERR(lvds->grf)) { 705 - DRM_DEV_ERROR(dev, "missing rockchip,grf property\n"); 706 - return PTR_ERR(lvds->grf); 707 - } 717 + if (IS_ERR(lvds->grf)) 718 + return dev_err_probe(dev, PTR_ERR(lvds->grf), "missing rockchip,grf property\n"); 708 719 709 720 ret = lvds->soc_data->probe(pdev, lvds); 710 - if (ret) { 711 - DRM_DEV_ERROR(dev, "Platform initialization failed\n"); 712 - return ret; 713 - } 721 + if (ret) 722 + return dev_err_probe(dev, ret, "Platform initialization failed\n"); 714 723 715 724 dev_set_drvdata(dev, lvds); 716 725 717 726 ret = component_add(&pdev->dev, &rockchip_lvds_component_ops); 718 - if (ret < 0) { 719 - DRM_DEV_ERROR(dev, "failed to add component\n"); 720 - clk_unprepare(lvds->pclk); 721 - } 727 + if (ret < 0) 728 + return dev_err_probe(dev, ret, "failed to add component\n"); 722 729 723 - return ret; 730 + return 0; 724 731 } 725 732 726 733 static void rockchip_lvds_remove(struct platform_device *pdev) 727 734 { 728 - struct rockchip_lvds *lvds = platform_get_drvdata(pdev); 729 - 730 735 component_del(&pdev->dev, &rockchip_lvds_component_ops); 731 - clk_unprepare(lvds->pclk); 732 736 } 733 737 734 738 struct platform_driver rockchip_lvds_driver = {
+1784 -14
drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
··· 4 4 * Author: Andy Yan <andy.yan@rock-chips.com> 5 5 */ 6 6 7 + #include <linux/bitfield.h> 7 8 #include <linux/kernel.h> 8 9 #include <linux/component.h> 9 10 #include <linux/mod_devicetable.h> 10 11 #include <linux/platform_device.h> 11 12 #include <linux/of.h> 13 + #include <drm/drm_blend.h> 12 14 #include <drm/drm_fourcc.h> 15 + #include <drm/drm_framebuffer.h> 13 16 #include <drm/drm_plane.h> 14 17 #include <drm/drm_print.h> 15 18 16 19 #include "rockchip_drm_vop2.h" 20 + 21 + union vop2_alpha_ctrl { 22 + u32 val; 23 + struct { 24 + /* [0:1] */ 25 + u32 color_mode:1; 26 + u32 alpha_mode:1; 27 + /* [2:3] */ 28 + u32 blend_mode:2; 29 + u32 alpha_cal_mode:1; 30 + /* [5:7] */ 31 + u32 factor_mode:3; 32 + /* [8:9] */ 33 + u32 alpha_en:1; 34 + u32 src_dst_swap:1; 35 + u32 reserved:6; 36 + /* [16:23] */ 37 + u32 glb_alpha:8; 38 + } bits; 39 + }; 40 + 41 + struct vop2_alpha { 42 + union vop2_alpha_ctrl src_color_ctrl; 43 + union vop2_alpha_ctrl dst_color_ctrl; 44 + union vop2_alpha_ctrl src_alpha_ctrl; 45 + union vop2_alpha_ctrl dst_alpha_ctrl; 46 + }; 47 + 48 + struct vop2_alpha_config { 49 + bool src_premulti_en; 50 + bool dst_premulti_en; 51 + bool src_pixel_alpha_en; 52 + bool dst_pixel_alpha_en; 53 + u16 src_glb_alpha_value; 54 + u16 dst_glb_alpha_value; 55 + }; 17 56 18 57 static const uint32_t formats_cluster[] = { 19 58 DRM_FORMAT_XRGB2101010, ··· 69 30 DRM_FORMAT_YUV420_10BIT, /* yuv420_10bit non-Linear mode only */ 70 31 DRM_FORMAT_YUYV, /* yuv422_8bit non-Linear mode only*/ 71 32 DRM_FORMAT_Y210, /* yuv422_10bit non-Linear mode only */ 33 + }; 34 + 35 + /* 36 + * The cluster windows on rk3576 support: 37 + * RGB: linear mode and afbc 38 + * YUV: linear mode and rfbc 39 + * rfbc is a rockchip defined non-linear mode, produced by 40 + * Video decoder 41 + */ 42 + static const uint32_t formats_rk3576_cluster[] = { 43 + DRM_FORMAT_XRGB2101010, 44 + DRM_FORMAT_XBGR2101010, 45 + DRM_FORMAT_ARGB2101010, 46 + DRM_FORMAT_ABGR2101010, 47 + DRM_FORMAT_XRGB8888, 48 
+ DRM_FORMAT_ARGB8888, 49 + DRM_FORMAT_XBGR8888, 50 + DRM_FORMAT_ABGR8888, 51 + DRM_FORMAT_RGB888, 52 + DRM_FORMAT_BGR888, 53 + DRM_FORMAT_RGB565, 54 + DRM_FORMAT_BGR565, 55 + DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */ 56 + DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */ 57 + DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */ 58 + DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */ 59 + DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */ 60 + DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */ 61 + DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */ 62 + DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */ 63 + DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */ 72 64 }; 73 65 74 66 static const uint32_t formats_esmart[] = { ··· 146 76 DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */ 147 77 DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */ 148 78 DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */ 79 + }; 80 + 81 + /* 82 + * Add XRGB2101010/ARGB2101010ARGB1555/XRGB1555 83 + */ 84 + static const uint32_t formats_rk3576_esmart[] = { 85 + DRM_FORMAT_XRGB2101010, 86 + DRM_FORMAT_XBGR2101010, 87 + DRM_FORMAT_ARGB2101010, 88 + DRM_FORMAT_ABGR2101010, 89 + DRM_FORMAT_XRGB8888, 90 + DRM_FORMAT_ARGB8888, 91 + DRM_FORMAT_XBGR8888, 92 + DRM_FORMAT_ABGR8888, 93 + DRM_FORMAT_RGB888, 94 + DRM_FORMAT_BGR888, 95 + DRM_FORMAT_RGB565, 96 + DRM_FORMAT_BGR565, 97 + DRM_FORMAT_ARGB1555, 98 + DRM_FORMAT_ABGR1555, 99 + DRM_FORMAT_XRGB1555, 100 + DRM_FORMAT_XBGR1555, 101 + DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */ 102 + DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */ 103 + DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */ 104 + DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */ 105 + DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */ 106 + DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */ 107 + DRM_FORMAT_NV42, /* 
yvu444_8bit linear mode, 2 plane */ 108 + DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */ 109 + DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */ 110 + DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */ 111 + DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */ 112 + DRM_FORMAT_YUYV, /* yuv422_8bit[YUYV] linear mode */ 113 + DRM_FORMAT_UYVY, /* yuv422_8bit[UYVY] linear mode */ 149 114 }; 150 115 151 116 static const uint32_t formats_smart[] = { ··· 236 131 DRM_FORMAT_MOD_INVALID, 237 132 }; 238 133 134 + /* used from rk3576, afbc 32*8 half mode */ 135 + static const uint64_t format_modifiers_rk3576_afbc[] = { 136 + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 137 + AFBC_FORMAT_MOD_SPLIT), 138 + 139 + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 140 + AFBC_FORMAT_MOD_SPARSE | 141 + AFBC_FORMAT_MOD_SPLIT), 142 + 143 + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 144 + AFBC_FORMAT_MOD_YTR | 145 + AFBC_FORMAT_MOD_SPLIT), 146 + 147 + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 148 + AFBC_FORMAT_MOD_CBR | 149 + AFBC_FORMAT_MOD_SPLIT), 150 + 151 + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 152 + AFBC_FORMAT_MOD_CBR | 153 + AFBC_FORMAT_MOD_SPARSE | 154 + AFBC_FORMAT_MOD_SPLIT), 155 + 156 + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 157 + AFBC_FORMAT_MOD_YTR | 158 + AFBC_FORMAT_MOD_CBR | 159 + AFBC_FORMAT_MOD_SPLIT), 160 + 161 + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 162 + AFBC_FORMAT_MOD_YTR | 163 + AFBC_FORMAT_MOD_CBR | 164 + AFBC_FORMAT_MOD_SPARSE | 165 + AFBC_FORMAT_MOD_SPLIT), 166 + 167 + /* SPLIT mandates SPARSE, RGB modes mandates YTR */ 168 + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 169 + AFBC_FORMAT_MOD_YTR | 170 + AFBC_FORMAT_MOD_SPARSE | 171 + AFBC_FORMAT_MOD_SPLIT), 172 + DRM_FORMAT_MOD_LINEAR, 173 + DRM_FORMAT_MOD_INVALID, 174 + }; 175 + 176 + static const struct reg_field 
rk3568_vop_cluster_regs[VOP2_WIN_MAX_REG] = { 177 + [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0), 178 + [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5), 179 + [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14), 180 + [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18), 181 + [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31), 182 + [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31), 183 + [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31), 184 + [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31), 185 + [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31), 186 + [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19), 187 + [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15), 188 + [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31), 189 + [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8), 190 + [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9), 191 + [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11), 192 + [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3), 193 + [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8), 194 + /* RK3588 only, reserved bit on rk3568*/ 195 + [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13), 196 + 197 + /* Scale */ 198 + [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15), 199 + [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31), 200 + [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15), 201 + [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13), 202 + [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3), 203 + [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28), 204 + [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29), 205 + 206 + /* cluster 
regs */ 207 + [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1), 208 + [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0), 209 + [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7), 210 + 211 + /* afbc regs */ 212 + [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6), 213 + [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9), 214 + [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10), 215 + [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4), 216 + [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7), 217 + [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8), 218 + [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31), 219 + [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31), 220 + [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15), 221 + [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31), 222 + [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31), 223 + [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31), 224 + [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31), 225 + [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0), 226 + [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1), 227 + [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2), 228 + [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3), 229 + [VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff }, 230 + [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff }, 231 + [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff }, 232 + [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff }, 233 + [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff }, 234 + 
[VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, 235 + [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, 236 + [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff }, 237 + [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, 238 + [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff }, 239 + [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, 240 + [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff }, 241 + [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff }, 242 + }; 243 + 244 + static const struct reg_field rk3568_vop_smart_regs[VOP2_WIN_MAX_REG] = { 245 + [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0), 246 + [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5), 247 + [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12), 248 + [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14), 249 + [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16), 250 + [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31), 251 + [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31), 252 + [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28), 253 + [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31), 254 + [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31), 255 + [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17), 256 + [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15), 257 + [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31), 258 + [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0), 259 + [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1), 260 + [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3), 261 + [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31), 262 + [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29), 263 + [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31), 264 + [VOP2_WIN_AXI_YRGB_R_ID] = 
REG_FIELD(RK3568_SMART_CTRL1, 4, 8), 265 + [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16), 266 + /* RK3588 only, reserved register on rk3568 */ 267 + [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1), 268 + 269 + /* Scale */ 270 + [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15), 271 + [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31), 272 + [VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15), 273 + [VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31), 274 + [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1), 275 + [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3), 276 + [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5), 277 + [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7), 278 + [VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9), 279 + [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11), 280 + [VOP2_WIN_CBCR_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13), 281 + [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15), 282 + [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17), 283 + [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8), 284 + [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9), 285 + [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10), 286 + [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11), 287 + [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff }, 288 + [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff }, 289 + [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff }, 290 + [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff }, 291 + [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff }, 292 + [VOP2_WIN_AFBC_RB_SWAP] 
= { .reg = 0xffffffff }, 293 + [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff }, 294 + [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff }, 295 + [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff }, 296 + [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff }, 297 + [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff }, 298 + [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff }, 299 + [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff }, 300 + [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff }, 301 + [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff }, 302 + [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff }, 303 + [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff }, 304 + [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff }, 305 + [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff }, 306 + }; 307 + 308 + static const struct reg_field rk3576_vop_cluster_regs[VOP2_WIN_MAX_REG] = { 309 + [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0), 310 + [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5), 311 + [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14), 312 + [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 17, 17), 313 + [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18), 314 + [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31), 315 + [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31), 316 + [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31), 317 + [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31), 318 + [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31), 319 + [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19), 320 + [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15), 321 + [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31), 322 + [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8), 323 + [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9), 324 + [VOP2_WIN_CSC_MODE] = 
REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11), 325 + [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 4), 326 + [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 9), 327 + /* Read only bit on rk3576, writing on this bit has no effect.*/ 328 + [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13), 329 + 330 + [VOP2_WIN_VP_SEL] = REG_FIELD(RK3576_CLUSTER_PORT_SEL_IMD, 0, 1), 331 + [VOP2_WIN_DLY_NUM] = REG_FIELD(RK3576_CLUSTER_DLY_NUM, 0, 7), 332 + 333 + /* Scale */ 334 + [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15), 335 + [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31), 336 + [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3), 337 + [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15), 338 + [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 22, 23), 339 + [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28), 340 + [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29), 341 + 342 + /* cluster regs */ 343 + [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1), 344 + [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0), 345 + [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7), 346 + 347 + /* afbc regs */ 348 + [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6), 349 + [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9), 350 + [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10), 351 + [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4), 352 + [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7), 353 + [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8), 354 + [VOP2_WIN_AFBC_PLD_OFFSET_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 16, 16), 355 + [VOP2_WIN_AFBC_HDR_PTR] = 
REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31), 356 + [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31), 357 + [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15), 358 + [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31), 359 + [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31), 360 + [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31), 361 + [VOP2_WIN_AFBC_PLD_OFFSET] = REG_FIELD(RK3576_CLUSTER_WIN_AFBCD_PLD_PTR_OFFSET, 0, 31), 362 + [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31), 363 + [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0), 364 + [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1), 365 + [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2), 366 + [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3), 367 + [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff }, 368 + [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff }, 369 + [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff }, 370 + [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff }, 371 + [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, 372 + [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, 373 + [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff }, 374 + [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, 375 + [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff }, 376 + [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, 377 + [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff }, 378 + [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff }, 379 + }; 380 + 381 + static const struct reg_field rk3576_vop_smart_regs[VOP2_WIN_MAX_REG] = { 382 + [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0), 383 + [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5), 384 + [VOP2_WIN_DITHER_UP] = 
REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12), 385 + [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14), 386 + [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16), 387 + [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31), 388 + [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31), 389 + [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28), 390 + [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31), 391 + [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31), 392 + [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17), 393 + [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15), 394 + [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31), 395 + [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0), 396 + [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1), 397 + [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3), 398 + [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31), 399 + [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29), 400 + [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31), 401 + [VOP2_WIN_VP_SEL] = REG_FIELD(RK3576_SMART_PORT_SEL_IMD, 0, 1), 402 + [VOP2_WIN_DLY_NUM] = REG_FIELD(RK3576_SMART_DLY_NUM, 0, 7), 403 + [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8), 404 + [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16), 405 + [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1), 406 + 407 + /* Scale */ 408 + [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15), 409 + [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31), 410 + [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1), 411 + [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3), 412 + [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 
4, 5), 413 + [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7), 414 + [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17), 415 + [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8), 416 + [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9), 417 + [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10), 418 + [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11), 419 + [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff }, 420 + 421 + /* CBCR share the same scale factor as YRGB */ 422 + [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff }, 423 + [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff }, 424 + [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff }, 425 + [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff}, 426 + [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff}, 427 + [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff}, 428 + 429 + [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff }, 430 + [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff }, 431 + [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff }, 432 + [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff }, 433 + [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff }, 434 + [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff }, 435 + [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff }, 436 + [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff }, 437 + [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff }, 438 + [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff }, 439 + [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff }, 440 + [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff }, 441 + [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff }, 442 + [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff }, 443 + [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff }, 444 + [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff }, 445 + [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff }, 446 + [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff }, 447 + }; 
448 + 239 449 static const struct vop2_video_port_data rk3568_vop_video_ports[] = { 240 450 { 241 451 .id = 0, ··· 597 177 .name = "Smart0-win0", 598 178 .phys_id = ROCKCHIP_VOP2_SMART0, 599 179 .base = 0x1c00, 180 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), 600 181 .formats = formats_smart, 601 182 .nformats = ARRAY_SIZE(formats_smart), 602 183 .format_modifiers = format_modifiers, 603 - .layer_sel_id = 3, 184 + /* 0xf means this layer can't be attached to this VP */ 185 + .layer_sel_id = { 3, 3, 3, 0xf }, 604 186 .supported_rotations = DRM_MODE_REFLECT_Y, 605 187 .type = DRM_PLANE_TYPE_PRIMARY, 606 188 .max_upscale_factor = 8, ··· 611 189 }, { 612 190 .name = "Smart1-win0", 613 191 .phys_id = ROCKCHIP_VOP2_SMART1, 192 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), 614 193 .formats = formats_smart, 615 194 .nformats = ARRAY_SIZE(formats_smart), 616 195 .format_modifiers = format_modifiers, 617 196 .base = 0x1e00, 618 - .layer_sel_id = 7, 197 + .layer_sel_id = { 7, 7, 7, 0xf }, 619 198 .supported_rotations = DRM_MODE_REFLECT_Y, 620 199 .type = DRM_PLANE_TYPE_PRIMARY, 621 200 .max_upscale_factor = 8, ··· 625 202 }, { 626 203 .name = "Esmart1-win0", 627 204 .phys_id = ROCKCHIP_VOP2_ESMART1, 205 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), 628 206 .formats = formats_rk356x_esmart, 629 207 .nformats = ARRAY_SIZE(formats_rk356x_esmart), 630 208 .format_modifiers = format_modifiers, 631 209 .base = 0x1a00, 632 - .layer_sel_id = 6, 210 + .layer_sel_id = { 6, 6, 6, 0xf }, 633 211 .supported_rotations = DRM_MODE_REFLECT_Y, 634 212 .type = DRM_PLANE_TYPE_PRIMARY, 635 213 .max_upscale_factor = 8, ··· 639 215 }, { 640 216 .name = "Esmart0-win0", 641 217 .phys_id = ROCKCHIP_VOP2_ESMART0, 218 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), 642 219 .formats = formats_rk356x_esmart, 643 220 .nformats = ARRAY_SIZE(formats_rk356x_esmart), 644 221 .format_modifiers = format_modifiers, 645 222 .base = 0x1800, 646 - .layer_sel_id = 2, 223 + .layer_sel_id = { 2, 2, 2, 0xf }, 647 
224 .supported_rotations = DRM_MODE_REFLECT_Y, 648 225 .type = DRM_PLANE_TYPE_PRIMARY, 649 226 .max_upscale_factor = 8, ··· 654 229 .name = "Cluster0-win0", 655 230 .phys_id = ROCKCHIP_VOP2_CLUSTER0, 656 231 .base = 0x1000, 232 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), 657 233 .formats = formats_cluster, 658 234 .nformats = ARRAY_SIZE(formats_cluster), 659 235 .format_modifiers = format_modifiers_afbc, 660 - .layer_sel_id = 0, 236 + .layer_sel_id = { 0, 0, 0, 0xf }, 661 237 .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | 662 238 DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, 663 239 .max_upscale_factor = 4, ··· 670 244 .name = "Cluster1-win0", 671 245 .phys_id = ROCKCHIP_VOP2_CLUSTER1, 672 246 .base = 0x1200, 247 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), 673 248 .formats = formats_cluster, 674 249 .nformats = ARRAY_SIZE(formats_cluster), 675 250 .format_modifiers = format_modifiers_afbc, 676 - .layer_sel_id = 1, 251 + .layer_sel_id = { 1, 1, 1, 0xf }, 677 252 .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | 678 253 DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, 679 254 .type = DRM_PLANE_TYPE_OVERLAY, ··· 767 340 }, 768 341 }; 769 342 343 + static const struct vop2_video_port_data rk3576_vop_video_ports[] = { 344 + { 345 + .id = 0, 346 + .feature = VOP2_VP_FEATURE_OUTPUT_10BIT, 347 + .gamma_lut_len = 1024, 348 + .cubic_lut_len = 9 * 9 * 9, /* 9x9x9 */ 349 + .max_output = { 4096, 2304 }, 350 + /* win layer_mix hdr */ 351 + .pre_scan_max_dly = { 10, 8, 2, 0 }, 352 + .offset = 0xc00, 353 + .pixel_rate = 2, 354 + }, { 355 + .id = 1, 356 + .feature = VOP2_VP_FEATURE_OUTPUT_10BIT, 357 + .gamma_lut_len = 1024, 358 + .cubic_lut_len = 729, /* 9x9x9 */ 359 + .max_output = { 2560, 1600 }, 360 + /* win layer_mix hdr */ 361 + .pre_scan_max_dly = { 10, 6, 0, 0 }, 362 + .offset = 0xd00, 363 + .pixel_rate = 1, 364 + }, { 365 + .id = 2, 366 + .gamma_lut_len = 1024, 367 + .max_output = { 1920, 1080 }, 368 + /* win layer_mix hdr */ 369 + 
.pre_scan_max_dly = { 10, 6, 0, 0 }, 370 + .offset = 0xe00, 371 + .pixel_rate = 1, 372 + }, 373 + }; 374 + 375 + /* 376 + * rk3576 vop with 2 cluster, 4 esmart win. 377 + * Every cluster can work as 4K win or split into two 2K win. 378 + * All win in cluster support AFBCD. 379 + * 380 + * Every esmart win support 4 Multi-region. 381 + * 382 + * VP0 can use Cluster0/1 and Esmart0/2 383 + * VP1 can use Cluster0/1 and Esmart1/3 384 + * VP2 can use Esmart0/1/2/3 385 + * 386 + * Scale filter mode: 387 + * 388 + * * Cluster: 389 + * * Support prescale down: 390 + * * H/V: gt2/avg2 or gt4/avg4 391 + * * After prescale down: 392 + * * nearest-neighbor/bilinear/multi-phase filter for scale up 393 + * * nearest-neighbor/bilinear/multi-phase filter for scale down 394 + * 395 + * * Esmart: 396 + * * Support prescale down: 397 + * * H: gt2/avg2 or gt4/avg4 398 + * * V: gt2 or gt4 399 + * * After prescale down: 400 + * * nearest-neighbor/bilinear/bicubic for scale up 401 + * * nearest-neighbor/bilinear for scale down 402 + * 403 + * AXI config:: 404 + * 405 + * * Cluster0 win0: 0xa, 0xb [AXI0] 406 + * * Cluster0 win1: 0xc, 0xd [AXI0] 407 + * * Cluster1 win0: 0x6, 0x7 [AXI0] 408 + * * Cluster1 win1: 0x8, 0x9 [AXI0] 409 + * * Esmart0: 0x10, 0x11 [AXI0] 410 + * * Esmart1: 0x12, 0x13 [AXI0] 411 + * * Esmart2: 0xa, 0xb [AXI1] 412 + * * Esmart3: 0xc, 0xd [AXI1] 413 + * * Lut dma rid: 0x1, 0x2, 0x3 [AXI0] 414 + * * DCI dma rid: 0x4 [AXI0] 415 + * * Metadata rid: 0x5 [AXI0] 416 + * 417 + * * Limit: 418 + * * (1) Cluster0/1 are fixed on AXI0 by IC design 419 + * * (2) 0x0 and 0xf can't be used; 420 + * * (3) 5-bit IDs for each AXI bus 421 + * * (4) cluster and lut/dci/metadata rid must be smaller than 0xf, 422 + * * if a Cluster rid is bigger than 0xf, VOP may hang when the 423 + * * system bandwidth is extremely scarce. 
424 + */ 425 + static const struct vop2_win_data rk3576_vop_win_data[] = { 426 + { 427 + .name = "Cluster0-win0", 428 + .phys_id = ROCKCHIP_VOP2_CLUSTER0, 429 + .base = 0x1000, 430 + .possible_vp_mask = BIT(0) | BIT(1), 431 + .formats = formats_rk3576_cluster, 432 + .nformats = ARRAY_SIZE(formats_rk3576_cluster), 433 + .format_modifiers = format_modifiers_rk3576_afbc, 434 + .layer_sel_id = { 0, 0, 0xf, 0xf }, 435 + .supported_rotations = DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, 436 + .type = DRM_PLANE_TYPE_PRIMARY, 437 + .axi_bus_id = 0, 438 + .axi_yrgb_r_id = 0xa, 439 + .axi_uv_r_id = 0xb, 440 + .max_upscale_factor = 4, 441 + .max_downscale_factor = 4, 442 + .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER, 443 + }, { 444 + .name = "Cluster1-win0", 445 + .phys_id = ROCKCHIP_VOP2_CLUSTER1, 446 + .base = 0x1200, 447 + .possible_vp_mask = BIT(0) | BIT(1), 448 + .formats = formats_rk3576_cluster, 449 + .nformats = ARRAY_SIZE(formats_rk3576_cluster), 450 + .format_modifiers = format_modifiers_rk3576_afbc, 451 + .layer_sel_id = { 1, 1, 0xf, 0xf }, 452 + .supported_rotations = DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, 453 + .type = DRM_PLANE_TYPE_PRIMARY, 454 + .axi_bus_id = 0, 455 + .axi_yrgb_r_id = 6, 456 + .axi_uv_r_id = 7, 457 + .max_upscale_factor = 4, 458 + .max_downscale_factor = 4, 459 + .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER, 460 + }, { 461 + .name = "Esmart0-win0", 462 + .phys_id = ROCKCHIP_VOP2_ESMART0, 463 + .base = 0x1800, 464 + .possible_vp_mask = BIT(0) | BIT(2), 465 + .formats = formats_rk3576_esmart, 466 + .nformats = ARRAY_SIZE(formats_rk3576_esmart), 467 + .format_modifiers = format_modifiers, 468 + .layer_sel_id = { 2, 0xf, 0, 0xf }, 469 + .supported_rotations = DRM_MODE_REFLECT_Y, 470 + .type = DRM_PLANE_TYPE_OVERLAY, 471 + .axi_bus_id = 0, 472 + .axi_yrgb_r_id = 0x10, 473 + .axi_uv_r_id = 0x11, 474 + .max_upscale_factor = 8, 475 + .max_downscale_factor = 8, 476 + }, { 477 + .name = "Esmart1-win0", 478 + .phys_id = ROCKCHIP_VOP2_ESMART1, 
479 + .base = 0x1a00, 480 + .possible_vp_mask = BIT(1) | BIT(2), 481 + .formats = formats_rk3576_esmart, 482 + .nformats = ARRAY_SIZE(formats_rk3576_esmart), 483 + .format_modifiers = format_modifiers, 484 + .layer_sel_id = { 0xf, 2, 1, 0xf }, 485 + .supported_rotations = DRM_MODE_REFLECT_Y, 486 + .type = DRM_PLANE_TYPE_OVERLAY, 487 + .axi_bus_id = 0, 488 + .axi_yrgb_r_id = 0x12, 489 + .axi_uv_r_id = 0x13, 490 + .max_upscale_factor = 8, 491 + .max_downscale_factor = 8, 492 + }, { 493 + .name = "Esmart2-win0", 494 + .phys_id = ROCKCHIP_VOP2_ESMART2, 495 + .base = 0x1c00, 496 + .possible_vp_mask = BIT(0) | BIT(2), 497 + .formats = formats_rk3576_esmart, 498 + .nformats = ARRAY_SIZE(formats_rk3576_esmart), 499 + .format_modifiers = format_modifiers, 500 + .layer_sel_id = { 3, 0xf, 2, 0xf }, 501 + .supported_rotations = DRM_MODE_REFLECT_Y, 502 + .type = DRM_PLANE_TYPE_OVERLAY, 503 + .axi_bus_id = 1, 504 + .axi_yrgb_r_id = 0x0a, 505 + .axi_uv_r_id = 0x0b, 506 + .max_upscale_factor = 8, 507 + .max_downscale_factor = 8, 508 + }, { 509 + .name = "Esmart3-win0", 510 + .phys_id = ROCKCHIP_VOP2_ESMART3, 511 + .base = 0x1e00, 512 + .possible_vp_mask = BIT(1) | BIT(2), 513 + .formats = formats_rk3576_esmart, 514 + .nformats = ARRAY_SIZE(formats_rk3576_esmart), 515 + .format_modifiers = format_modifiers, 516 + .layer_sel_id = { 0xf, 3, 3, 0xf }, 517 + .supported_rotations = DRM_MODE_REFLECT_Y, 518 + .type = DRM_PLANE_TYPE_OVERLAY, 519 + .axi_bus_id = 1, 520 + .axi_yrgb_r_id = 0x0c, 521 + .axi_uv_r_id = 0x0d, 522 + .max_upscale_factor = 8, 523 + .max_downscale_factor = 8, 524 + }, 525 + }; 526 + 527 + static const struct vop2_regs_dump rk3576_regs_dump[] = { 528 + { 529 + .name = "SYS", 530 + .base = RK3568_REG_CFG_DONE, 531 + .size = 0x200, 532 + .en_reg = 0, 533 + .en_val = 0, 534 + .en_mask = 0 535 + }, { 536 + .name = "OVL_SYS", 537 + .base = RK3576_SYS_EXTRA_ALPHA_CTRL, 538 + .size = 0x50, 539 + .en_reg = 0, 540 + .en_val = 0, 541 + .en_mask = 0, 542 + }, { 543 + .name = 
"OVL_VP0", 544 + .base = RK3576_OVL_CTRL(0), 545 + .size = 0x80, 546 + .en_reg = 0, 547 + .en_val = 0, 548 + .en_mask = 0, 549 + }, { 550 + .name = "OVL_VP1", 551 + .base = RK3576_OVL_CTRL(1), 552 + .size = 0x80, 553 + .en_reg = 0, 554 + .en_val = 0, 555 + .en_mask = 0, 556 + }, { 557 + .name = "OVL_VP2", 558 + .base = RK3576_OVL_CTRL(2), 559 + .size = 0x80, 560 + .en_reg = 0, 561 + .en_val = 0, 562 + .en_mask = 0, 563 + }, { 564 + .name = "VP0", 565 + .base = RK3568_VP0_CTRL_BASE, 566 + .size = 0x100, 567 + .en_reg = RK3568_VP_DSP_CTRL, 568 + .en_val = 0, 569 + .en_mask = RK3568_VP_DSP_CTRL__STANDBY, 570 + }, { 571 + .name = "VP1", 572 + .base = RK3568_VP1_CTRL_BASE, 573 + .size = 0x100, 574 + .en_reg = RK3568_VP_DSP_CTRL, 575 + .en_val = 0, 576 + .en_mask = RK3568_VP_DSP_CTRL__STANDBY, 577 + }, { 578 + .name = "VP2", 579 + .base = RK3568_VP2_CTRL_BASE, 580 + .size = 0x100, 581 + .en_reg = RK3568_VP_DSP_CTRL, 582 + .en_val = 0, 583 + .en_mask = RK3568_VP_DSP_CTRL__STANDBY, 584 + }, { 585 + .name = "Cluster0", 586 + .base = RK3568_CLUSTER0_CTRL_BASE, 587 + .size = 0x200, 588 + .en_reg = RK3568_CLUSTER_WIN_CTRL0, 589 + .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, 590 + .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, 591 + }, { 592 + .name = "Cluster1", 593 + .base = RK3568_CLUSTER1_CTRL_BASE, 594 + .size = 0x200, 595 + .en_reg = RK3568_CLUSTER_WIN_CTRL0, 596 + .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, 597 + .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, 598 + }, { 599 + .name = "Esmart0", 600 + .base = RK3568_ESMART0_CTRL_BASE, 601 + .size = 0xf0, 602 + .en_reg = RK3568_SMART_REGION0_CTRL, 603 + .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, 604 + .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, 605 + }, { 606 + .name = "Esmart1", 607 + .base = RK3568_ESMART1_CTRL_BASE, 608 + .size = 0xf0, 609 + .en_reg = RK3568_SMART_REGION0_CTRL, 610 + .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, 611 + .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, 612 + }, { 613 + .name = 
"Esmart2", 614 + .base = RK3588_ESMART2_CTRL_BASE, 615 + .size = 0xf0, 616 + .en_reg = RK3568_SMART_REGION0_CTRL, 617 + .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, 618 + .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, 619 + }, { 620 + .name = "Esmart3", 621 + .base = RK3588_ESMART3_CTRL_BASE, 622 + .size = 0xf0, 623 + .en_reg = RK3568_SMART_REGION0_CTRL, 624 + .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, 625 + .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, 626 + }, 627 + }; 628 + 770 629 static const struct vop2_video_port_data rk3588_vop_video_ports[] = { 771 630 { 772 631 .id = 0, ··· 1122 409 .name = "Cluster0-win0", 1123 410 .phys_id = ROCKCHIP_VOP2_CLUSTER0, 1124 411 .base = 0x1000, 412 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), 1125 413 .formats = formats_cluster, 1126 414 .nformats = ARRAY_SIZE(formats_cluster), 1127 415 .format_modifiers = format_modifiers_afbc, 1128 - .layer_sel_id = 0, 416 + .layer_sel_id = { 0, 0, 0, 0 }, 1129 417 .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | 1130 418 DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, 1131 419 .axi_bus_id = 0, ··· 1141 427 .name = "Cluster1-win0", 1142 428 .phys_id = ROCKCHIP_VOP2_CLUSTER1, 1143 429 .base = 0x1200, 430 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), 1144 431 .formats = formats_cluster, 1145 432 .nformats = ARRAY_SIZE(formats_cluster), 1146 433 .format_modifiers = format_modifiers_afbc, 1147 - .layer_sel_id = 1, 434 + .layer_sel_id = { 1, 1, 1, 1 }, 1148 435 .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | 1149 436 DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, 1150 437 .type = DRM_PLANE_TYPE_PRIMARY, ··· 1160 445 .name = "Cluster2-win0", 1161 446 .phys_id = ROCKCHIP_VOP2_CLUSTER2, 1162 447 .base = 0x1400, 448 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), 1163 449 .formats = formats_cluster, 1164 450 .nformats = ARRAY_SIZE(formats_cluster), 1165 451 .format_modifiers = format_modifiers_afbc, 1166 - .layer_sel_id = 4, 452 + 
.layer_sel_id = { 4, 4, 4, 4 }, 1167 453 .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | 1168 454 DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, 1169 455 .type = DRM_PLANE_TYPE_PRIMARY, ··· 1179 463 .name = "Cluster3-win0", 1180 464 .phys_id = ROCKCHIP_VOP2_CLUSTER3, 1181 465 .base = 0x1600, 466 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), 1182 467 .formats = formats_cluster, 1183 468 .nformats = ARRAY_SIZE(formats_cluster), 1184 469 .format_modifiers = format_modifiers_afbc, 1185 - .layer_sel_id = 5, 470 + .layer_sel_id = { 5, 5, 5, 5 }, 1186 471 .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | 1187 472 DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, 1188 473 .type = DRM_PLANE_TYPE_PRIMARY, ··· 1197 480 }, { 1198 481 .name = "Esmart0-win0", 1199 482 .phys_id = ROCKCHIP_VOP2_ESMART0, 483 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), 1200 484 .formats = formats_esmart, 1201 485 .nformats = ARRAY_SIZE(formats_esmart), 1202 486 .format_modifiers = format_modifiers, 1203 487 .base = 0x1800, 1204 - .layer_sel_id = 2, 488 + .layer_sel_id = { 2, 2, 2, 2 }, 1205 489 .supported_rotations = DRM_MODE_REFLECT_Y, 1206 490 .type = DRM_PLANE_TYPE_OVERLAY, 1207 491 .axi_bus_id = 0, ··· 1214 496 }, { 1215 497 .name = "Esmart1-win0", 1216 498 .phys_id = ROCKCHIP_VOP2_ESMART1, 499 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), 1217 500 .formats = formats_esmart, 1218 501 .nformats = ARRAY_SIZE(formats_esmart), 1219 502 .format_modifiers = format_modifiers, 1220 503 .base = 0x1a00, 1221 - .layer_sel_id = 3, 504 + .layer_sel_id = { 3, 3, 3, 3 }, 1222 505 .supported_rotations = DRM_MODE_REFLECT_Y, 1223 506 .type = DRM_PLANE_TYPE_OVERLAY, 1224 507 .axi_bus_id = 0, ··· 1232 513 .name = "Esmart2-win0", 1233 514 .phys_id = ROCKCHIP_VOP2_ESMART2, 1234 515 .base = 0x1c00, 516 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), 1235 517 .formats = formats_esmart, 1236 518 .nformats = ARRAY_SIZE(formats_esmart), 1237 519 
.format_modifiers = format_modifiers, 1238 - .layer_sel_id = 6, 520 + .layer_sel_id = { 6, 6, 6, 6 }, 1239 521 .supported_rotations = DRM_MODE_REFLECT_Y, 1240 522 .type = DRM_PLANE_TYPE_OVERLAY, 1241 523 .axi_bus_id = 1, ··· 1248 528 }, { 1249 529 .name = "Esmart3-win0", 1250 530 .phys_id = ROCKCHIP_VOP2_ESMART3, 531 + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), 1251 532 .formats = formats_esmart, 1252 533 .nformats = ARRAY_SIZE(formats_esmart), 1253 534 .format_modifiers = format_modifiers, 1254 535 .base = 0x1e00, 1255 - .layer_sel_id = 7, 536 + .layer_sel_id = { 7, 7, 7, 7 }, 1256 537 .supported_rotations = DRM_MODE_REFLECT_Y, 1257 538 .type = DRM_PLANE_TYPE_OVERLAY, 1258 539 .axi_bus_id = 1, ··· 1368 647 }, 1369 648 }; 1370 649 650 + static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) 651 + { 652 + struct vop2 *vop2 = vp->vop2; 653 + struct drm_crtc *crtc = &vp->crtc; 654 + u32 die, dip; 655 + 656 + die = vop2_readl(vop2, RK3568_DSP_IF_EN); 657 + dip = vop2_readl(vop2, RK3568_DSP_IF_POL); 658 + 659 + switch (id) { 660 + case ROCKCHIP_VOP2_EP_RGB0: 661 + die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX; 662 + die |= RK3568_SYS_DSP_INFACE_EN_RGB | 663 + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id); 664 + dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; 665 + dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); 666 + if (polflags & POLFLAG_DCLK_INV) 667 + regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3)); 668 + else 669 + regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16)); 670 + break; 671 + case ROCKCHIP_VOP2_EP_HDMI0: 672 + die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX; 673 + die |= RK3568_SYS_DSP_INFACE_EN_HDMI | 674 + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id); 675 + dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL; 676 + dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags); 677 + break; 678 + case ROCKCHIP_VOP2_EP_EDP0: 679 + die &= 
~RK3568_SYS_DSP_INFACE_EN_EDP_MUX; 680 + die |= RK3568_SYS_DSP_INFACE_EN_EDP | 681 + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id); 682 + dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL; 683 + dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags); 684 + break; 685 + case ROCKCHIP_VOP2_EP_MIPI0: 686 + die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX; 687 + die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 | 688 + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id); 689 + dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL; 690 + dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags); 691 + break; 692 + case ROCKCHIP_VOP2_EP_MIPI1: 693 + die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX; 694 + die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 | 695 + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); 696 + dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL; 697 + dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags); 698 + break; 699 + case ROCKCHIP_VOP2_EP_LVDS0: 700 + die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX; 701 + die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 | 702 + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id); 703 + dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; 704 + dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); 705 + break; 706 + case ROCKCHIP_VOP2_EP_LVDS1: 707 + die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX; 708 + die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 | 709 + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id); 710 + dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; 711 + dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); 712 + break; 713 + default: 714 + drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); 715 + return 0; 716 + } 717 + 718 + dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD; 719 + 720 + vop2_writel(vop2, RK3568_DSP_IF_EN, die); 721 + vop2_writel(vop2, RK3568_DSP_IF_POL, dip); 722 + 723 + return crtc->state->adjusted_mode.crtc_clock * 1000LL; 724 + } 725 + 726 + static unsigned long rk3576_set_intf_mux(struct vop2_video_port *vp, int id, 
u32 polflags) 727 + { 728 + struct vop2 *vop2 = vp->vop2; 729 + struct drm_crtc *crtc = &vp->crtc; 730 + struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; 731 + struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state); 732 + u8 port_pix_rate = vp->data->pixel_rate; 733 + int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_sel; 734 + u32 ctrl, vp_clk_div, reg, dclk_div; 735 + unsigned long dclk_in_rate, dclk_core_rate; 736 + 737 + if (vcstate->output_mode == ROCKCHIP_OUT_MODE_YUV420 || adjusted_mode->crtc_clock > 600000) 738 + dclk_div = 2; 739 + else 740 + dclk_div = 1; 741 + 742 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 743 + dclk_core_rate = adjusted_mode->crtc_clock / 2; 744 + else 745 + dclk_core_rate = adjusted_mode->crtc_clock / port_pix_rate; 746 + 747 + dclk_in_rate = adjusted_mode->crtc_clock / dclk_div; 748 + 749 + dclk_core_div = dclk_in_rate > dclk_core_rate ? 1 : 0; 750 + 751 + if (vop2_output_if_is_edp(id)) 752 + if_pixclk_div = port_pix_rate == 2 ? RK3576_DSP_IF_PCLK_DIV : 0; 753 + else 754 + if_pixclk_div = port_pix_rate == 1 ? 
RK3576_DSP_IF_PCLK_DIV : 0; 755 + 756 + if (vcstate->output_mode == ROCKCHIP_OUT_MODE_YUV420) { 757 + if_dclk_sel = RK3576_DSP_IF_DCLK_SEL_OUT; 758 + dclk_out_div = 1; 759 + } else { 760 + if_dclk_sel = 0; 761 + dclk_out_div = 0; 762 + } 763 + 764 + switch (id) { 765 + case ROCKCHIP_VOP2_EP_HDMI0: 766 + reg = RK3576_HDMI0_IF_CTRL; 767 + break; 768 + case ROCKCHIP_VOP2_EP_EDP0: 769 + reg = RK3576_EDP0_IF_CTRL; 770 + break; 771 + case ROCKCHIP_VOP2_EP_MIPI0: 772 + reg = RK3576_MIPI0_IF_CTRL; 773 + break; 774 + case ROCKCHIP_VOP2_EP_DP0: 775 + reg = RK3576_DP0_IF_CTRL; 776 + break; 777 + case ROCKCHIP_VOP2_EP_DP1: 778 + reg = RK3576_DP1_IF_CTRL; 779 + break; 780 + default: 781 + drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); 782 + return 0; 783 + } 784 + 785 + ctrl = vop2_readl(vop2, reg); 786 + ctrl &= ~RK3576_DSP_IF_DCLK_SEL_OUT; 787 + ctrl &= ~RK3576_DSP_IF_PCLK_DIV; 788 + ctrl &= ~RK3576_DSP_IF_MUX; 789 + ctrl |= RK3576_DSP_IF_CFG_DONE_IMD; 790 + ctrl |= if_dclk_sel | if_pixclk_div; 791 + ctrl |= RK3576_DSP_IF_CLK_OUT_EN | RK3576_DSP_IF_EN; 792 + ctrl |= FIELD_PREP(RK3576_DSP_IF_MUX, vp->id); 793 + ctrl |= FIELD_PREP(RK3576_DSP_IF_PIN_POL, polflags); 794 + vop2_writel(vop2, reg, ctrl); 795 + 796 + vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div); 797 + vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div); 798 + 799 + vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div); 800 + 801 + return dclk_in_rate * 1000LL; 802 + } 803 + 804 + /* 805 + * calc the dclk on rk3588 806 + * the available div of dclk is 1, 2, 4 807 + */ 808 + static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk) 809 + { 810 + if (child_clk * 4 <= max_dclk) 811 + return child_clk * 4; 812 + else if (child_clk * 2 <= max_dclk) 813 + return child_clk * 2; 814 + else if (child_clk <= max_dclk) 815 + return child_clk; 816 + else 817 + return 0; 818 + } 819 + 820 + /* 821 + * 4 pixclk/cycle on rk3588 822 + * 
RGB/eDP/HDMI: if_pixclk >= dclk_core 823 + * DP: dp_pixclk = dclk_out <= dclk_core 824 + * DSI: mipi_pixclk <= dclk_out <= dclk_core 825 + */ 826 + static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id, 827 + int *dclk_core_div, int *dclk_out_div, 828 + int *if_pixclk_div, int *if_dclk_div) 829 + { 830 + struct vop2 *vop2 = vp->vop2; 831 + struct drm_crtc *crtc = &vp->crtc; 832 + struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; 833 + struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state); 834 + int output_mode = vcstate->output_mode; 835 + unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */ 836 + unsigned long dclk_core_rate = v_pixclk >> 2; 837 + unsigned long dclk_rate = v_pixclk; 838 + unsigned long dclk_out_rate; 839 + unsigned long if_pixclk_rate; 840 + int K = 1; 841 + 842 + if (vop2_output_if_is_hdmi(id)) { 843 + /* 844 + * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate 845 + * K = 1: dclk_core = hdmie_edp_dclk > if_pixclk_rate 846 + */ 847 + if (output_mode == ROCKCHIP_OUT_MODE_YUV420) { 848 + dclk_rate = dclk_rate >> 1; 849 + K = 2; 850 + } 851 + 852 + /* 853 + * if_pixclk_rate = (dclk_core_rate << 1) / K; 854 + * if_dclk_rate = dclk_core_rate / K; 855 + * *if_pixclk_div = dclk_rate / if_pixclk_rate; 856 + * *if_dclk_div = dclk_rate / if_dclk_rate; 857 + */ 858 + *if_pixclk_div = 2; 859 + *if_dclk_div = 4; 860 + } else if (vop2_output_if_is_edp(id)) { 861 + /* 862 + * edp_pixclk = edp_dclk > dclk_core 863 + */ 864 + if_pixclk_rate = v_pixclk / K; 865 + dclk_rate = if_pixclk_rate * K; 866 + /* 867 + * *if_pixclk_div = dclk_rate / if_pixclk_rate; 868 + * *if_dclk_div = *if_pixclk_div; 869 + */ 870 + *if_pixclk_div = K; 871 + *if_dclk_div = K; 872 + } else if (vop2_output_if_is_dp(id)) { 873 + if (output_mode == ROCKCHIP_OUT_MODE_YUV420) 874 + dclk_out_rate = v_pixclk >> 3; 875 + else 876 + dclk_out_rate = v_pixclk >> 2; 877 + 878 + dclk_rate = 
rk3588_calc_dclk(dclk_out_rate, 600000000); 879 + if (!dclk_rate) { 880 + drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n", 881 + dclk_out_rate); 882 + return 0; 883 + } 884 + *dclk_out_div = dclk_rate / dclk_out_rate; 885 + } else if (vop2_output_if_is_mipi(id)) { 886 + if_pixclk_rate = dclk_core_rate / K; 887 + /* 888 + * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4 889 + */ 890 + dclk_out_rate = if_pixclk_rate; 891 + /* 892 + * dclk_rate = N * dclk_core_rate N = (1,2,4 ), 893 + * we get a little factor here 894 + */ 895 + dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000); 896 + if (!dclk_rate) { 897 + drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n", 898 + dclk_out_rate); 899 + return 0; 900 + } 901 + *dclk_out_div = dclk_rate / dclk_out_rate; 902 + /* 903 + * mipi pixclk == dclk_out 904 + */ 905 + *if_pixclk_div = 1; 906 + } else if (vop2_output_if_is_dpi(id)) { 907 + dclk_rate = v_pixclk; 908 + } 909 + 910 + *dclk_core_div = dclk_rate / dclk_core_rate; 911 + *if_pixclk_div = ilog2(*if_pixclk_div); 912 + *if_dclk_div = ilog2(*if_dclk_div); 913 + *dclk_core_div = ilog2(*dclk_core_div); 914 + *dclk_out_div = ilog2(*dclk_out_div); 915 + 916 + drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n", 917 + dclk_rate, *if_pixclk_div, *if_dclk_div); 918 + 919 + return dclk_rate; 920 + } 921 + 922 + /* 923 + * MIPI port mux on rk3588: 924 + * 0: Video Port2 925 + * 1: Video Port3 926 + * 3: Video Port 1(MIPI1 only) 927 + */ 928 + static u32 rk3588_get_mipi_port_mux(int vp_id) 929 + { 930 + if (vp_id == 1) 931 + return 3; 932 + else if (vp_id == 3) 933 + return 1; 934 + else 935 + return 0; 936 + } 937 + 938 + static u32 rk3588_get_hdmi_pol(u32 flags) 939 + { 940 + u32 val; 941 + 942 + val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0; 943 + val |= (flags & DRM_MODE_FLAG_NVSYNC) ? 
BIT(VSYNC_POSITIVE) : 0; 944 + 945 + return val; 946 + } 947 + 948 + static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) 949 + { 950 + struct vop2 *vop2 = vp->vop2; 951 + int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div; 952 + unsigned long clock; 953 + u32 die, dip, div, vp_clk_div, val; 954 + 955 + clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div, 956 + &if_pixclk_div, &if_dclk_div); 957 + if (!clock) 958 + return 0; 959 + 960 + vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div); 961 + vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div); 962 + 963 + die = vop2_readl(vop2, RK3568_DSP_IF_EN); 964 + dip = vop2_readl(vop2, RK3568_DSP_IF_POL); 965 + div = vop2_readl(vop2, RK3568_DSP_IF_CTRL); 966 + 967 + switch (id) { 968 + case ROCKCHIP_VOP2_EP_HDMI0: 969 + div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV; 970 + div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV; 971 + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); 972 + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); 973 + die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX; 974 + die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 | 975 + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id); 976 + val = rk3588_get_hdmi_pol(polflags); 977 + regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1)); 978 + regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5)); 979 + break; 980 + case ROCKCHIP_VOP2_EP_HDMI1: 981 + div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV; 982 + div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV; 983 + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div); 984 + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div); 985 + die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX; 986 + die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 | 987 + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id); 988 + val = rk3588_get_hdmi_pol(polflags); 989 + 
regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4)); 990 + regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7)); 991 + break; 992 + case ROCKCHIP_VOP2_EP_EDP0: 993 + div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV; 994 + div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV; 995 + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); 996 + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); 997 + die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX; 998 + die |= RK3588_SYS_DSP_INFACE_EN_EDP0 | 999 + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id); 1000 + regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0)); 1001 + break; 1002 + case ROCKCHIP_VOP2_EP_EDP1: 1003 + div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV; 1004 + div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV; 1005 + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); 1006 + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); 1007 + die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX; 1008 + die |= RK3588_SYS_DSP_INFACE_EN_EDP1 | 1009 + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id); 1010 + regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3)); 1011 + break; 1012 + case ROCKCHIP_VOP2_EP_MIPI0: 1013 + div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV; 1014 + div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div); 1015 + die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX; 1016 + val = rk3588_get_mipi_port_mux(vp->id); 1017 + die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 | 1018 + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val); 1019 + break; 1020 + case ROCKCHIP_VOP2_EP_MIPI1: 1021 + div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV; 1022 + div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div); 1023 + die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; 1024 + val = rk3588_get_mipi_port_mux(vp->id); 1025 + die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | 1026 + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val); 1027 + break; 1028 + 
case ROCKCHIP_VOP2_EP_DP0: 1029 + die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX; 1030 + die |= RK3588_SYS_DSP_INFACE_EN_DP0 | 1031 + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id); 1032 + dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL; 1033 + dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags); 1034 + break; 1035 + case ROCKCHIP_VOP2_EP_DP1: 1036 + die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; 1037 + die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | 1038 + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); 1039 + dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL; 1040 + dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags); 1041 + break; 1042 + default: 1043 + drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); 1044 + return 0; 1045 + } 1046 + 1047 + dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD; 1048 + 1049 + vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div); 1050 + vop2_writel(vop2, RK3568_DSP_IF_EN, die); 1051 + vop2_writel(vop2, RK3568_DSP_IF_CTRL, div); 1052 + vop2_writel(vop2, RK3568_DSP_IF_POL, dip); 1053 + 1054 + return clock; 1055 + } 1056 + 1057 + static bool is_opaque(u16 alpha) 1058 + { 1059 + return (alpha >> 8) == 0xff; 1060 + } 1061 + 1062 + static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config, 1063 + struct vop2_alpha *alpha) 1064 + { 1065 + int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1; 1066 + int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1; 1067 + int src_color_mode = alpha_config->src_premulti_en ? 1068 + ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL; 1069 + int dst_color_mode = alpha_config->dst_premulti_en ? 
1070 + ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL; 1071 + 1072 + alpha->src_color_ctrl.val = 0; 1073 + alpha->dst_color_ctrl.val = 0; 1074 + alpha->src_alpha_ctrl.val = 0; 1075 + alpha->dst_alpha_ctrl.val = 0; 1076 + 1077 + if (!alpha_config->src_pixel_alpha_en) 1078 + alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL; 1079 + else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en) 1080 + alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX; 1081 + else 1082 + alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL; 1083 + 1084 + alpha->src_color_ctrl.bits.alpha_en = 1; 1085 + 1086 + if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) { 1087 + alpha->src_color_ctrl.bits.color_mode = src_color_mode; 1088 + alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL; 1089 + } else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) { 1090 + alpha->src_color_ctrl.bits.color_mode = src_color_mode; 1091 + alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE; 1092 + } else { 1093 + alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL; 1094 + alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL; 1095 + } 1096 + alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8; 1097 + alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; 1098 + alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; 1099 + 1100 + alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; 1101 + alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; 1102 + alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL; 1103 + alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8; 1104 + alpha->dst_color_ctrl.bits.color_mode = dst_color_mode; 1105 + alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; 1106 + 1107 + alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; 1108 + alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode; 1109 + 
alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; 1110 + alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE; 1111 + 1112 + alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; 1113 + if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en) 1114 + alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX; 1115 + else 1116 + alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL; 1117 + alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION; 1118 + alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; 1119 + } 1120 + 1121 + static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id) 1122 + { 1123 + struct vop2_video_port *vp; 1124 + int used_layer = 0; 1125 + int i; 1126 + 1127 + for (i = 0; i < port_id; i++) { 1128 + vp = &vop2->vps[i]; 1129 + used_layer += hweight32(vp->win_mask); 1130 + } 1131 + 1132 + return used_layer; 1133 + } 1134 + 1135 + static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win) 1136 + { 1137 + struct vop2_alpha_config alpha_config; 1138 + struct vop2_alpha alpha; 1139 + struct drm_plane_state *bottom_win_pstate; 1140 + bool src_pixel_alpha_en = false; 1141 + u16 src_glb_alpha_val, dst_glb_alpha_val; 1142 + u32 src_color_ctrl_reg, dst_color_ctrl_reg, src_alpha_ctrl_reg, dst_alpha_ctrl_reg; 1143 + u32 offset = 0; 1144 + bool premulti_en = false; 1145 + bool swap = false; 1146 + 1147 + /* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */ 1148 + bottom_win_pstate = main_win->base.state; 1149 + src_glb_alpha_val = 0; 1150 + dst_glb_alpha_val = main_win->base.state->alpha; 1151 + 1152 + if (!bottom_win_pstate->fb) 1153 + return; 1154 + 1155 + alpha_config.src_premulti_en = premulti_en; 1156 + alpha_config.dst_premulti_en = false; 1157 + alpha_config.src_pixel_alpha_en = src_pixel_alpha_en; 1158 + alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ 1159 + alpha_config.src_glb_alpha_value = src_glb_alpha_val; 1160 + 
alpha_config.dst_glb_alpha_value = dst_glb_alpha_val; 1161 + vop2_parse_alpha(&alpha_config, &alpha); 1162 + 1163 + alpha.src_color_ctrl.bits.src_dst_swap = swap; 1164 + 1165 + switch (main_win->data->phys_id) { 1166 + case ROCKCHIP_VOP2_CLUSTER0: 1167 + offset = 0x0; 1168 + break; 1169 + case ROCKCHIP_VOP2_CLUSTER1: 1170 + offset = 0x10; 1171 + break; 1172 + case ROCKCHIP_VOP2_CLUSTER2: 1173 + offset = 0x20; 1174 + break; 1175 + case ROCKCHIP_VOP2_CLUSTER3: 1176 + offset = 0x30; 1177 + break; 1178 + } 1179 + 1180 + if (vop2->version <= VOP_VERSION_RK3588) { 1181 + src_color_ctrl_reg = RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL; 1182 + dst_color_ctrl_reg = RK3568_CLUSTER0_MIX_DST_COLOR_CTRL; 1183 + src_alpha_ctrl_reg = RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL; 1184 + dst_alpha_ctrl_reg = RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL; 1185 + } else { 1186 + src_color_ctrl_reg = RK3576_CLUSTER0_MIX_SRC_COLOR_CTRL; 1187 + dst_color_ctrl_reg = RK3576_CLUSTER0_MIX_DST_COLOR_CTRL; 1188 + src_alpha_ctrl_reg = RK3576_CLUSTER0_MIX_SRC_ALPHA_CTRL; 1189 + dst_alpha_ctrl_reg = RK3576_CLUSTER0_MIX_DST_ALPHA_CTRL; 1190 + } 1191 + 1192 + vop2_writel(vop2, src_color_ctrl_reg + offset, alpha.src_color_ctrl.val); 1193 + vop2_writel(vop2, dst_color_ctrl_reg + offset, alpha.dst_color_ctrl.val); 1194 + vop2_writel(vop2, src_alpha_ctrl_reg + offset, alpha.src_alpha_ctrl.val); 1195 + vop2_writel(vop2, dst_alpha_ctrl_reg + offset, alpha.dst_alpha_ctrl.val); 1196 + } 1197 + 1198 + static void vop2_setup_alpha(struct vop2_video_port *vp) 1199 + { 1200 + struct vop2 *vop2 = vp->vop2; 1201 + struct drm_framebuffer *fb; 1202 + struct vop2_alpha_config alpha_config; 1203 + struct vop2_alpha alpha; 1204 + struct drm_plane *plane; 1205 + int pixel_alpha_en; 1206 + int premulti_en, gpremulti_en = 0; 1207 + int mixer_id; 1208 + u32 src_color_ctrl_reg, dst_color_ctrl_reg, src_alpha_ctrl_reg, dst_alpha_ctrl_reg; 1209 + u32 offset; 1210 + bool bottom_layer_alpha_en = false; 1211 + u32 dst_global_alpha = 
DRM_BLEND_ALPHA_OPAQUE; 1212 + 1213 + if (vop2->version <= VOP_VERSION_RK3588) 1214 + mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); 1215 + else 1216 + mixer_id = 0; 1217 + 1218 + alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ 1219 + 1220 + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { 1221 + struct vop2_win *win = to_vop2_win(plane); 1222 + 1223 + if (plane->state->normalized_zpos == 0 && 1224 + !is_opaque(plane->state->alpha) && 1225 + !vop2_cluster_window(win)) { 1226 + /* 1227 + * If bottom layer have global alpha effect [except cluster layer, 1228 + * because cluster have deal with bottom layer global alpha value 1229 + * at cluster mix], bottom layer mix need deal with global alpha. 1230 + */ 1231 + bottom_layer_alpha_en = true; 1232 + dst_global_alpha = plane->state->alpha; 1233 + } 1234 + } 1235 + 1236 + if (vop2->version <= VOP_VERSION_RK3588) { 1237 + src_color_ctrl_reg = RK3568_MIX0_SRC_COLOR_CTRL; 1238 + dst_color_ctrl_reg = RK3568_MIX0_DST_COLOR_CTRL; 1239 + src_alpha_ctrl_reg = RK3568_MIX0_SRC_ALPHA_CTRL; 1240 + dst_alpha_ctrl_reg = RK3568_MIX0_DST_ALPHA_CTRL; 1241 + } else { 1242 + src_color_ctrl_reg = RK3576_OVL_MIX0_SRC_COLOR_CTRL(vp->id); 1243 + dst_color_ctrl_reg = RK3576_OVL_MIX0_DST_COLOR_CTRL(vp->id); 1244 + src_alpha_ctrl_reg = RK3576_OVL_MIX0_SRC_ALPHA_CTRL(vp->id); 1245 + dst_alpha_ctrl_reg = RK3576_OVL_MIX0_DST_ALPHA_CTRL(vp->id); 1246 + } 1247 + 1248 + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { 1249 + struct vop2_win *win = to_vop2_win(plane); 1250 + int zpos = plane->state->normalized_zpos; 1251 + 1252 + /* 1253 + * Need to configure alpha from second layer. 
1254 + */ 1255 + if (zpos == 0) 1256 + continue; 1257 + 1258 + if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) 1259 + premulti_en = 1; 1260 + else 1261 + premulti_en = 0; 1262 + 1263 + plane = &win->base; 1264 + fb = plane->state->fb; 1265 + 1266 + pixel_alpha_en = fb->format->has_alpha; 1267 + 1268 + alpha_config.src_premulti_en = premulti_en; 1269 + 1270 + if (bottom_layer_alpha_en && zpos == 1) { 1271 + gpremulti_en = premulti_en; 1272 + /* Cd = Cs + (1 - As) * Cd * Agd */ 1273 + alpha_config.dst_premulti_en = false; 1274 + alpha_config.src_pixel_alpha_en = pixel_alpha_en; 1275 + alpha_config.src_glb_alpha_value = plane->state->alpha; 1276 + alpha_config.dst_glb_alpha_value = dst_global_alpha; 1277 + } else if (vop2_cluster_window(win)) { 1278 + /* Mix output data only have pixel alpha */ 1279 + alpha_config.dst_premulti_en = true; 1280 + alpha_config.src_pixel_alpha_en = true; 1281 + alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 1282 + alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 1283 + } else { 1284 + /* Cd = Cs + (1 - As) * Cd */ 1285 + alpha_config.dst_premulti_en = true; 1286 + alpha_config.src_pixel_alpha_en = pixel_alpha_en; 1287 + alpha_config.src_glb_alpha_value = plane->state->alpha; 1288 + alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 1289 + } 1290 + 1291 + vop2_parse_alpha(&alpha_config, &alpha); 1292 + 1293 + offset = (mixer_id + zpos - 1) * 0x10; 1294 + 1295 + vop2_writel(vop2, src_color_ctrl_reg + offset, alpha.src_color_ctrl.val); 1296 + vop2_writel(vop2, dst_color_ctrl_reg + offset, alpha.dst_color_ctrl.val); 1297 + vop2_writel(vop2, src_alpha_ctrl_reg + offset, alpha.src_alpha_ctrl.val); 1298 + vop2_writel(vop2, dst_alpha_ctrl_reg + offset, alpha.dst_alpha_ctrl.val); 1299 + } 1300 + 1301 + if (vp->id == 0) { 1302 + if (vop2->version <= VOP_VERSION_RK3588) { 1303 + src_color_ctrl_reg = RK3568_HDR0_SRC_COLOR_CTRL; 1304 + dst_color_ctrl_reg = RK3568_HDR0_DST_COLOR_CTRL; 1305 + 
src_alpha_ctrl_reg = RK3568_HDR0_SRC_ALPHA_CTRL; 1306 + dst_alpha_ctrl_reg = RK3568_HDR0_DST_ALPHA_CTRL; 1307 + } else { 1308 + src_color_ctrl_reg = RK3576_OVL_HDR_SRC_COLOR_CTRL(vp->id); 1309 + dst_color_ctrl_reg = RK3576_OVL_HDR_DST_COLOR_CTRL(vp->id); 1310 + src_alpha_ctrl_reg = RK3576_OVL_HDR_SRC_ALPHA_CTRL(vp->id); 1311 + dst_alpha_ctrl_reg = RK3576_OVL_HDR_DST_ALPHA_CTRL(vp->id); 1312 + } 1313 + 1314 + if (bottom_layer_alpha_en) { 1315 + /* Transfer pixel alpha to hdr mix */ 1316 + alpha_config.src_premulti_en = gpremulti_en; 1317 + alpha_config.dst_premulti_en = true; 1318 + alpha_config.src_pixel_alpha_en = true; 1319 + alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 1320 + alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; 1321 + 1322 + vop2_parse_alpha(&alpha_config, &alpha); 1323 + 1324 + vop2_writel(vop2, src_color_ctrl_reg, alpha.src_color_ctrl.val); 1325 + vop2_writel(vop2, dst_color_ctrl_reg, alpha.dst_color_ctrl.val); 1326 + vop2_writel(vop2, src_alpha_ctrl_reg, alpha.src_alpha_ctrl.val); 1327 + vop2_writel(vop2, dst_alpha_ctrl_reg, alpha.dst_alpha_ctrl.val); 1328 + } else { 1329 + vop2_writel(vop2, src_color_ctrl_reg, 0); 1330 + } 1331 + } 1332 + } 1333 + 1334 + static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) 1335 + { 1336 + struct vop2 *vop2 = vp->vop2; 1337 + struct drm_plane *plane; 1338 + u32 layer_sel = 0; 1339 + u32 port_sel; 1340 + u8 layer_id; 1341 + u8 old_layer_id; 1342 + u8 layer_sel_id; 1343 + unsigned int ofs; 1344 + u32 ovl_ctrl; 1345 + int i; 1346 + struct vop2_video_port *vp0 = &vop2->vps[0]; 1347 + struct vop2_video_port *vp1 = &vop2->vps[1]; 1348 + struct vop2_video_port *vp2 = &vop2->vps[2]; 1349 + struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); 1350 + 1351 + ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL); 1352 + ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD; 1353 + if (vcstate->yuv_overlay) 1354 + ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id); 1355 + else 
1356 + ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id); 1357 + 1358 + vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl); 1359 + 1360 + port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL); 1361 + port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT; 1362 + 1363 + if (vp0->nlayers) 1364 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 1365 + vp0->nlayers - 1); 1366 + else 1367 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8); 1368 + 1369 + if (vp1->nlayers) 1370 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 1371 + (vp0->nlayers + vp1->nlayers - 1)); 1372 + else 1373 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8); 1374 + 1375 + if (vp2->nlayers) 1376 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 1377 + (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1)); 1378 + else 1379 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8); 1380 + 1381 + layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); 1382 + 1383 + ofs = 0; 1384 + for (i = 0; i < vp->id; i++) 1385 + ofs += vop2->vps[i].nlayers; 1386 + 1387 + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { 1388 + struct vop2_win *win = to_vop2_win(plane); 1389 + struct vop2_win *old_win; 1390 + 1391 + layer_id = (u8)(plane->state->normalized_zpos + ofs); 1392 + /* 1393 + * Find the layer this win bind in old state. 
1394 + */ 1395 + for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) { 1396 + layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf; 1397 + if (layer_sel_id == win->data->layer_sel_id[vp->id]) 1398 + break; 1399 + } 1400 + 1401 + /* 1402 + * Find the win bind to this layer in old state 1403 + */ 1404 + for (i = 0; i < vop2->data->win_size; i++) { 1405 + old_win = &vop2->win[i]; 1406 + layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf; 1407 + if (layer_sel_id == old_win->data->layer_sel_id[vp->id]) 1408 + break; 1409 + } 1410 + 1411 + switch (win->data->phys_id) { 1412 + case ROCKCHIP_VOP2_CLUSTER0: 1413 + port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0; 1414 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id); 1415 + break; 1416 + case ROCKCHIP_VOP2_CLUSTER1: 1417 + port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1; 1418 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id); 1419 + break; 1420 + case ROCKCHIP_VOP2_CLUSTER2: 1421 + port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2; 1422 + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id); 1423 + break; 1424 + case ROCKCHIP_VOP2_CLUSTER3: 1425 + port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3; 1426 + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id); 1427 + break; 1428 + case ROCKCHIP_VOP2_ESMART0: 1429 + port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0; 1430 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id); 1431 + break; 1432 + case ROCKCHIP_VOP2_ESMART1: 1433 + port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1; 1434 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id); 1435 + break; 1436 + case ROCKCHIP_VOP2_ESMART2: 1437 + port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2; 1438 + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id); 1439 + break; 1440 + case ROCKCHIP_VOP2_ESMART3: 1441 + port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3; 1442 + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id); 1443 + break; 1444 + case ROCKCHIP_VOP2_SMART0: 1445 + 
port_sel &= ~RK3568_OVL_PORT_SEL__SMART0; 1446 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id); 1447 + break; 1448 + case ROCKCHIP_VOP2_SMART1: 1449 + port_sel &= ~RK3568_OVL_PORT_SEL__SMART1; 1450 + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id); 1451 + break; 1452 + } 1453 + 1454 + layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7); 1455 + layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id[vp->id]); 1456 + /* 1457 + * When we bind a window from layerM to layerN, we also need to move the old 1458 + * window on layerN to layerM to avoid one window selected by two or more layers. 1459 + */ 1460 + layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7); 1461 + layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 1462 + old_win->data->layer_sel_id[vp->id]); 1463 + } 1464 + 1465 + vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel); 1466 + vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); 1467 + } 1468 + 1469 + static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp) 1470 + { 1471 + struct vop2 *vop2 = vp->vop2; 1472 + struct vop2_win *win; 1473 + int i = 0; 1474 + u32 cdly = 0, sdly = 0; 1475 + 1476 + for (i = 0; i < vop2->data->win_size; i++) { 1477 + u32 dly; 1478 + 1479 + win = &vop2->win[i]; 1480 + dly = win->delay; 1481 + 1482 + switch (win->data->phys_id) { 1483 + case ROCKCHIP_VOP2_CLUSTER0: 1484 + cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly); 1485 + cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly); 1486 + break; 1487 + case ROCKCHIP_VOP2_CLUSTER1: 1488 + cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly); 1489 + cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly); 1490 + break; 1491 + case ROCKCHIP_VOP2_ESMART0: 1492 + sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly); 1493 + break; 1494 + case ROCKCHIP_VOP2_ESMART1: 1495 + sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly); 1496 + break; 1497 + case ROCKCHIP_VOP2_SMART0: 1498 + 
case ROCKCHIP_VOP2_ESMART2: 1499 + sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly); 1500 + break; 1501 + case ROCKCHIP_VOP2_SMART1: 1502 + case ROCKCHIP_VOP2_ESMART3: 1503 + sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly); 1504 + break; 1505 + } 1506 + } 1507 + 1508 + vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly); 1509 + vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly); 1510 + } 1511 + 1512 + static void rk3568_vop2_setup_overlay(struct vop2_video_port *vp) 1513 + { 1514 + struct vop2 *vop2 = vp->vop2; 1515 + struct drm_crtc *crtc = &vp->crtc; 1516 + struct drm_plane *plane; 1517 + 1518 + vp->win_mask = 0; 1519 + 1520 + drm_atomic_crtc_for_each_plane(plane, crtc) { 1521 + struct vop2_win *win = to_vop2_win(plane); 1522 + 1523 + win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT]; 1524 + 1525 + vp->win_mask |= BIT(win->data->phys_id); 1526 + 1527 + if (vop2_cluster_window(win)) 1528 + vop2_setup_cluster_alpha(vop2, win); 1529 + } 1530 + 1531 + if (!vp->win_mask) 1532 + return; 1533 + 1534 + rk3568_vop2_setup_layer_mixer(vp); 1535 + vop2_setup_alpha(vp); 1536 + rk3568_vop2_setup_dly_for_windows(vp); 1537 + } 1538 + 1539 + static void rk3576_vop2_setup_layer_mixer(struct vop2_video_port *vp) 1540 + { 1541 + struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); 1542 + struct vop2 *vop2 = vp->vop2; 1543 + struct drm_plane *plane; 1544 + u32 layer_sel = 0xffff; /* 0xf means this layer is disabled */ 1545 + u32 ovl_ctrl; 1546 + 1547 + ovl_ctrl = vop2_readl(vop2, RK3576_OVL_CTRL(vp->id)); 1548 + if (vcstate->yuv_overlay) 1549 + ovl_ctrl |= RK3576_OVL_CTRL__YUV_MODE; 1550 + else 1551 + ovl_ctrl &= ~RK3576_OVL_CTRL__YUV_MODE; 1552 + 1553 + vop2_writel(vop2, RK3576_OVL_CTRL(vp->id), ovl_ctrl); 1554 + 1555 + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { 1556 + struct vop2_win *win = to_vop2_win(plane); 1557 + 1558 + layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos, 1559 + 0xf); 1560 + layer_sel |= 
RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos, 1561 + win->data->layer_sel_id[vp->id]); 1562 + } 1563 + 1564 + vop2_writel(vop2, RK3576_OVL_LAYER_SEL(vp->id), layer_sel); 1565 + } 1566 + 1567 + static void rk3576_vop2_setup_dly_for_windows(struct vop2_video_port *vp) 1568 + { 1569 + struct drm_plane *plane; 1570 + struct vop2_win *win; 1571 + 1572 + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { 1573 + win = to_vop2_win(plane); 1574 + vop2_win_write(win, VOP2_WIN_DLY_NUM, 0); 1575 + } 1576 + } 1577 + 1578 + static void rk3576_vop2_setup_overlay(struct vop2_video_port *vp) 1579 + { 1580 + struct vop2 *vop2 = vp->vop2; 1581 + struct drm_crtc *crtc = &vp->crtc; 1582 + struct drm_plane *plane; 1583 + 1584 + vp->win_mask = 0; 1585 + 1586 + drm_atomic_crtc_for_each_plane(plane, crtc) { 1587 + struct vop2_win *win = to_vop2_win(plane); 1588 + 1589 + win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT]; 1590 + vp->win_mask |= BIT(win->data->phys_id); 1591 + 1592 + if (vop2_cluster_window(win)) 1593 + vop2_setup_cluster_alpha(vop2, win); 1594 + } 1595 + 1596 + if (!vp->win_mask) 1597 + return; 1598 + 1599 + rk3576_vop2_setup_layer_mixer(vp); 1600 + vop2_setup_alpha(vp); 1601 + rk3576_vop2_setup_dly_for_windows(vp); 1602 + } 1603 + 1604 + static void rk3568_vop2_setup_bg_dly(struct vop2_video_port *vp) 1605 + { 1606 + struct drm_crtc *crtc = &vp->crtc; 1607 + struct drm_display_mode *mode = &crtc->state->adjusted_mode; 1608 + u16 hdisplay = mode->crtc_hdisplay; 1609 + u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; 1610 + u32 bg_dly; 1611 + u32 pre_scan_dly; 1612 + 1613 + bg_dly = vp->data->pre_scan_max_dly[3]; 1614 + vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id), 1615 + FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly)); 1616 + 1617 + pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len; 1618 + vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly); 1619 + } 1620 + 1621 + static void rk3576_vop2_setup_bg_dly(struct 
vop2_video_port *vp) 1622 + { 1623 + struct drm_crtc *crtc = &vp->crtc; 1624 + struct drm_display_mode *mode = &crtc->state->adjusted_mode; 1625 + u16 hdisplay = mode->crtc_hdisplay; 1626 + u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; 1627 + u32 bg_dly; 1628 + u32 pre_scan_dly; 1629 + 1630 + bg_dly = vp->data->pre_scan_max_dly[VOP2_DLY_WIN] + 1631 + vp->data->pre_scan_max_dly[VOP2_DLY_LAYER_MIX] + 1632 + vp->data->pre_scan_max_dly[VOP2_DLY_HDR_MIX]; 1633 + 1634 + vop2_writel(vp->vop2, RK3576_OVL_BG_MIX_CTRL(vp->id), 1635 + FIELD_PREP(RK3576_OVL_BG_MIX_CTRL__BG_DLY, bg_dly)); 1636 + 1637 + pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len; 1638 + vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly); 1639 + } 1640 + 1641 + static const struct vop2_ops rk3568_vop_ops = { 1642 + .setup_intf_mux = rk3568_set_intf_mux, 1643 + .setup_bg_dly = rk3568_vop2_setup_bg_dly, 1644 + .setup_overlay = rk3568_vop2_setup_overlay, 1645 + }; 1646 + 1647 + static const struct vop2_ops rk3576_vop_ops = { 1648 + .setup_intf_mux = rk3576_set_intf_mux, 1649 + .setup_bg_dly = rk3576_vop2_setup_bg_dly, 1650 + .setup_overlay = rk3576_vop2_setup_overlay, 1651 + }; 1652 + 1653 + static const struct vop2_ops rk3588_vop_ops = { 1654 + .setup_intf_mux = rk3588_set_intf_mux, 1655 + .setup_bg_dly = rk3568_vop2_setup_bg_dly, 1656 + .setup_overlay = rk3568_vop2_setup_overlay, 1657 + }; 1658 + 1371 1659 static const struct vop2_data rk3566_vop = { 1660 + .version = VOP_VERSION_RK3568, 1372 1661 .feature = VOP2_FEATURE_HAS_SYS_GRF, 1373 1662 .nr_vps = 3, 1374 1663 .max_input = { 4096, 2304 }, ··· 2386 655 .vp = rk3568_vop_video_ports, 2387 656 .win = rk3568_vop_win_data, 2388 657 .win_size = ARRAY_SIZE(rk3568_vop_win_data), 658 + .cluster_reg = rk3568_vop_cluster_regs, 659 + .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs), 660 + .smart_reg = rk3568_vop_smart_regs, 661 + .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs), 2389 662 .regs_dump = 
rk3568_regs_dump, 2390 663 .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump), 664 + .ops = &rk3568_vop_ops, 2391 665 .soc_id = 3566, 2392 666 }; 2393 667 2394 668 static const struct vop2_data rk3568_vop = { 669 + .version = VOP_VERSION_RK3568, 2395 670 .feature = VOP2_FEATURE_HAS_SYS_GRF, 2396 671 .nr_vps = 3, 2397 672 .max_input = { 4096, 2304 }, ··· 2405 668 .vp = rk3568_vop_video_ports, 2406 669 .win = rk3568_vop_win_data, 2407 670 .win_size = ARRAY_SIZE(rk3568_vop_win_data), 671 + .cluster_reg = rk3568_vop_cluster_regs, 672 + .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs), 673 + .smart_reg = rk3568_vop_smart_regs, 674 + .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs), 2408 675 .regs_dump = rk3568_regs_dump, 2409 676 .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump), 677 + .ops = &rk3568_vop_ops, 2410 678 .soc_id = 3568, 2411 679 }; 2412 680 681 + static const struct vop2_data rk3576_vop = { 682 + .version = VOP_VERSION_RK3576, 683 + .feature = VOP2_FEATURE_HAS_SYS_PMU, 684 + .nr_vps = 3, 685 + .max_input = { 4096, 4320 }, 686 + .max_output = { 4096, 4320 }, 687 + .vp = rk3576_vop_video_ports, 688 + .win = rk3576_vop_win_data, 689 + .win_size = ARRAY_SIZE(rk3576_vop_win_data), 690 + .cluster_reg = rk3576_vop_cluster_regs, 691 + .nr_cluster_regs = ARRAY_SIZE(rk3576_vop_cluster_regs), 692 + .smart_reg = rk3576_vop_smart_regs, 693 + .nr_smart_regs = ARRAY_SIZE(rk3576_vop_smart_regs), 694 + .regs_dump = rk3576_regs_dump, 695 + .regs_dump_size = ARRAY_SIZE(rk3576_regs_dump), 696 + .ops = &rk3576_vop_ops, 697 + .soc_id = 3576, 698 + }; 699 + 2413 700 static const struct vop2_data rk3588_vop = { 701 + .version = VOP_VERSION_RK3588, 2414 702 .feature = VOP2_FEATURE_HAS_SYS_GRF | VOP2_FEATURE_HAS_VO1_GRF | 2415 703 VOP2_FEATURE_HAS_VOP_GRF | VOP2_FEATURE_HAS_SYS_PMU, 2416 704 .nr_vps = 4, ··· 2444 682 .vp = rk3588_vop_video_ports, 2445 683 .win = rk3588_vop_win_data, 2446 684 .win_size = ARRAY_SIZE(rk3588_vop_win_data), 685 + .cluster_reg = rk3568_vop_cluster_regs, 
686 + .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs), 687 + .smart_reg = rk3568_vop_smart_regs, 688 + .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs), 2447 689 .regs_dump = rk3588_regs_dump, 2448 690 .regs_dump_size = ARRAY_SIZE(rk3588_regs_dump), 691 + .ops = &rk3588_vop_ops, 2449 692 .soc_id = 3588, 2450 693 }; 2451 694 ··· 2461 694 }, { 2462 695 .compatible = "rockchip,rk3568-vop", 2463 696 .data = &rk3568_vop, 697 + }, { 698 + .compatible = "rockchip,rk3576-vop", 699 + .data = &rk3576_vop, 2464 700 }, { 2465 701 .compatible = "rockchip,rk3588-vop", 2466 702 .data = &rk3588_vop
+1 -1
drivers/gpu/drm/scheduler/sched_entity.c
··· 91 91 * the lowest priority available. 92 92 */ 93 93 if (entity->priority >= sched_list[0]->num_rqs) { 94 - drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n", 94 + dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n", 95 95 entity->priority, sched_list[0]->num_rqs); 96 96 entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, 97 97 (s32) DRM_SCHED_PRIORITY_KERNEL);
+18 -14
drivers/gpu/drm/scheduler/sched_main.c
··· 102 102 { 103 103 u32 credits; 104 104 105 - drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit, 106 - atomic_read(&sched->credit_count), 107 - &credits)); 105 + WARN_ON(check_sub_overflow(sched->credit_limit, 106 + atomic_read(&sched->credit_count), 107 + &credits)); 108 108 109 109 return credits; 110 110 } ··· 129 129 /* If a job exceeds the credit limit, truncate it to the credit limit 130 130 * itself to guarantee forward progress. 131 131 */ 132 - if (drm_WARN(sched, s_job->credits > sched->credit_limit, 133 - "Jobs may not exceed the credit limit, truncate.\n")) 132 + if (s_job->credits > sched->credit_limit) { 133 + dev_WARN(sched->dev, 134 + "Jobs may not exceed the credit limit, truncate.\n"); 134 135 s_job->credits = sched->credit_limit; 136 + } 135 137 136 138 return drm_sched_available_credits(sched) >= s_job->credits; 137 139 } ··· 791 789 * or worse--a blank screen--leave a trail in the 792 790 * logs, so this can be debugged easier. 793 791 */ 794 - drm_err(job->sched, "%s: entity has no rq!\n", __func__); 792 + dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__); 795 793 return -ENOENT; 796 794 } 797 795 ··· 1015 1013 * Cleans up the resources allocated with drm_sched_job_init(). 1016 1014 * 1017 1015 * Drivers should call this from their error unwind code if @job is aborted 1018 - * before drm_sched_job_arm() is called. 1016 + * before it was submitted to an entity with drm_sched_entity_push_job(). 1019 1017 * 1020 - * After that point of no return @job is committed to be executed by the 1021 - * scheduler, and this function should be called from the 1022 - * &drm_sched_backend_ops.free_job callback. 1018 + * Since calling drm_sched_job_arm() causes the job's fences to be initialized, 1019 + * it is up to the driver to ensure that fences that were exposed to external 1020 + * parties get signaled. drm_sched_job_cleanup() does not ensure this. 
1021 + * 1022 + * This function must also be called in &struct drm_sched_backend_ops.free_job 1023 1023 */ 1024 1024 void drm_sched_job_cleanup(struct drm_sched_job *job) 1025 1025 { ··· 1032 1028 /* drm_sched_job_arm() has been called */ 1033 1029 dma_fence_put(&job->s_fence->finished); 1034 1030 } else { 1035 - /* aborted job before committing to run it */ 1031 + /* aborted job before arming */ 1036 1032 drm_sched_fence_free(job->s_fence); 1037 1033 } 1038 1034 ··· 1267 1263 if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) { 1268 1264 /* This is a gross violation--tell drivers what the problem is. 1269 1265 */ 1270 - drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n", 1266 + dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n", 1271 1267 __func__); 1272 1268 return -EINVAL; 1273 1269 } else if (sched->sched_rq) { ··· 1275 1271 * fine-tune their DRM calling order, and return all 1276 1272 * is good. 1277 1273 */ 1278 - drm_warn(sched, "%s: scheduler already initialized!\n", __func__); 1274 + dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__); 1279 1275 return 0; 1280 1276 } 1281 1277 ··· 1330 1326 Out_check_own: 1331 1327 if (sched->own_submit_wq) 1332 1328 destroy_workqueue(sched->submit_wq); 1333 - drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__); 1329 + dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__); 1334 1330 return -ENOMEM; 1335 1331 } 1336 1332 EXPORT_SYMBOL(drm_sched_init);
+14 -10
drivers/gpu/drm/tests/drm_atomic_state_test.c
··· 189 189 static void drm_test_check_connector_changed_modeset(struct kunit *test) 190 190 { 191 191 struct drm_atomic_test_priv *priv; 192 - struct drm_modeset_acquire_ctx *ctx; 192 + struct drm_modeset_acquire_ctx ctx; 193 193 struct drm_connector *old_conn, *new_conn; 194 194 struct drm_atomic_state *state; 195 195 struct drm_device *drm; ··· 203 203 old_conn = &priv->connectors[0]; 204 204 new_conn = &priv->connectors[1]; 205 205 206 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 207 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 206 + drm_modeset_acquire_init(&ctx, 0); 208 207 209 208 // first modeset to enable 210 - ret = set_up_atomic_state(test, priv, old_conn, ctx); 209 + ret = set_up_atomic_state(test, priv, old_conn, &ctx); 211 210 KUNIT_ASSERT_EQ(test, ret, 0); 212 211 213 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 212 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 214 213 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 215 214 216 215 new_conn_state = drm_atomic_get_connector_state(state, new_conn); ··· 230 231 ret = drm_atomic_commit(state); 231 232 KUNIT_ASSERT_EQ(test, ret, 0); 232 233 KUNIT_ASSERT_EQ(test, modeset_counter, initial_modeset_count + 1); 234 + 235 + drm_modeset_drop_locks(&ctx); 236 + drm_modeset_acquire_fini(&ctx); 233 237 } 234 238 235 239 /* ··· 265 263 int ret; 266 264 const struct drm_clone_mode_test *param = test->param_value; 267 265 struct drm_atomic_test_priv *priv; 268 - struct drm_modeset_acquire_ctx *ctx; 266 + struct drm_modeset_acquire_ctx ctx; 269 267 struct drm_device *drm; 270 268 struct drm_atomic_state *state; 271 269 struct drm_crtc_state *crtc_state; ··· 275 273 276 274 drm = &priv->drm; 277 275 278 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 279 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 276 + drm_modeset_acquire_init(&ctx, 0); 280 277 281 - ret = set_up_atomic_state(test, priv, NULL, ctx); 278 + ret = set_up_atomic_state(test, priv, NULL, &ctx); 282 279 KUNIT_ASSERT_EQ(test, 
ret, 0); 283 280 284 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 281 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 285 282 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 286 283 287 284 crtc_state = drm_atomic_get_crtc_state(state, priv->crtc); ··· 293 292 294 293 ret = drm_atomic_helper_check_modeset(drm, state); 295 294 KUNIT_ASSERT_EQ(test, ret, param->expected_result); 295 + 296 + drm_modeset_drop_locks(&ctx); 297 + drm_modeset_acquire_fini(&ctx); 296 298 } 297 299 298 300 static void drm_check_in_clone_mode_desc(const struct drm_clone_mode_test *t,
+81
drivers/gpu/drm/tests/drm_format_helper_test.c
··· 60 60 const u8 expected[TEST_BUF_SIZE]; 61 61 }; 62 62 63 + struct convert_to_bgr888_result { 64 + unsigned int dst_pitch; 65 + const u8 expected[TEST_BUF_SIZE]; 66 + }; 67 + 63 68 struct convert_to_argb8888_result { 64 69 unsigned int dst_pitch; 65 70 const u32 expected[TEST_BUF_SIZE]; ··· 112 107 struct convert_to_argb1555_result argb1555_result; 113 108 struct convert_to_rgba5551_result rgba5551_result; 114 109 struct convert_to_rgb888_result rgb888_result; 110 + struct convert_to_bgr888_result bgr888_result; 115 111 struct convert_to_argb8888_result argb8888_result; 116 112 struct convert_to_xrgb2101010_result xrgb2101010_result; 117 113 struct convert_to_argb2101010_result argb2101010_result; ··· 156 150 .rgb888_result = { 157 151 .dst_pitch = TEST_USE_DEFAULT_PITCH, 158 152 .expected = { 0x00, 0x00, 0xFF }, 153 + }, 154 + .bgr888_result = { 155 + .dst_pitch = TEST_USE_DEFAULT_PITCH, 156 + .expected = { 0xFF, 0x00, 0x00 }, 159 157 }, 160 158 .argb8888_result = { 161 159 .dst_pitch = TEST_USE_DEFAULT_PITCH, ··· 226 216 .rgb888_result = { 227 217 .dst_pitch = TEST_USE_DEFAULT_PITCH, 228 218 .expected = { 0x00, 0x00, 0xFF }, 219 + }, 220 + .bgr888_result = { 221 + .dst_pitch = TEST_USE_DEFAULT_PITCH, 222 + .expected = { 0xFF, 0x00, 0x00 }, 229 223 }, 230 224 .argb8888_result = { 231 225 .dst_pitch = TEST_USE_DEFAULT_PITCH, ··· 342 328 0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00, 343 329 0xFF, 0x00, 0x00, 0xFF, 0x00, 0xFF, 344 330 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 331 + }, 332 + }, 333 + .bgr888_result = { 334 + .dst_pitch = TEST_USE_DEFAULT_PITCH, 335 + .expected = { 336 + 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 337 + 0xFF, 0x00, 0x00, 0x00, 0xFF, 0x00, 338 + 0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 339 + 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 345 340 }, 346 341 }, 347 342 .argb8888_result = { ··· 488 465 0x73, 0xF0, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 489 466 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 490 467 0x03, 0x03, 0xA8, 0x73, 0xF0, 0x6C, 0x9C, 0x44, 0x0E, 468 + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 469 + }, 470 + }, 471 + .bgr888_result = { 472 + .dst_pitch = 15, 473 + .expected = { 474 + 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, 0xA8, 0xF3, 0x03, 475 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 476 + 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, 477 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 478 + 0xA8, 0x03, 0x03, 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, 491 479 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 492 480 }, 493 481 }, ··· 942 908 int blit_result = 0; 943 909 944 910 blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip, 911 + &fmtcnv_state); 912 + 913 + KUNIT_EXPECT_FALSE(test, blit_result); 914 + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); 915 + } 916 + 917 + static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test) 918 + { 919 + const struct convert_xrgb8888_case *params = test->param_value; 920 + const struct convert_to_bgr888_result *result = &params->bgr888_result; 921 + size_t dst_size; 922 + u8 *buf = NULL; 923 + __le32 *xrgb8888 = NULL; 924 + struct iosys_map dst, src; 925 + 926 + struct drm_framebuffer fb = { 927 + .format = drm_format_info(DRM_FORMAT_XRGB8888), 928 + .pitches = { params->pitch, 0, 0 }, 929 + }; 930 + 931 + dst_size = conversion_buf_size(DRM_FORMAT_BGR888, result->dst_pitch, 932 + &params->clip, 0); 933 + KUNIT_ASSERT_GT(test, dst_size, 0); 934 + 935 + buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); 936 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); 937 + iosys_map_set_vaddr(&dst, buf); 938 + 939 + xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE); 940 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); 941 + iosys_map_set_vaddr(&src, xrgb8888); 942 + 943 + /* 944 + * BGR888 expected results are already in little-endian 945 + * order, so there's no need to convert the test output. 
946 + */ 947 + drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip, 948 + &fmtcnv_state); 949 + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); 950 + 951 + buf = dst.vaddr; /* restore original value of buf */ 952 + memset(buf, 0, dst_size); 953 + 954 + int blit_result = 0; 955 + 956 + blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, &params->clip, 945 957 &fmtcnv_state); 946 958 947 959 KUNIT_EXPECT_FALSE(test, blit_result); ··· 1931 1851 KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params), 1932 1852 KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params), 1933 1853 KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params), 1854 + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_bgr888, convert_xrgb8888_gen_params), 1934 1855 KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params), 1935 1856 KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params), 1936 1857 KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
+150 -104
drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
··· 273 273 static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test) 274 274 { 275 275 struct drm_atomic_helper_connector_hdmi_priv *priv; 276 - struct drm_modeset_acquire_ctx *ctx; 276 + struct drm_modeset_acquire_ctx ctx; 277 277 struct drm_connector_state *old_conn_state; 278 278 struct drm_connector_state *new_conn_state; 279 279 struct drm_crtc_state *crtc_state; ··· 296 296 preferred = find_preferred_mode(conn); 297 297 KUNIT_ASSERT_NOT_NULL(test, preferred); 298 298 299 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 300 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 299 + drm_modeset_acquire_init(&ctx, 0); 301 300 302 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 301 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 303 302 KUNIT_ASSERT_EQ(test, ret, 0); 304 303 305 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 304 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 306 305 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 307 306 308 307 new_conn_state = drm_atomic_get_connector_state(state, conn); ··· 326 327 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 327 328 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); 328 329 KUNIT_EXPECT_TRUE(test, crtc_state->mode_changed); 330 + 331 + drm_modeset_drop_locks(&ctx); 332 + drm_modeset_acquire_fini(&ctx); 329 333 } 330 334 331 335 /* ··· 339 337 static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *test) 340 338 { 341 339 struct drm_atomic_helper_connector_hdmi_priv *priv; 342 - struct drm_modeset_acquire_ctx *ctx; 340 + struct drm_modeset_acquire_ctx ctx; 343 341 struct drm_connector_state *old_conn_state; 344 342 struct drm_connector_state *new_conn_state; 345 343 struct drm_crtc_state *crtc_state; ··· 362 360 preferred = find_preferred_mode(conn); 363 361 KUNIT_ASSERT_NOT_NULL(test, preferred); 364 362 365 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 366 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, 
ctx); 363 + drm_modeset_acquire_init(&ctx, 0); 367 364 368 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 365 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 369 366 KUNIT_ASSERT_EQ(test, ret, 0); 370 367 371 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 368 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 372 369 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 373 370 374 371 new_conn_state = drm_atomic_get_connector_state(state, conn); ··· 394 393 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 395 394 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); 396 395 KUNIT_EXPECT_FALSE(test, crtc_state->mode_changed); 396 + 397 + drm_modeset_drop_locks(&ctx); 398 + drm_modeset_acquire_fini(&ctx); 397 399 } 398 400 399 401 /* ··· 407 403 static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test) 408 404 { 409 405 struct drm_atomic_helper_connector_hdmi_priv *priv; 410 - struct drm_modeset_acquire_ctx *ctx; 406 + struct drm_modeset_acquire_ctx ctx; 411 407 struct drm_connector_state *conn_state; 412 408 struct drm_atomic_state *state; 413 409 struct drm_display_mode *preferred; ··· 430 426 KUNIT_ASSERT_NOT_NULL(test, preferred); 431 427 KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); 432 428 433 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 434 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 429 + drm_modeset_acquire_init(&ctx, 0); 435 430 436 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 431 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 437 432 KUNIT_ASSERT_EQ(test, ret, 0); 438 433 439 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 434 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 440 435 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 441 436 442 437 conn_state = drm_atomic_get_connector_state(state, conn); ··· 452 449 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); 453 450 454 451 
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range); 452 + 453 + drm_modeset_drop_locks(&ctx); 454 + drm_modeset_acquire_fini(&ctx); 455 455 } 456 456 457 457 /* ··· 465 459 static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) 466 460 { 467 461 struct drm_atomic_helper_connector_hdmi_priv *priv; 468 - struct drm_modeset_acquire_ctx *ctx; 462 + struct drm_modeset_acquire_ctx ctx; 469 463 struct drm_connector_state *conn_state; 470 464 struct drm_atomic_state *state; 471 465 struct drm_display_mode *mode; ··· 483 477 conn = &priv->connector; 484 478 KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); 485 479 486 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 487 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 480 + drm_modeset_acquire_init(&ctx, 0); 488 481 489 482 mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 490 483 KUNIT_ASSERT_NOT_NULL(test, mode); 491 484 492 485 crtc = priv->crtc; 493 - ret = light_up_connector(test, drm, crtc, conn, mode, ctx); 486 + ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); 494 487 KUNIT_ASSERT_EQ(test, ret, 0); 495 488 496 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 489 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 497 490 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 498 491 499 492 conn_state = drm_atomic_get_connector_state(state, conn); ··· 509 504 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); 510 505 511 506 KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range); 507 + 508 + drm_modeset_drop_locks(&ctx); 509 + drm_modeset_acquire_fini(&ctx); 512 510 } 513 511 514 512 /* ··· 522 514 static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test) 523 515 { 524 516 struct drm_atomic_helper_connector_hdmi_priv *priv; 525 - struct drm_modeset_acquire_ctx *ctx; 517 + struct drm_modeset_acquire_ctx ctx; 526 518 struct drm_connector_state *conn_state; 527 519 struct drm_atomic_state *state; 528 520 struct drm_display_mode 
*preferred; ··· 545 537 KUNIT_ASSERT_NOT_NULL(test, preferred); 546 538 KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); 547 539 548 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 549 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 540 + drm_modeset_acquire_init(&ctx, 0); 550 541 551 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 542 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 552 543 KUNIT_ASSERT_EQ(test, ret, 0); 553 544 554 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 545 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 555 546 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 556 547 557 548 conn_state = drm_atomic_get_connector_state(state, conn); ··· 569 562 DRM_HDMI_BROADCAST_RGB_FULL); 570 563 571 564 KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range); 565 + 566 + drm_modeset_drop_locks(&ctx); 567 + drm_modeset_acquire_fini(&ctx); 572 568 } 573 569 574 570 /* ··· 582 572 static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) 583 573 { 584 574 struct drm_atomic_helper_connector_hdmi_priv *priv; 585 - struct drm_modeset_acquire_ctx *ctx; 575 + struct drm_modeset_acquire_ctx ctx; 586 576 struct drm_connector_state *conn_state; 587 577 struct drm_atomic_state *state; 588 578 struct drm_display_mode *mode; ··· 600 590 conn = &priv->connector; 601 591 KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); 602 592 603 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 604 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 593 + drm_modeset_acquire_init(&ctx, 0); 605 594 606 595 mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 607 596 KUNIT_ASSERT_NOT_NULL(test, mode); 608 597 609 598 crtc = priv->crtc; 610 - ret = light_up_connector(test, drm, crtc, conn, mode, ctx); 599 + ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); 611 600 KUNIT_ASSERT_EQ(test, ret, 0); 612 601 613 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 602 + state 
= drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 614 603 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 615 604 616 605 conn_state = drm_atomic_get_connector_state(state, conn); ··· 628 619 DRM_HDMI_BROADCAST_RGB_FULL); 629 620 630 621 KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range); 622 + 623 + drm_modeset_drop_locks(&ctx); 624 + drm_modeset_acquire_fini(&ctx); 631 625 } 632 626 633 627 /* ··· 641 629 static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test) 642 630 { 643 631 struct drm_atomic_helper_connector_hdmi_priv *priv; 644 - struct drm_modeset_acquire_ctx *ctx; 632 + struct drm_modeset_acquire_ctx ctx; 645 633 struct drm_connector_state *conn_state; 646 634 struct drm_atomic_state *state; 647 635 struct drm_display_mode *preferred; ··· 664 652 KUNIT_ASSERT_NOT_NULL(test, preferred); 665 653 KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); 666 654 667 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 668 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 655 + drm_modeset_acquire_init(&ctx, 0); 669 656 670 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 657 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 671 658 KUNIT_ASSERT_EQ(test, ret, 0); 672 659 673 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 660 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 674 661 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 675 662 676 663 conn_state = drm_atomic_get_connector_state(state, conn); ··· 688 677 DRM_HDMI_BROADCAST_RGB_LIMITED); 689 678 690 679 KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range); 680 + 681 + drm_modeset_drop_locks(&ctx); 682 + drm_modeset_acquire_fini(&ctx); 691 683 } 692 684 693 685 /* ··· 701 687 static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *test) 702 688 { 703 689 struct drm_atomic_helper_connector_hdmi_priv *priv; 704 - struct drm_modeset_acquire_ctx *ctx; 690 + struct drm_modeset_acquire_ctx ctx; 705 691 
struct drm_connector_state *conn_state; 706 692 struct drm_atomic_state *state; 707 693 struct drm_display_mode *mode; ··· 719 705 conn = &priv->connector; 720 706 KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); 721 707 722 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 723 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 708 + drm_modeset_acquire_init(&ctx, 0); 724 709 725 710 mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 726 711 KUNIT_ASSERT_NOT_NULL(test, mode); 727 712 728 713 crtc = priv->crtc; 729 - ret = light_up_connector(test, drm, crtc, conn, mode, ctx); 714 + ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); 730 715 KUNIT_ASSERT_EQ(test, ret, 0); 731 716 732 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 717 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 733 718 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 734 719 735 720 conn_state = drm_atomic_get_connector_state(state, conn); ··· 747 734 DRM_HDMI_BROADCAST_RGB_LIMITED); 748 735 749 736 KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range); 737 + 738 + drm_modeset_drop_locks(&ctx); 739 + drm_modeset_acquire_fini(&ctx); 750 740 } 751 741 752 742 /* ··· 760 744 static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test) 761 745 { 762 746 struct drm_atomic_helper_connector_hdmi_priv *priv; 763 - struct drm_modeset_acquire_ctx *ctx; 747 + struct drm_modeset_acquire_ctx ctx; 764 748 struct drm_connector_state *old_conn_state; 765 749 struct drm_connector_state *new_conn_state; 766 750 struct drm_crtc_state *crtc_state; ··· 787 771 preferred = find_preferred_mode(conn); 788 772 KUNIT_ASSERT_NOT_NULL(test, preferred); 789 773 790 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 791 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 774 + drm_modeset_acquire_init(&ctx, 0); 792 775 793 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 776 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 794 777 
KUNIT_ASSERT_EQ(test, ret, 0); 795 778 796 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 779 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 797 780 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 798 781 799 782 new_conn_state = drm_atomic_get_connector_state(state, conn); ··· 823 808 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 824 809 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); 825 810 KUNIT_EXPECT_TRUE(test, crtc_state->mode_changed); 811 + 812 + drm_modeset_drop_locks(&ctx); 813 + drm_modeset_acquire_fini(&ctx); 826 814 } 827 815 828 816 /* ··· 836 818 static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test) 837 819 { 838 820 struct drm_atomic_helper_connector_hdmi_priv *priv; 839 - struct drm_modeset_acquire_ctx *ctx; 821 + struct drm_modeset_acquire_ctx ctx; 840 822 struct drm_connector_state *old_conn_state; 841 823 struct drm_connector_state *new_conn_state; 842 824 struct drm_crtc_state *crtc_state; ··· 863 845 preferred = find_preferred_mode(conn); 864 846 KUNIT_ASSERT_NOT_NULL(test, preferred); 865 847 866 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 867 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 848 + drm_modeset_acquire_init(&ctx, 0); 868 849 869 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 850 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 870 851 KUNIT_ASSERT_EQ(test, ret, 0); 871 852 872 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 853 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 873 854 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 874 855 875 856 new_conn_state = drm_atomic_get_connector_state(state, conn); ··· 897 880 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 898 881 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); 899 882 KUNIT_EXPECT_FALSE(test, crtc_state->mode_changed); 883 + 884 + drm_modeset_drop_locks(&ctx); 885 + drm_modeset_acquire_fini(&ctx); 900 886 } 901 887 902 888 /* ··· 
909 889 static void drm_test_check_output_bpc_dvi(struct kunit *test) 910 890 { 911 891 struct drm_atomic_helper_connector_hdmi_priv *priv; 912 - struct drm_modeset_acquire_ctx *ctx; 892 + struct drm_modeset_acquire_ctx ctx; 913 893 struct drm_connector_state *conn_state; 914 894 struct drm_display_info *info; 915 895 struct drm_display_mode *preferred; ··· 939 919 preferred = find_preferred_mode(conn); 940 920 KUNIT_ASSERT_NOT_NULL(test, preferred); 941 921 942 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 943 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 922 + drm_modeset_acquire_init(&ctx, 0); 944 923 945 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 924 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 946 925 KUNIT_ASSERT_EQ(test, ret, 0); 947 926 948 927 conn_state = conn->state; ··· 949 930 950 931 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8); 951 932 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 933 + 934 + drm_modeset_drop_locks(&ctx); 935 + drm_modeset_acquire_fini(&ctx); 952 936 } 953 937 954 938 /* ··· 961 939 static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test) 962 940 { 963 941 struct drm_atomic_helper_connector_hdmi_priv *priv; 964 - struct drm_modeset_acquire_ctx *ctx; 942 + struct drm_modeset_acquire_ctx ctx; 965 943 struct drm_connector_state *conn_state; 966 944 struct drm_display_mode *preferred; 967 945 struct drm_connector *conn; ··· 986 964 KUNIT_ASSERT_NOT_NULL(test, preferred); 987 965 KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); 988 966 989 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 990 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 967 + drm_modeset_acquire_init(&ctx, 0); 991 968 992 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 969 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 993 970 KUNIT_ASSERT_EQ(test, ret, 0); 994 971 995 972 conn_state = conn->state; ··· 997 976 
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 8); 998 977 KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 999 978 KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1000); 979 + 980 + drm_modeset_drop_locks(&ctx); 981 + drm_modeset_acquire_fini(&ctx); 1000 982 } 1001 983 1002 984 /* ··· 1010 986 static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test) 1011 987 { 1012 988 struct drm_atomic_helper_connector_hdmi_priv *priv; 1013 - struct drm_modeset_acquire_ctx *ctx; 989 + struct drm_modeset_acquire_ctx ctx; 1014 990 struct drm_connector_state *conn_state; 1015 991 struct drm_display_mode *preferred; 1016 992 struct drm_connector *conn; ··· 1035 1011 KUNIT_ASSERT_NOT_NULL(test, preferred); 1036 1012 KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); 1037 1013 1038 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1039 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1014 + drm_modeset_acquire_init(&ctx, 0); 1040 1015 1041 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1016 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1042 1017 KUNIT_ASSERT_EQ(test, ret, 0); 1043 1018 1044 1019 conn_state = conn->state; ··· 1046 1023 KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 10); 1047 1024 KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1048 1025 KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1250); 1026 + 1027 + drm_modeset_drop_locks(&ctx); 1028 + drm_modeset_acquire_fini(&ctx); 1049 1029 } 1050 1030 1051 1031 /* ··· 1059 1033 static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test) 1060 1034 { 1061 1035 struct drm_atomic_helper_connector_hdmi_priv *priv; 1062 - struct drm_modeset_acquire_ctx *ctx; 1036 + struct drm_modeset_acquire_ctx ctx; 1063 1037 struct drm_connector_state *conn_state; 1064 1038 struct drm_display_mode *preferred; 1065 1039 struct drm_connector *conn; ··· 1084 
1058 KUNIT_ASSERT_NOT_NULL(test, preferred); 1085 1059 KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); 1086 1060 1087 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1088 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1061 + drm_modeset_acquire_init(&ctx, 0); 1089 1062 1090 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1063 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1091 1064 KUNIT_ASSERT_EQ(test, ret, 0); 1092 1065 1093 1066 conn_state = conn->state; ··· 1095 1070 KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 12); 1096 1071 KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1097 1072 KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1500); 1073 + 1074 + drm_modeset_drop_locks(&ctx); 1075 + drm_modeset_acquire_fini(&ctx); 1098 1076 } 1099 1077 1100 1078 /* ··· 1111 1083 static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test) 1112 1084 { 1113 1085 struct drm_atomic_helper_connector_hdmi_priv *priv; 1114 - struct drm_modeset_acquire_ctx *ctx; 1086 + struct drm_modeset_acquire_ctx ctx; 1115 1087 struct drm_atomic_state *state; 1116 1088 struct drm_display_mode *preferred; 1117 1089 struct drm_crtc_state *crtc_state; ··· 1132 1104 preferred = find_preferred_mode(conn); 1133 1105 KUNIT_ASSERT_NOT_NULL(test, preferred); 1134 1106 1135 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1136 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1107 + drm_modeset_acquire_init(&ctx, 0); 1137 1108 1138 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1109 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1139 1110 KUNIT_ASSERT_EQ(test, ret, 0); 1140 1111 1141 1112 /* You shouldn't be doing that at home. 
*/ 1142 1113 conn->hdmi.funcs = &reject_connector_hdmi_funcs; 1143 1114 1144 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 1115 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 1145 1116 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 1146 1117 1147 1118 crtc_state = drm_atomic_get_crtc_state(state, crtc); ··· 1150 1123 1151 1124 ret = drm_atomic_check_only(state); 1152 1125 KUNIT_EXPECT_LT(test, ret, 0); 1126 + 1127 + drm_modeset_drop_locks(&ctx); 1128 + drm_modeset_acquire_fini(&ctx); 1153 1129 } 1154 1130 1155 1131 /* ··· 1169 1139 static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) 1170 1140 { 1171 1141 struct drm_atomic_helper_connector_hdmi_priv *priv; 1172 - struct drm_modeset_acquire_ctx *ctx; 1142 + struct drm_modeset_acquire_ctx ctx; 1173 1143 struct drm_connector_state *conn_state; 1174 1144 struct drm_display_info *info; 1175 1145 struct drm_display_mode *preferred; ··· 1206 1176 rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB); 1207 1177 KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); 1208 1178 1209 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1210 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1179 + drm_modeset_acquire_init(&ctx, 0); 1211 1180 1212 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1181 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1213 1182 KUNIT_EXPECT_EQ(test, ret, 0); 1214 1183 1215 1184 conn_state = conn->state; ··· 1217 1188 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10); 1218 1189 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1219 1190 KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1250); 1191 + 1192 + drm_modeset_drop_locks(&ctx); 1193 + drm_modeset_acquire_fini(&ctx); 1220 1194 } 1221 1195 1222 1196 /* ··· 1238 1206 static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) 1239 1207 { 1240 1208 struct 
drm_atomic_helper_connector_hdmi_priv *priv; 1241 - struct drm_modeset_acquire_ctx *ctx; 1209 + struct drm_modeset_acquire_ctx ctx; 1242 1210 struct drm_connector_state *conn_state; 1243 1211 struct drm_display_info *info; 1244 1212 struct drm_display_mode *preferred; ··· 1280 1248 rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422); 1281 1249 KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); 1282 1250 1283 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1284 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1251 + drm_modeset_acquire_init(&ctx, 0); 1285 1252 1286 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1253 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1287 1254 KUNIT_EXPECT_EQ(test, ret, 0); 1288 1255 1289 1256 conn_state = conn->state; ··· 1290 1259 1291 1260 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10); 1292 1261 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1262 + 1263 + drm_modeset_drop_locks(&ctx); 1264 + drm_modeset_acquire_fini(&ctx); 1293 1265 } 1294 1266 1295 1267 /* ··· 1303 1269 static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) 1304 1270 { 1305 1271 struct drm_atomic_helper_connector_hdmi_priv *priv; 1306 - struct drm_modeset_acquire_ctx *ctx; 1272 + struct drm_modeset_acquire_ctx ctx; 1307 1273 struct drm_connector_state *conn_state; 1308 1274 struct drm_display_info *info; 1309 1275 struct drm_display_mode *mode; ··· 1344 1310 rate = mode->clock * 1500; 1345 1311 KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); 1346 1312 1347 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1348 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1313 + drm_modeset_acquire_init(&ctx, 0); 1349 1314 1350 1315 crtc = priv->crtc; 1351 - ret = light_up_connector(test, drm, crtc, conn, mode, ctx); 1316 + ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); 1352 1317 KUNIT_EXPECT_EQ(test, ret, 0); 1353 1318 1354 1319 conn_state = 
conn->state; ··· 1355 1322 1356 1323 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8); 1357 1324 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1325 + 1326 + drm_modeset_drop_locks(&ctx); 1327 + drm_modeset_acquire_fini(&ctx); 1358 1328 } 1359 1329 1360 1330 /* ··· 1367 1331 static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) 1368 1332 { 1369 1333 struct drm_atomic_helper_connector_hdmi_priv *priv; 1370 - struct drm_modeset_acquire_ctx *ctx; 1334 + struct drm_modeset_acquire_ctx ctx; 1371 1335 struct drm_connector_state *conn_state; 1372 1336 struct drm_display_info *info; 1373 1337 struct drm_display_mode *preferred; ··· 1412 1376 rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422); 1413 1377 KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); 1414 1378 1415 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1416 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1379 + drm_modeset_acquire_init(&ctx, 0); 1417 1380 1418 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1381 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1419 1382 KUNIT_EXPECT_EQ(test, ret, 0); 1420 1383 1421 1384 conn_state = conn->state; ··· 1422 1387 1423 1388 KUNIT_EXPECT_LT(test, conn_state->hdmi.output_bpc, 12); 1424 1389 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1390 + 1391 + drm_modeset_drop_locks(&ctx); 1392 + drm_modeset_acquire_fini(&ctx); 1425 1393 } 1426 1394 1427 1395 /* ··· 1434 1396 static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test) 1435 1397 { 1436 1398 struct drm_atomic_helper_connector_hdmi_priv *priv; 1437 - struct drm_modeset_acquire_ctx *ctx; 1399 + struct drm_modeset_acquire_ctx ctx; 1438 1400 struct drm_connector_state *conn_state; 1439 1401 struct drm_display_info *info; 1440 1402 struct drm_display_mode *preferred; ··· 1481 1443 rate = drm_hdmi_compute_mode_clock(preferred, 12, 
HDMI_COLORSPACE_YUV422); 1482 1444 KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); 1483 1445 1484 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1485 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1446 + drm_modeset_acquire_init(&ctx, 0); 1486 1447 1487 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1448 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1488 1449 KUNIT_EXPECT_EQ(test, ret, 0); 1489 1450 1490 1451 conn_state = conn->state; ··· 1491 1454 1492 1455 KUNIT_EXPECT_LT(test, conn_state->hdmi.output_bpc, 12); 1493 1456 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1457 + 1458 + drm_modeset_drop_locks(&ctx); 1459 + drm_modeset_acquire_fini(&ctx); 1494 1460 } 1495 1461 1496 1462 /* ··· 1504 1464 static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test) 1505 1465 { 1506 1466 struct drm_atomic_helper_connector_hdmi_priv *priv; 1507 - struct drm_modeset_acquire_ctx *ctx; 1467 + struct drm_modeset_acquire_ctx ctx; 1508 1468 struct drm_connector_state *conn_state; 1509 1469 struct drm_display_info *info; 1510 1470 struct drm_display_mode *preferred; ··· 1541 1501 rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB); 1542 1502 KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); 1543 1503 1544 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1545 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1504 + drm_modeset_acquire_init(&ctx, 0); 1546 1505 1547 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1506 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1548 1507 KUNIT_EXPECT_EQ(test, ret, 0); 1549 1508 1550 1509 conn_state = conn->state; ··· 1551 1512 1552 1513 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8); 1553 1514 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1515 + 1516 + drm_modeset_drop_locks(&ctx); 1517 + drm_modeset_acquire_fini(&ctx); 1554 1518 } 1555 1519 1556 1520 
/* ··· 1564 1522 static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *test) 1565 1523 { 1566 1524 struct drm_atomic_helper_connector_hdmi_priv *priv; 1567 - struct drm_modeset_acquire_ctx *ctx; 1525 + struct drm_modeset_acquire_ctx ctx; 1568 1526 struct drm_connector_state *conn_state; 1569 1527 struct drm_display_info *info; 1570 1528 struct drm_display_mode *preferred; ··· 1603 1561 rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB); 1604 1562 KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); 1605 1563 1606 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1607 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1564 + drm_modeset_acquire_init(&ctx, 0); 1608 1565 1609 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1566 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1610 1567 KUNIT_EXPECT_EQ(test, ret, 0); 1611 1568 1612 1569 conn_state = conn->state; ··· 1613 1572 1614 1573 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8); 1615 1574 KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); 1575 + 1576 + drm_modeset_drop_locks(&ctx); 1577 + drm_modeset_acquire_fini(&ctx); 1616 1578 } 1617 1579 1618 1580 /* Test that atomic check succeeds when disabling a connector. 
*/ 1619 1581 static void drm_test_check_disable_connector(struct kunit *test) 1620 1582 { 1621 1583 struct drm_atomic_helper_connector_hdmi_priv *priv; 1622 - struct drm_modeset_acquire_ctx *ctx; 1584 + struct drm_modeset_acquire_ctx ctx; 1623 1585 struct drm_connector_state *conn_state; 1624 1586 struct drm_crtc_state *crtc_state; 1625 1587 struct drm_atomic_state *state; ··· 1637 1593 8); 1638 1594 KUNIT_ASSERT_NOT_NULL(test, priv); 1639 1595 1640 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1641 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1596 + drm_modeset_acquire_init(&ctx, 0); 1642 1597 1643 1598 conn = &priv->connector; 1644 1599 preferred = find_preferred_mode(conn); ··· 1645 1602 1646 1603 drm = &priv->drm; 1647 1604 crtc = priv->crtc; 1648 - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 1605 + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); 1649 1606 KUNIT_ASSERT_EQ(test, ret, 0); 1650 1607 1651 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 1608 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 1652 1609 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 1653 1610 1654 1611 crtc_state = drm_atomic_get_crtc_state(state, crtc); ··· 1666 1623 1667 1624 ret = drm_atomic_check_only(state); 1668 1625 KUNIT_ASSERT_EQ(test, ret, 0); 1626 + 1627 + drm_modeset_drop_locks(&ctx); 1628 + drm_modeset_acquire_fini(&ctx); 1669 1629 } 1670 1630 1671 1631 static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
-41
drivers/gpu/drm/tests/drm_kunit_helpers.c
··· 80 80 } 81 81 EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver); 82 82 83 - static void action_drm_release_context(void *ptr) 84 - { 85 - struct drm_modeset_acquire_ctx *ctx = ptr; 86 - 87 - drm_modeset_drop_locks(ctx); 88 - drm_modeset_acquire_fini(ctx); 89 - } 90 - 91 - /** 92 - * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context 93 - * @test: The test context object 94 - * 95 - * Allocates and initializes a modeset acquire context. 96 - * 97 - * The context is tied to the kunit test context, so we must not call 98 - * drm_modeset_acquire_fini() on it, it will be done so automatically. 99 - * 100 - * Returns: 101 - * An ERR_PTR on error, a pointer to the newly allocated context otherwise 102 - */ 103 - struct drm_modeset_acquire_ctx * 104 - drm_kunit_helper_acquire_ctx_alloc(struct kunit *test) 105 - { 106 - struct drm_modeset_acquire_ctx *ctx; 107 - int ret; 108 - 109 - ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); 110 - KUNIT_ASSERT_NOT_NULL(test, ctx); 111 - 112 - drm_modeset_acquire_init(ctx, 0); 113 - 114 - ret = kunit_add_action_or_reset(test, 115 - action_drm_release_context, 116 - ctx); 117 - if (ret) 118 - return ERR_PTR(ret); 119 - 120 - return ctx; 121 - } 122 - EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc); 123 - 124 83 static void kunit_action_drm_atomic_state_put(void *ptr) 125 84 { 126 85 struct drm_atomic_state *state = ptr;
+12
drivers/gpu/drm/tiny/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 3 + config DRM_APPLETBDRM 4 + tristate "DRM support for Apple Touch Bars" 5 + depends on DRM && USB && MMU 6 + select DRM_GEM_SHMEM_HELPER 7 + select DRM_KMS_HELPER 8 + help 9 + Say Y here if you want support for the display of Touch Bars on x86 10 + MacBook Pros. 11 + 12 + To compile this driver as a module, choose M here: the 13 + module will be called appletbdrm. 14 + 3 15 config DRM_ARCPGU 4 16 tristate "ARC PGU" 5 17 depends on DRM && OF
+1
drivers/gpu/drm/tiny/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 3 + obj-$(CONFIG_DRM_APPLETBDRM) += appletbdrm.o 3 4 obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o 4 5 obj-$(CONFIG_DRM_BOCHS) += bochs.o 5 6 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
+841
drivers/gpu/drm/tiny/appletbdrm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Apple Touch Bar DRM Driver 4 + * 5 + * Copyright (c) 2023 Kerem Karabay <kekrby@gmail.com> 6 + */ 7 + 8 + #include <linux/align.h> 9 + #include <linux/array_size.h> 10 + #include <linux/bitops.h> 11 + #include <linux/bug.h> 12 + #include <linux/container_of.h> 13 + #include <linux/err.h> 14 + #include <linux/module.h> 15 + #include <linux/overflow.h> 16 + #include <linux/slab.h> 17 + #include <linux/types.h> 18 + #include <linux/unaligned.h> 19 + #include <linux/usb.h> 20 + 21 + #include <drm/drm_atomic.h> 22 + #include <drm/drm_atomic_helper.h> 23 + #include <drm/drm_crtc.h> 24 + #include <drm/drm_damage_helper.h> 25 + #include <drm/drm_drv.h> 26 + #include <drm/drm_encoder.h> 27 + #include <drm/drm_format_helper.h> 28 + #include <drm/drm_fourcc.h> 29 + #include <drm/drm_framebuffer.h> 30 + #include <drm/drm_gem_atomic_helper.h> 31 + #include <drm/drm_gem_framebuffer_helper.h> 32 + #include <drm/drm_gem_shmem_helper.h> 33 + #include <drm/drm_plane.h> 34 + #include <drm/drm_print.h> 35 + #include <drm/drm_probe_helper.h> 36 + 37 + #define APPLETBDRM_PIXEL_FORMAT cpu_to_le32(0x52474241) /* RGBA, the actual format is BGR888 */ 38 + #define APPLETBDRM_BITS_PER_PIXEL 24 39 + 40 + #define APPLETBDRM_MSG_CLEAR_DISPLAY cpu_to_le32(0x434c5244) /* CLRD */ 41 + #define APPLETBDRM_MSG_GET_INFORMATION cpu_to_le32(0x47494e46) /* GINF */ 42 + #define APPLETBDRM_MSG_UPDATE_COMPLETE cpu_to_le32(0x5544434c) /* UDCL */ 43 + #define APPLETBDRM_MSG_SIGNAL_READINESS cpu_to_le32(0x52454459) /* REDY */ 44 + 45 + #define APPLETBDRM_BULK_MSG_TIMEOUT 1000 46 + 47 + #define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm) 48 + #define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev)) 49 + 50 + struct appletbdrm_msg_request_header { 51 + __le16 unk_00; 52 + __le16 unk_02; 53 + __le32 unk_04; 54 + __le32 unk_08; 55 + __le32 size; 56 + } __packed; 57 + 58 + struct appletbdrm_msg_response_header { 59 
+ u8 unk_00[16]; 60 + __le32 msg; 61 + } __packed; 62 + 63 + struct appletbdrm_msg_simple_request { 64 + struct appletbdrm_msg_request_header header; 65 + __le32 msg; 66 + u8 unk_14[8]; 67 + __le32 size; 68 + } __packed; 69 + 70 + struct appletbdrm_msg_information { 71 + struct appletbdrm_msg_response_header header; 72 + u8 unk_14[12]; 73 + __le32 width; 74 + __le32 height; 75 + u8 bits_per_pixel; 76 + __le32 bytes_per_row; 77 + __le32 orientation; 78 + __le32 bitmap_info; 79 + __le32 pixel_format; 80 + __le32 width_inches; /* floating point */ 81 + __le32 height_inches; /* floating point */ 82 + } __packed; 83 + 84 + struct appletbdrm_frame { 85 + __le16 begin_x; 86 + __le16 begin_y; 87 + __le16 width; 88 + __le16 height; 89 + __le32 buf_size; 90 + u8 buf[]; 91 + } __packed; 92 + 93 + struct appletbdrm_fb_request_footer { 94 + u8 unk_00[12]; 95 + __le32 unk_0c; 96 + u8 unk_10[12]; 97 + __le32 unk_1c; 98 + __le64 timestamp; 99 + u8 unk_28[12]; 100 + __le32 unk_34; 101 + u8 unk_38[20]; 102 + __le32 unk_4c; 103 + } __packed; 104 + 105 + struct appletbdrm_fb_request { 106 + struct appletbdrm_msg_request_header header; 107 + __le16 unk_10; 108 + u8 msg_id; 109 + u8 unk_13[29]; 110 + /* 111 + * Contents of `data`: 112 + * - struct appletbdrm_frame frames[]; 113 + * - struct appletbdrm_fb_request_footer footer; 114 + * - padding to make the total size a multiple of 16 115 + */ 116 + u8 data[]; 117 + } __packed; 118 + 119 + struct appletbdrm_fb_request_response { 120 + struct appletbdrm_msg_response_header header; 121 + u8 unk_14[12]; 122 + __le64 timestamp; 123 + } __packed; 124 + 125 + struct appletbdrm_device { 126 + struct device *dmadev; 127 + 128 + unsigned int in_ep; 129 + unsigned int out_ep; 130 + 131 + unsigned int width; 132 + unsigned int height; 133 + 134 + struct drm_device drm; 135 + struct drm_display_mode mode; 136 + struct drm_connector connector; 137 + struct drm_plane primary_plane; 138 + struct drm_crtc crtc; 139 + struct drm_encoder encoder; 140 + }; 
141 + 142 + struct appletbdrm_plane_state { 143 + struct drm_shadow_plane_state base; 144 + struct appletbdrm_fb_request *request; 145 + struct appletbdrm_fb_request_response *response; 146 + size_t request_size; 147 + size_t frames_size; 148 + }; 149 + 150 + static inline struct appletbdrm_plane_state *to_appletbdrm_plane_state(struct drm_plane_state *state) 151 + { 152 + return container_of(state, struct appletbdrm_plane_state, base.base); 153 + } 154 + 155 + static int appletbdrm_send_request(struct appletbdrm_device *adev, 156 + struct appletbdrm_msg_request_header *request, size_t size) 157 + { 158 + struct usb_device *udev = adev_to_udev(adev); 159 + struct drm_device *drm = &adev->drm; 160 + int ret, actual_size; 161 + 162 + ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, adev->out_ep), 163 + request, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT); 164 + if (ret) { 165 + drm_err(drm, "Failed to send message (%d)\n", ret); 166 + return ret; 167 + } 168 + 169 + if (actual_size != size) { 170 + drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n", 171 + actual_size, size); 172 + return -EIO; 173 + } 174 + 175 + return 0; 176 + } 177 + 178 + static int appletbdrm_read_response(struct appletbdrm_device *adev, 179 + struct appletbdrm_msg_response_header *response, 180 + size_t size, __le32 expected_response) 181 + { 182 + struct usb_device *udev = adev_to_udev(adev); 183 + struct drm_device *drm = &adev->drm; 184 + int ret, actual_size; 185 + bool readiness_signal_received = false; 186 + 187 + retry: 188 + ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, adev->in_ep), 189 + response, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT); 190 + if (ret) { 191 + drm_err(drm, "Failed to read response (%d)\n", ret); 192 + return ret; 193 + } 194 + 195 + /* 196 + * The device responds to the first request sent in a particular 197 + * timeframe after the USB device configuration is set with a readiness 198 + * signal, in which case the response should be read 
again 199 + */ 200 + if (response->msg == APPLETBDRM_MSG_SIGNAL_READINESS) { 201 + if (!readiness_signal_received) { 202 + readiness_signal_received = true; 203 + goto retry; 204 + } 205 + 206 + drm_err(drm, "Encountered unexpected readiness signal\n"); 207 + return -EINTR; 208 + } 209 + 210 + if (actual_size != size) { 211 + drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n", 212 + actual_size, size); 213 + return -EBADMSG; 214 + } 215 + 216 + if (response->msg != expected_response) { 217 + drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n", 218 + &expected_response, &response->msg); 219 + return -EIO; 220 + } 221 + 222 + return 0; 223 + } 224 + 225 + static int appletbdrm_send_msg(struct appletbdrm_device *adev, __le32 msg) 226 + { 227 + struct appletbdrm_msg_simple_request *request; 228 + int ret; 229 + 230 + request = kzalloc(sizeof(*request), GFP_KERNEL); 231 + if (!request) 232 + return -ENOMEM; 233 + 234 + request->header.unk_00 = cpu_to_le16(2); 235 + request->header.unk_02 = cpu_to_le16(0x1512); 236 + request->header.size = cpu_to_le32(sizeof(*request) - sizeof(request->header)); 237 + request->msg = msg; 238 + request->size = request->header.size; 239 + 240 + ret = appletbdrm_send_request(adev, &request->header, sizeof(*request)); 241 + 242 + kfree(request); 243 + 244 + return ret; 245 + } 246 + 247 + static int appletbdrm_clear_display(struct appletbdrm_device *adev) 248 + { 249 + return appletbdrm_send_msg(adev, APPLETBDRM_MSG_CLEAR_DISPLAY); 250 + } 251 + 252 + static int appletbdrm_signal_readiness(struct appletbdrm_device *adev) 253 + { 254 + return appletbdrm_send_msg(adev, APPLETBDRM_MSG_SIGNAL_READINESS); 255 + } 256 + 257 + static int appletbdrm_get_information(struct appletbdrm_device *adev) 258 + { 259 + struct appletbdrm_msg_information *info; 260 + struct drm_device *drm = &adev->drm; 261 + u8 bits_per_pixel; 262 + __le32 pixel_format; 263 + int ret; 264 + 265 + info = kzalloc(sizeof(*info), 
GFP_KERNEL); 266 + if (!info) 267 + return -ENOMEM; 268 + 269 + ret = appletbdrm_send_msg(adev, APPLETBDRM_MSG_GET_INFORMATION); 270 + if (ret) 271 + return ret; 272 + 273 + ret = appletbdrm_read_response(adev, &info->header, sizeof(*info), 274 + APPLETBDRM_MSG_GET_INFORMATION); 275 + if (ret) 276 + goto free_info; 277 + 278 + bits_per_pixel = info->bits_per_pixel; 279 + pixel_format = get_unaligned(&info->pixel_format); 280 + 281 + adev->width = get_unaligned_le32(&info->width); 282 + adev->height = get_unaligned_le32(&info->height); 283 + 284 + if (bits_per_pixel != APPLETBDRM_BITS_PER_PIXEL) { 285 + drm_err(drm, "Encountered unexpected bits per pixel value (%d)\n", bits_per_pixel); 286 + ret = -EINVAL; 287 + goto free_info; 288 + } 289 + 290 + if (pixel_format != APPLETBDRM_PIXEL_FORMAT) { 291 + drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format); 292 + ret = -EINVAL; 293 + goto free_info; 294 + } 295 + 296 + free_info: 297 + kfree(info); 298 + 299 + return ret; 300 + } 301 + 302 + static u32 rect_size(struct drm_rect *rect) 303 + { 304 + return drm_rect_width(rect) * drm_rect_height(rect) * 305 + (BITS_TO_BYTES(APPLETBDRM_BITS_PER_PIXEL)); 306 + } 307 + 308 + static int appletbdrm_connector_helper_get_modes(struct drm_connector *connector) 309 + { 310 + struct appletbdrm_device *adev = drm_to_adev(connector->dev); 311 + 312 + return drm_connector_helper_get_modes_fixed(connector, &adev->mode); 313 + } 314 + 315 + static const u32 appletbdrm_primary_plane_formats[] = { 316 + DRM_FORMAT_BGR888, 317 + DRM_FORMAT_XRGB8888, /* emulated */ 318 + }; 319 + 320 + static int appletbdrm_primary_plane_helper_atomic_check(struct drm_plane *plane, 321 + struct drm_atomic_state *state) 322 + { 323 + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); 324 + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); 325 + struct drm_crtc *new_crtc = new_plane_state->crtc; 326 + struct 
drm_crtc_state *new_crtc_state = NULL; 327 + struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(new_plane_state); 328 + struct drm_atomic_helper_damage_iter iter; 329 + struct drm_rect damage; 330 + size_t frames_size = 0; 331 + size_t request_size; 332 + int ret; 333 + 334 + if (new_crtc) 335 + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); 336 + 337 + ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, 338 + DRM_PLANE_NO_SCALING, 339 + DRM_PLANE_NO_SCALING, 340 + false, false); 341 + if (ret) 342 + return ret; 343 + else if (!new_plane_state->visible) 344 + return 0; 345 + 346 + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, new_plane_state); 347 + drm_atomic_for_each_plane_damage(&iter, &damage) { 348 + frames_size += struct_size((struct appletbdrm_frame *)0, buf, rect_size(&damage)); 349 + } 350 + 351 + if (!frames_size) 352 + return 0; 353 + 354 + request_size = ALIGN(sizeof(struct appletbdrm_fb_request) + 355 + frames_size + 356 + sizeof(struct appletbdrm_fb_request_footer), 16); 357 + 358 + appletbdrm_state->request = kzalloc(request_size, GFP_KERNEL); 359 + 360 + if (!appletbdrm_state->request) 361 + return -ENOMEM; 362 + 363 + appletbdrm_state->response = kzalloc(sizeof(*appletbdrm_state->response), GFP_KERNEL); 364 + 365 + if (!appletbdrm_state->response) 366 + return -ENOMEM; 367 + 368 + appletbdrm_state->request_size = request_size; 369 + appletbdrm_state->frames_size = frames_size; 370 + 371 + return 0; 372 + } 373 + 374 + static int appletbdrm_flush_damage(struct appletbdrm_device *adev, 375 + struct drm_plane_state *old_state, 376 + struct drm_plane_state *state) 377 + { 378 + struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state); 379 + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); 380 + struct appletbdrm_fb_request_response *response = appletbdrm_state->response; 381 + struct appletbdrm_fb_request_footer 
*footer; 382 + struct drm_atomic_helper_damage_iter iter; 383 + struct drm_framebuffer *fb = state->fb; 384 + struct appletbdrm_fb_request *request = appletbdrm_state->request; 385 + struct drm_device *drm = &adev->drm; 386 + struct appletbdrm_frame *frame; 387 + u64 timestamp = ktime_get_ns(); 388 + struct drm_rect damage; 389 + size_t frames_size = appletbdrm_state->frames_size; 390 + size_t request_size = appletbdrm_state->request_size; 391 + int ret; 392 + 393 + if (!frames_size) 394 + return 0; 395 + 396 + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 397 + if (ret) { 398 + drm_err(drm, "Failed to start CPU framebuffer access (%d)\n", ret); 399 + goto end_fb_cpu_access; 400 + } 401 + 402 + request->header.unk_00 = cpu_to_le16(2); 403 + request->header.unk_02 = cpu_to_le16(0x12); 404 + request->header.unk_04 = cpu_to_le32(9); 405 + request->header.size = cpu_to_le32(request_size - sizeof(request->header)); 406 + request->unk_10 = cpu_to_le16(1); 407 + request->msg_id = timestamp; 408 + 409 + frame = (struct appletbdrm_frame *)request->data; 410 + 411 + drm_atomic_helper_damage_iter_init(&iter, old_state, state); 412 + drm_atomic_for_each_plane_damage(&iter, &damage) { 413 + struct drm_rect dst_clip = state->dst; 414 + struct iosys_map dst = IOSYS_MAP_INIT_VADDR(frame->buf); 415 + u32 buf_size = rect_size(&damage); 416 + 417 + if (!drm_rect_intersect(&dst_clip, &damage)) 418 + continue; 419 + 420 + /* 421 + * The coordinates need to be translated to the coordinate 422 + * system the device expects, see the comment in 423 + * appletbdrm_setup_mode_config 424 + */ 425 + frame->begin_x = cpu_to_le16(damage.y1); 426 + frame->begin_y = cpu_to_le16(adev->height - damage.x2); 427 + frame->width = cpu_to_le16(drm_rect_height(&damage)); 428 + frame->height = cpu_to_le16(drm_rect_width(&damage)); 429 + frame->buf_size = cpu_to_le32(buf_size); 430 + 431 + switch (fb->format->format) { 432 + case DRM_FORMAT_XRGB8888: 433 + drm_fb_xrgb8888_to_bgr888(&dst, NULL, 
&shadow_plane_state->data[0], fb, &damage, &shadow_plane_state->fmtcnv_state); 434 + break; 435 + default: 436 + drm_fb_memcpy(&dst, NULL, &shadow_plane_state->data[0], fb, &damage); 437 + break; 438 + } 439 + 440 + frame = (void *)frame + struct_size(frame, buf, buf_size); 441 + } 442 + 443 + footer = (struct appletbdrm_fb_request_footer *)&request->data[frames_size]; 444 + 445 + footer->unk_0c = cpu_to_le32(0xfffe); 446 + footer->unk_1c = cpu_to_le32(0x80001); 447 + footer->unk_34 = cpu_to_le32(0x80002); 448 + footer->unk_4c = cpu_to_le32(0xffff); 449 + footer->timestamp = cpu_to_le64(timestamp); 450 + 451 + ret = appletbdrm_send_request(adev, &request->header, request_size); 452 + if (ret) 453 + goto end_fb_cpu_access; 454 + 455 + ret = appletbdrm_read_response(adev, &response->header, sizeof(*response), 456 + APPLETBDRM_MSG_UPDATE_COMPLETE); 457 + if (ret) 458 + goto end_fb_cpu_access; 459 + 460 + if (response->timestamp != footer->timestamp) { 461 + drm_err(drm, "Response timestamp (%llu) doesn't match request timestamp (%llu)\n", 462 + le64_to_cpu(response->timestamp), timestamp); 463 + goto end_fb_cpu_access; 464 + } 465 + 466 + end_fb_cpu_access: 467 + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 468 + 469 + return ret; 470 + } 471 + 472 + static void appletbdrm_primary_plane_helper_atomic_update(struct drm_plane *plane, 473 + struct drm_atomic_state *old_state) 474 + { 475 + struct appletbdrm_device *adev = drm_to_adev(plane->dev); 476 + struct drm_device *drm = plane->dev; 477 + struct drm_plane_state *plane_state = plane->state; 478 + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane); 479 + int idx; 480 + 481 + if (!drm_dev_enter(drm, &idx)) 482 + return; 483 + 484 + appletbdrm_flush_damage(adev, old_plane_state, plane_state); 485 + 486 + drm_dev_exit(idx); 487 + } 488 + 489 + static void appletbdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane, 490 + struct drm_atomic_state *state) 491 + { 492 + 
struct drm_device *dev = plane->dev; 493 + struct appletbdrm_device *adev = drm_to_adev(dev); 494 + int idx; 495 + 496 + if (!drm_dev_enter(dev, &idx)) 497 + return; 498 + 499 + appletbdrm_clear_display(adev); 500 + 501 + drm_dev_exit(idx); 502 + } 503 + 504 + static void appletbdrm_primary_plane_reset(struct drm_plane *plane) 505 + { 506 + struct appletbdrm_plane_state *appletbdrm_state; 507 + 508 + WARN_ON(plane->state); 509 + 510 + appletbdrm_state = kzalloc(sizeof(*appletbdrm_state), GFP_KERNEL); 511 + if (!appletbdrm_state) 512 + return; 513 + 514 + __drm_gem_reset_shadow_plane(plane, &appletbdrm_state->base); 515 + } 516 + 517 + static struct drm_plane_state *appletbdrm_primary_plane_duplicate_state(struct drm_plane *plane) 518 + { 519 + struct drm_shadow_plane_state *new_shadow_plane_state; 520 + struct appletbdrm_plane_state *appletbdrm_state; 521 + 522 + if (WARN_ON(!plane->state)) 523 + return NULL; 524 + 525 + appletbdrm_state = kzalloc(sizeof(*appletbdrm_state), GFP_KERNEL); 526 + if (!appletbdrm_state) 527 + return NULL; 528 + 529 + /* Request and response are not duplicated and are allocated in .atomic_check */ 530 + appletbdrm_state->request = NULL; 531 + appletbdrm_state->response = NULL; 532 + 533 + appletbdrm_state->request_size = 0; 534 + appletbdrm_state->frames_size = 0; 535 + 536 + new_shadow_plane_state = &appletbdrm_state->base; 537 + 538 + __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state); 539 + 540 + return &new_shadow_plane_state->base; 541 + } 542 + 543 + static void appletbdrm_primary_plane_destroy_state(struct drm_plane *plane, 544 + struct drm_plane_state *state) 545 + { 546 + struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state); 547 + 548 + kfree(appletbdrm_state->request); 549 + kfree(appletbdrm_state->response); 550 + 551 + __drm_gem_destroy_shadow_plane_state(&appletbdrm_state->base); 552 + 553 + kfree(appletbdrm_state); 554 + } 555 + 556 + static const struct 
drm_plane_helper_funcs appletbdrm_primary_plane_helper_funcs = { 557 + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, 558 + .atomic_check = appletbdrm_primary_plane_helper_atomic_check, 559 + .atomic_update = appletbdrm_primary_plane_helper_atomic_update, 560 + .atomic_disable = appletbdrm_primary_plane_helper_atomic_disable, 561 + }; 562 + 563 + static const struct drm_plane_funcs appletbdrm_primary_plane_funcs = { 564 + .update_plane = drm_atomic_helper_update_plane, 565 + .disable_plane = drm_atomic_helper_disable_plane, 566 + .reset = appletbdrm_primary_plane_reset, 567 + .atomic_duplicate_state = appletbdrm_primary_plane_duplicate_state, 568 + .atomic_destroy_state = appletbdrm_primary_plane_destroy_state, 569 + .destroy = drm_plane_cleanup, 570 + }; 571 + 572 + static enum drm_mode_status appletbdrm_crtc_helper_mode_valid(struct drm_crtc *crtc, 573 + const struct drm_display_mode *mode) 574 + { 575 + struct appletbdrm_device *adev = drm_to_adev(crtc->dev); 576 + 577 + return drm_crtc_helper_mode_valid_fixed(crtc, mode, &adev->mode); 578 + } 579 + 580 + static const struct drm_mode_config_funcs appletbdrm_mode_config_funcs = { 581 + .fb_create = drm_gem_fb_create_with_dirty, 582 + .atomic_check = drm_atomic_helper_check, 583 + .atomic_commit = drm_atomic_helper_commit, 584 + }; 585 + 586 + static const struct drm_connector_funcs appletbdrm_connector_funcs = { 587 + .reset = drm_atomic_helper_connector_reset, 588 + .destroy = drm_connector_cleanup, 589 + .fill_modes = drm_helper_probe_single_connector_modes, 590 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 591 + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 592 + }; 593 + 594 + static const struct drm_connector_helper_funcs appletbdrm_connector_helper_funcs = { 595 + .get_modes = appletbdrm_connector_helper_get_modes, 596 + }; 597 + 598 + static const struct drm_crtc_helper_funcs appletbdrm_crtc_helper_funcs = { 599 + .mode_valid = appletbdrm_crtc_helper_mode_valid, 600 + }; 
601 + 602 + static const struct drm_crtc_funcs appletbdrm_crtc_funcs = { 603 + .reset = drm_atomic_helper_crtc_reset, 604 + .destroy = drm_crtc_cleanup, 605 + .set_config = drm_atomic_helper_set_config, 606 + .page_flip = drm_atomic_helper_page_flip, 607 + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 608 + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 609 + }; 610 + 611 + static const struct drm_encoder_funcs appletbdrm_encoder_funcs = { 612 + .destroy = drm_encoder_cleanup, 613 + }; 614 + 615 + static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev, 616 + struct dma_buf *dma_buf) 617 + { 618 + struct appletbdrm_device *adev = drm_to_adev(dev); 619 + 620 + if (!adev->dmadev) 621 + return ERR_PTR(-ENODEV); 622 + 623 + return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev); 624 + } 625 + 626 + DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops); 627 + 628 + static const struct drm_driver appletbdrm_drm_driver = { 629 + DRM_GEM_SHMEM_DRIVER_OPS, 630 + .gem_prime_import = appletbdrm_driver_gem_prime_import, 631 + .name = "appletbdrm", 632 + .desc = "Apple Touch Bar DRM Driver", 633 + .major = 1, 634 + .minor = 0, 635 + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, 636 + .fops = &appletbdrm_drm_fops, 637 + }; 638 + 639 + static int appletbdrm_setup_mode_config(struct appletbdrm_device *adev) 640 + { 641 + struct drm_connector *connector = &adev->connector; 642 + struct drm_plane *primary_plane; 643 + struct drm_crtc *crtc; 644 + struct drm_encoder *encoder; 645 + struct drm_device *drm = &adev->drm; 646 + int ret; 647 + 648 + ret = drmm_mode_config_init(drm); 649 + if (ret) { 650 + drm_err(drm, "Failed to initialize mode configuration\n"); 651 + return ret; 652 + } 653 + 654 + primary_plane = &adev->primary_plane; 655 + ret = drm_universal_plane_init(drm, primary_plane, 0, 656 + &appletbdrm_primary_plane_funcs, 657 + appletbdrm_primary_plane_formats, 658 + 
ARRAY_SIZE(appletbdrm_primary_plane_formats), 659 + NULL, 660 + DRM_PLANE_TYPE_PRIMARY, NULL); 661 + if (ret) { 662 + drm_err(drm, "Failed to initialize universal plane object\n"); 663 + return ret; 664 + } 665 + 666 + drm_plane_helper_add(primary_plane, &appletbdrm_primary_plane_helper_funcs); 667 + drm_plane_enable_fb_damage_clips(primary_plane); 668 + 669 + crtc = &adev->crtc; 670 + ret = drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, 671 + &appletbdrm_crtc_funcs, NULL); 672 + if (ret) { 673 + drm_err(drm, "Failed to initialize CRTC object\n"); 674 + return ret; 675 + } 676 + 677 + drm_crtc_helper_add(crtc, &appletbdrm_crtc_helper_funcs); 678 + 679 + encoder = &adev->encoder; 680 + ret = drm_encoder_init(drm, encoder, &appletbdrm_encoder_funcs, 681 + DRM_MODE_ENCODER_DAC, NULL); 682 + if (ret) { 683 + drm_err(drm, "Failed to initialize encoder\n"); 684 + return ret; 685 + } 686 + 687 + encoder->possible_crtcs = drm_crtc_mask(crtc); 688 + 689 + /* 690 + * The coordinate system used by the device is different from the 691 + * coordinate system of the framebuffer in that the x and y axes are 692 + * swapped, and that the y axis is inverted; so what the device reports 693 + * as the height is actually the width of the framebuffer and vice 694 + * versa. 
695 + */ 696 + drm->mode_config.max_width = max(adev->height, DRM_SHADOW_PLANE_MAX_WIDTH); 697 + drm->mode_config.max_height = max(adev->width, DRM_SHADOW_PLANE_MAX_HEIGHT); 698 + drm->mode_config.preferred_depth = APPLETBDRM_BITS_PER_PIXEL; 699 + drm->mode_config.funcs = &appletbdrm_mode_config_funcs; 700 + 701 + adev->mode = (struct drm_display_mode) { 702 + DRM_MODE_INIT(60, adev->height, adev->width, 703 + DRM_MODE_RES_MM(adev->height, 218), 704 + DRM_MODE_RES_MM(adev->width, 218)) 705 + }; 706 + 707 + ret = drm_connector_init(drm, connector, 708 + &appletbdrm_connector_funcs, DRM_MODE_CONNECTOR_USB); 709 + if (ret) { 710 + drm_err(drm, "Failed to initialize connector\n"); 711 + return ret; 712 + } 713 + 714 + drm_connector_helper_add(connector, &appletbdrm_connector_helper_funcs); 715 + 716 + ret = drm_connector_set_panel_orientation(connector, 717 + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP); 718 + if (ret) { 719 + drm_err(drm, "Failed to set panel orientation\n"); 720 + return ret; 721 + } 722 + 723 + connector->display_info.non_desktop = true; 724 + ret = drm_object_property_set_value(&connector->base, 725 + drm->mode_config.non_desktop_property, true); 726 + if (ret) { 727 + drm_err(drm, "Failed to set non-desktop property\n"); 728 + return ret; 729 + } 730 + 731 + ret = drm_connector_attach_encoder(connector, encoder); 732 + 733 + if (ret) { 734 + drm_err(drm, "Failed to initialize simple display pipe\n"); 735 + return ret; 736 + } 737 + 738 + drm_mode_config_reset(drm); 739 + 740 + return 0; 741 + } 742 + 743 + static int appletbdrm_probe(struct usb_interface *intf, 744 + const struct usb_device_id *id) 745 + { 746 + struct usb_endpoint_descriptor *bulk_in, *bulk_out; 747 + struct device *dev = &intf->dev; 748 + struct appletbdrm_device *adev; 749 + struct drm_device *drm = NULL; 750 + int ret; 751 + 752 + ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL); 753 + if (ret) { 754 + drm_err(drm, "appletbdrm: Failed to find bulk 
endpoints\n"); 755 + return ret; 756 + } 757 + 758 + adev = devm_drm_dev_alloc(dev, &appletbdrm_drm_driver, struct appletbdrm_device, drm); 759 + if (IS_ERR(adev)) 760 + return PTR_ERR(adev); 761 + 762 + adev->in_ep = bulk_in->bEndpointAddress; 763 + adev->out_ep = bulk_out->bEndpointAddress; 764 + adev->dmadev = dev; 765 + 766 + drm = &adev->drm; 767 + 768 + usb_set_intfdata(intf, adev); 769 + 770 + ret = appletbdrm_get_information(adev); 771 + if (ret) { 772 + drm_err(drm, "Failed to get display information\n"); 773 + return ret; 774 + } 775 + 776 + ret = appletbdrm_signal_readiness(adev); 777 + if (ret) { 778 + drm_err(drm, "Failed to signal readiness\n"); 779 + return ret; 780 + } 781 + 782 + ret = appletbdrm_setup_mode_config(adev); 783 + if (ret) { 784 + drm_err(drm, "Failed to setup mode config\n"); 785 + return ret; 786 + } 787 + 788 + ret = drm_dev_register(drm, 0); 789 + if (ret) { 790 + drm_err(drm, "Failed to register DRM device\n"); 791 + return ret; 792 + } 793 + 794 + ret = appletbdrm_clear_display(adev); 795 + if (ret) { 796 + drm_err(drm, "Failed to clear display\n"); 797 + return ret; 798 + } 799 + 800 + return 0; 801 + } 802 + 803 + static void appletbdrm_disconnect(struct usb_interface *intf) 804 + { 805 + struct appletbdrm_device *adev = usb_get_intfdata(intf); 806 + struct drm_device *drm = &adev->drm; 807 + 808 + put_device(adev->dmadev); 809 + drm_dev_unplug(drm); 810 + drm_atomic_helper_shutdown(drm); 811 + } 812 + 813 + static void appletbdrm_shutdown(struct usb_interface *intf) 814 + { 815 + struct appletbdrm_device *adev = usb_get_intfdata(intf); 816 + 817 + /* 818 + * The framebuffer needs to be cleared on shutdown since its content 819 + * persists across boots 820 + */ 821 + drm_atomic_helper_shutdown(&adev->drm); 822 + } 823 + 824 + static const struct usb_device_id appletbdrm_usb_id_table[] = { 825 + { USB_DEVICE_INTERFACE_CLASS(0x05ac, 0x8302, USB_CLASS_AUDIO_VIDEO) }, 826 + {} 827 + }; 828 + MODULE_DEVICE_TABLE(usb, 
appletbdrm_usb_id_table); 829 + 830 + static struct usb_driver appletbdrm_usb_driver = { 831 + .name = "appletbdrm", 832 + .probe = appletbdrm_probe, 833 + .disconnect = appletbdrm_disconnect, 834 + .shutdown = appletbdrm_shutdown, 835 + .id_table = appletbdrm_usb_id_table, 836 + }; 837 + module_usb_driver(appletbdrm_usb_driver); 838 + 839 + MODULE_AUTHOR("Kerem Karabay <kekrby@gmail.com>"); 840 + MODULE_DESCRIPTION("Apple Touch Bar DRM Driver"); 841 + MODULE_LICENSE("GPL");
+1 -1
drivers/gpu/drm/ttm/Makefile
··· 4 4 5 5 ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ 6 6 ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \ 7 - ttm_device.o ttm_sys_manager.o 7 + ttm_device.o ttm_sys_manager.o ttm_backup.o 8 8 ttm-$(CONFIG_AGP) += ttm_agp_backend.o 9 9 10 10 obj-$(CONFIG_DRM_TTM) += ttm.o
+207
drivers/gpu/drm/ttm/ttm_backup.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2024 Intel Corporation 4 + */ 5 + 6 + #include <drm/ttm/ttm_backup.h> 7 + #include <linux/page-flags.h> 8 + #include <linux/swap.h> 9 + 10 + /* 11 + * Casting from randomized struct file * to struct ttm_backup * is fine since 12 + * struct ttm_backup is never defined nor dereferenced. 13 + */ 14 + static struct file *ttm_backup_to_file(struct ttm_backup *backup) 15 + { 16 + return (void *)backup; 17 + } 18 + 19 + static struct ttm_backup *ttm_file_to_backup(struct file *file) 20 + { 21 + return (void *)file; 22 + } 23 + 24 + /* 25 + * Need to map shmem indices to handle since a handle value 26 + * of 0 means error, following the swp_entry_t convention. 27 + */ 28 + static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx) 29 + { 30 + return (unsigned long)idx + 1; 31 + } 32 + 33 + static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle) 34 + { 35 + return handle - 1; 36 + } 37 + 38 + /** 39 + * ttm_backup_drop() - release memory associated with a handle 40 + * @backup: The struct backup pointer used to obtain the handle 41 + * @handle: The handle obtained from the @backup_page function. 42 + */ 43 + void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle) 44 + { 45 + loff_t start = ttm_backup_handle_to_shmem_idx(handle); 46 + 47 + start <<= PAGE_SHIFT; 48 + shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start, 49 + start + PAGE_SIZE - 1); 50 + } 51 + 52 + /** 53 + * ttm_backup_copy_page() - Copy the contents of a previously backed 54 + * up page 55 + * @backup: The struct backup pointer used to back up the page. 56 + * @dst: The struct page to copy into. 57 + * @handle: The handle returned when the page was backed up. 58 + * @intr: Try to perform waits interruptable or at least killable. 59 + * 60 + * Return: 0 on success, Negative error code on failure, notably 61 + * -EINTR if @intr was set to true and a signal is pending. 
62 + */ 63 + int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, 64 + pgoff_t handle, bool intr) 65 + { 66 + struct file *filp = ttm_backup_to_file(backup); 67 + struct address_space *mapping = filp->f_mapping; 68 + struct folio *from_folio; 69 + pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle); 70 + 71 + from_folio = shmem_read_folio(mapping, idx); 72 + if (IS_ERR(from_folio)) 73 + return PTR_ERR(from_folio); 74 + 75 + copy_highpage(dst, folio_file_page(from_folio, idx)); 76 + folio_put(from_folio); 77 + 78 + return 0; 79 + } 80 + 81 + /** 82 + * ttm_backup_backup_page() - Backup a page 83 + * @backup: The struct backup pointer to use. 84 + * @page: The page to back up. 85 + * @writeback: Whether to perform immediate writeback of the page. 86 + * This may have performance implications. 87 + * @idx: A unique integer for each page and each struct backup. 88 + * This allows the backup implementation to avoid managing 89 + * its address space separately. 90 + * @page_gfp: The gfp value used when the page was allocated. 91 + * This is used for accounting purposes. 92 + * @alloc_gfp: The gfp to be used when allocating memory. 93 + * 94 + * Context: If called from reclaim context, the caller needs to 95 + * assert that the shrinker gfp has __GFP_FS set, to avoid 96 + * deadlocking on lock_page(). If @writeback is set to true and 97 + * called from reclaim context, the caller also needs to assert 98 + * that the shrinker gfp has __GFP_IO set, since without it, 99 + * we're not allowed to start backup IO. 100 + * 101 + * Return: A handle on success. Negative error code on failure. 102 + * 103 + * Note: This function could be extended to back up a folio and 104 + * implementations would then split the folio internally if needed. 105 + * Drawback is that the caller would then have to keep track of 106 + * the folio size- and usage. 
107 + */ 108 + s64 109 + ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, 110 + bool writeback, pgoff_t idx, gfp_t page_gfp, 111 + gfp_t alloc_gfp) 112 + { 113 + struct file *filp = ttm_backup_to_file(backup); 114 + struct address_space *mapping = filp->f_mapping; 115 + unsigned long handle = 0; 116 + struct folio *to_folio; 117 + int ret; 118 + 119 + to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp); 120 + if (IS_ERR(to_folio)) 121 + return PTR_ERR(to_folio); 122 + 123 + folio_mark_accessed(to_folio); 124 + folio_lock(to_folio); 125 + folio_mark_dirty(to_folio); 126 + copy_highpage(folio_file_page(to_folio, idx), page); 127 + handle = ttm_backup_shmem_idx_to_handle(idx); 128 + 129 + if (writeback && !folio_mapped(to_folio) && 130 + folio_clear_dirty_for_io(to_folio)) { 131 + struct writeback_control wbc = { 132 + .sync_mode = WB_SYNC_NONE, 133 + .nr_to_write = SWAP_CLUSTER_MAX, 134 + .range_start = 0, 135 + .range_end = LLONG_MAX, 136 + .for_reclaim = 1, 137 + }; 138 + folio_set_reclaim(to_folio); 139 + ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc); 140 + if (!folio_test_writeback(to_folio)) 141 + folio_clear_reclaim(to_folio); 142 + /* 143 + * If writepage succeeds, it unlocks the folio. 144 + * writepage() errors are otherwise dropped, since writepage() 145 + * is only best effort here. 146 + */ 147 + if (ret) 148 + folio_unlock(to_folio); 149 + } else { 150 + folio_unlock(to_folio); 151 + } 152 + 153 + folio_put(to_folio); 154 + 155 + return handle; 156 + } 157 + 158 + /** 159 + * ttm_backup_fini() - Free the struct backup resources after last use. 160 + * @backup: Pointer to the struct backup whose resources to free. 161 + * 162 + * After a call to this function, it's illegal to use the @backup pointer. 
163 + */ 164 + void ttm_backup_fini(struct ttm_backup *backup) 165 + { 166 + fput(ttm_backup_to_file(backup)); 167 + } 168 + 169 + /** 170 + * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space 171 + * left for backup. 172 + * 173 + * This function is intended also for driver use to indicate whether a 174 + * backup attempt is meaningful. 175 + * 176 + * Return: An approximate size of backup space available. 177 + */ 178 + u64 ttm_backup_bytes_avail(void) 179 + { 180 + /* 181 + * The idea behind backing up to shmem is that shmem objects may 182 + * eventually be swapped out. So no point swapping out if there 183 + * is no or low swap-space available. But the accuracy of this 184 + * number also depends on shmem actually swapping out backed-up 185 + * shmem objects without too much buffering. 186 + */ 187 + return (u64)get_nr_swap_pages() << PAGE_SHIFT; 188 + } 189 + EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail); 190 + 191 + /** 192 + * ttm_backup_shmem_create() - Create a shmem-based struct backup. 193 + * @size: The maximum size (in bytes) to back up. 194 + * 195 + * Create a backup utilizing shmem objects. 196 + * 197 + * Return: A pointer to a struct ttm_backup on success, 198 + * an error pointer on error. 199 + */ 200 + struct ttm_backup *ttm_backup_shmem_create(loff_t size) 201 + { 202 + struct file *filp; 203 + 204 + filp = shmem_file_setup("ttm shmem backup", size, 0); 205 + 206 + return ttm_file_to_backup(filp); 207 + }
+242 -5
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 28 28 /* 29 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 30 30 */ 31 - 31 + #include <linux/swap.h> 32 32 #include <linux/vmalloc.h> 33 33 34 34 #include <drm/ttm/ttm_bo.h> ··· 769 769 return ret; 770 770 } 771 771 772 - static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk, 772 + static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx, 773 773 struct ttm_buffer_object *bo, 774 774 bool *needs_unlock) 775 775 { 776 - struct ttm_operation_ctx *ctx = walk->ctx; 777 - 778 776 *needs_unlock = false; 779 777 780 778 if (dma_resv_trylock(bo->base.resv)) { ··· 875 877 * since if we do it the other way around, and the trylock fails, 876 878 * we need to drop the lru lock to put the bo. 877 879 */ 878 - if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock)) 880 + if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock)) 879 881 bo_locked = true; 880 882 else if (!walk->ticket || walk->ctx->no_wait_gpu || 881 883 walk->trylock_only) ··· 918 920 919 921 return progress; 920 922 } 923 + EXPORT_SYMBOL(ttm_lru_walk_for_evict); 924 + 925 + static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs) 926 + { 927 + struct ttm_buffer_object *bo = curs->bo; 928 + 929 + if (bo) { 930 + if (curs->needs_unlock) 931 + dma_resv_unlock(bo->base.resv); 932 + ttm_bo_put(bo); 933 + curs->bo = NULL; 934 + } 935 + } 936 + 937 + /** 938 + * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor 939 + * and clean up any iteration it was used for. 940 + * @curs: The cursor. 
941 + */ 942 + void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs) 943 + { 944 + spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; 945 + 946 + ttm_bo_lru_cursor_cleanup_bo(curs); 947 + spin_lock(lru_lock); 948 + ttm_resource_cursor_fini(&curs->res_curs); 949 + spin_unlock(lru_lock); 950 + } 951 + EXPORT_SYMBOL(ttm_bo_lru_cursor_fini); 952 + 953 + /** 954 + * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor 955 + * @curs: The ttm_bo_lru_cursor to initialize. 956 + * @man: The ttm resource_manager whose LRU lists to iterate over. 957 + * @ctx: The ttm_operation_ctx to govern the locking. 958 + * 959 + * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking 960 + * or prelocked buffer objects are available as detailed by 961 + * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not 962 + * supported. 963 + * 964 + * Return: Pointer to @curs. The function does not fail. 965 + */ 966 + struct ttm_bo_lru_cursor * 967 + ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs, 968 + struct ttm_resource_manager *man, 969 + struct ttm_operation_ctx *ctx) 970 + { 971 + memset(curs, 0, sizeof(*curs)); 972 + ttm_resource_cursor_init(&curs->res_curs, man); 973 + curs->ctx = ctx; 974 + 975 + return curs; 976 + } 977 + EXPORT_SYMBOL(ttm_bo_lru_cursor_init); 978 + 979 + static struct ttm_buffer_object * 980 + ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs) 981 + { 982 + struct ttm_buffer_object *bo = res->bo; 983 + 984 + if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock)) 985 + return NULL; 986 + 987 + if (!ttm_bo_get_unless_zero(bo)) { 988 + if (curs->needs_unlock) 989 + dma_resv_unlock(bo->base.resv); 990 + return NULL; 991 + } 992 + 993 + curs->bo = bo; 994 + return bo; 995 + } 996 + 997 + /** 998 + * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists 999 + * to find and lock buffer object. 
1000 + * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and 1001 + * ttm_bo_lru_cursor_first(). 1002 + * 1003 + * Return: A pointer to a locked and reference-counted buffer object, 1004 + * or NULL if none could be found and looping should be terminated. 1005 + */ 1006 + struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs) 1007 + { 1008 + spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; 1009 + struct ttm_resource *res = NULL; 1010 + struct ttm_buffer_object *bo; 1011 + 1012 + ttm_bo_lru_cursor_cleanup_bo(curs); 1013 + 1014 + spin_lock(lru_lock); 1015 + for (;;) { 1016 + res = ttm_resource_manager_next(&curs->res_curs); 1017 + if (!res) 1018 + break; 1019 + 1020 + bo = ttm_bo_from_res_reserved(res, curs); 1021 + if (bo) 1022 + break; 1023 + } 1024 + 1025 + spin_unlock(lru_lock); 1026 + return res ? bo : NULL; 1027 + } 1028 + EXPORT_SYMBOL(ttm_bo_lru_cursor_next); 1029 + 1030 + /** 1031 + * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists 1032 + * to find and lock buffer object. 1033 + * @curs: The cursor initialized using ttm_bo_lru_cursor_init(). 1034 + * 1035 + * Return: A pointer to a locked and reference-counted buffer object, 1036 + * or NULL if none could be found and looping should be terminated. 1037 + */ 1038 + struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs) 1039 + { 1040 + spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; 1041 + struct ttm_buffer_object *bo; 1042 + struct ttm_resource *res; 1043 + 1044 + spin_lock(lru_lock); 1045 + res = ttm_resource_manager_first(&curs->res_curs); 1046 + if (!res) { 1047 + spin_unlock(lru_lock); 1048 + return NULL; 1049 + } 1050 + 1051 + bo = ttm_bo_from_res_reserved(res, curs); 1052 + spin_unlock(lru_lock); 1053 + 1054 + return bo ? bo : ttm_bo_lru_cursor_next(curs); 1055 + } 1056 + EXPORT_SYMBOL(ttm_bo_lru_cursor_first); 1057 + 1058 + /** 1059 + * ttm_bo_shrink() - Helper to shrink a ttm buffer object. 
1060 + * @ctx: The struct ttm_operation_ctx used for the shrinking operation. 1061 + * @bo: The buffer object. 1062 + * @flags: Flags governing the shrinking behaviour. 1063 + * 1064 + * The function uses the ttm_tt_back_up functionality to back up or 1065 + * purge a struct ttm_tt. If the bo is not in system, it's first 1066 + * moved there. 1067 + * 1068 + * Return: The number of pages shrunken or purged, or 1069 + * negative error code on failure. 1070 + */ 1071 + long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, 1072 + const struct ttm_bo_shrink_flags flags) 1073 + { 1074 + static const struct ttm_place sys_placement_flags = { 1075 + .fpfn = 0, 1076 + .lpfn = 0, 1077 + .mem_type = TTM_PL_SYSTEM, 1078 + .flags = 0, 1079 + }; 1080 + static struct ttm_placement sys_placement = { 1081 + .num_placement = 1, 1082 + .placement = &sys_placement_flags, 1083 + }; 1084 + struct ttm_tt *tt = bo->ttm; 1085 + long lret; 1086 + 1087 + dma_resv_assert_held(bo->base.resv); 1088 + 1089 + if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) { 1090 + int ret = ttm_bo_validate(bo, &sys_placement, ctx); 1091 + 1092 + /* Consider -ENOMEM and -ENOSPC non-fatal. 
*/ 1093 + if (ret) { 1094 + if (ret == -ENOMEM || ret == -ENOSPC) 1095 + ret = -EBUSY; 1096 + return ret; 1097 + } 1098 + } 1099 + 1100 + ttm_bo_unmap_virtual(bo); 1101 + lret = ttm_bo_wait_ctx(bo, ctx); 1102 + if (lret < 0) 1103 + return lret; 1104 + 1105 + if (bo->bulk_move) { 1106 + spin_lock(&bo->bdev->lru_lock); 1107 + ttm_resource_del_bulk_move(bo->resource, bo); 1108 + spin_unlock(&bo->bdev->lru_lock); 1109 + } 1110 + 1111 + lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags) 1112 + {.purge = flags.purge, 1113 + .writeback = flags.writeback}); 1114 + 1115 + if (lret <= 0 && bo->bulk_move) { 1116 + spin_lock(&bo->bdev->lru_lock); 1117 + ttm_resource_add_bulk_move(bo->resource, bo); 1118 + spin_unlock(&bo->bdev->lru_lock); 1119 + } 1120 + 1121 + if (lret < 0 && lret != -EINTR) 1122 + return -EBUSY; 1123 + 1124 + return lret; 1125 + } 1126 + EXPORT_SYMBOL(ttm_bo_shrink); 1127 + 1128 + /** 1129 + * ttm_bo_shrink_suitable() - Whether a bo is suitable for shinking 1130 + * @ctx: The struct ttm_operation_ctx governing the shrinking. 1131 + * @bo: The candidate for shrinking. 1132 + * 1133 + * Check whether the object, given the information available to TTM, 1134 + * is suitable for shinking, This function can and should be used 1135 + * before attempting to shrink an object. 1136 + * 1137 + * Return: true if suitable. false if not. 
1138 + */ 1139 + bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) 1140 + { 1141 + return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count && 1142 + (!ctx->no_wait_gpu || 1143 + dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)); 1144 + } 1145 + EXPORT_SYMBOL(ttm_bo_shrink_suitable); 1146 + 1147 + /** 1148 + * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU 1149 + * during shrinking 1150 + * 1151 + * In some situations, like direct reclaim, waiting (in particular gpu waiting) 1152 + * should be avoided since it may stall a system that could otherwise make progress 1153 + * shrinking something else less time consuming. 1154 + * 1155 + * Return: true if gpu waiting should be avoided, false if not. 1156 + */ 1157 + bool ttm_bo_shrink_avoid_wait(void) 1158 + { 1159 + return !current_is_kswapd(); 1160 + } 1161 + EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
+525 -52
drivers/gpu/drm/ttm/ttm_pool.c
··· 41 41 #include <asm/set_memory.h> 42 42 #endif 43 43 44 + #include <drm/ttm/ttm_backup.h> 44 45 #include <drm/ttm/ttm_pool.h> 45 46 #include <drm/ttm/ttm_tt.h> 46 47 #include <drm/ttm/ttm_bo.h> 47 48 48 49 #include "ttm_module.h" 50 + 51 + #ifdef CONFIG_FAULT_INJECTION 52 + #include <linux/fault-inject.h> 53 + static DECLARE_FAULT_ATTR(backup_fault_inject); 54 + #else 55 + #define should_fail(...) false 56 + #endif 49 57 50 58 /** 51 59 * struct ttm_pool_dma - Helper object for coherent DMA mappings ··· 81 73 dma_addr_t *dma_addr; 82 74 pgoff_t remaining_pages; 83 75 enum ttm_caching tt_caching; 76 + }; 77 + 78 + /** 79 + * struct ttm_pool_tt_restore - State representing restore from backup 80 + * @pool: The pool used for page allocation while restoring. 81 + * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state. 82 + * @alloced_page: Pointer to the page most recently allocated from a pool or system. 83 + * @first_dma: The dma address corresponding to @alloced_page if dma_mapping 84 + * is requested. 85 + * @alloced_pages: The number of allocated pages present in the struct ttm_tt 86 + * page vector from this restore session. 87 + * @restored_pages: The number of 4K pages restored for @alloced_page (which 88 + * is typically a multi-order page). 89 + * @page_caching: The struct ttm_tt requested caching 90 + * @order: The order of @alloced_page. 91 + * 92 + * Recovery from backup might fail when we've recovered less than the 93 + * full ttm_tt. In order not to loose any data (yet), keep information 94 + * around that allows us to restart a failed ttm backup recovery. 
95 + */ 96 + struct ttm_pool_tt_restore { 97 + struct ttm_pool *pool; 98 + struct ttm_pool_alloc_state snapshot_alloc; 99 + struct page *alloced_page; 100 + dma_addr_t first_dma; 101 + pgoff_t alloced_pages; 102 + pgoff_t restored_pages; 103 + enum ttm_caching page_caching; 104 + unsigned int order; 84 105 }; 85 106 86 107 static unsigned long page_pool_size; ··· 236 199 return 0; 237 200 } 238 201 239 - /* Map pages of 1 << order size and fill the DMA address array */ 202 + /* DMA Map pages of 1 << order size and return the resulting dma_address. */ 240 203 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, 241 - struct page *p, dma_addr_t **dma_addr) 204 + struct page *p, dma_addr_t *dma_addr) 242 205 { 243 206 dma_addr_t addr; 244 - unsigned int i; 245 207 246 208 if (pool->use_dma_alloc) { 247 209 struct ttm_pool_dma *dma = (void *)p->private; ··· 254 218 return -EFAULT; 255 219 } 256 220 257 - for (i = 1 << order; i ; --i) { 258 - *(*dma_addr)++ = addr; 259 - addr += PAGE_SIZE; 260 - } 221 + *dma_addr = addr; 261 222 262 223 return 0; 263 224 } ··· 405 372 } 406 373 407 374 /* 375 + * Split larger pages so that we can free each PAGE_SIZE page as soon 376 + * as it has been backed up, in order to avoid memory pressure during 377 + * reclaim. 378 + */ 379 + static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p) 380 + { 381 + unsigned int order = ttm_pool_page_order(pool, p); 382 + pgoff_t nr; 383 + 384 + if (!order) 385 + return; 386 + 387 + split_page(p, order); 388 + nr = 1UL << order; 389 + while (nr--) 390 + (p++)->private = 0; 391 + } 392 + 393 + /** 394 + * DOC: Partial backup and restoration of a struct ttm_tt. 395 + * 396 + * Swapout using ttm_backup_backup_page() and swapin using 397 + * ttm_backup_copy_page() may fail. 398 + * The former most likely due to lack of swap-space or memory, the latter due 399 + * to lack of memory or because of signal interruption during waits. 
400 + * 401 + * Backup failure is easily handled by using a ttm_tt pages vector that holds 402 + * both backup handles and page pointers. This has to be taken into account when 403 + * restoring such a ttm_tt from backup, and when freeing it while backed up. 404 + * When restoring, for simplicity, new pages are actually allocated from the 405 + * pool and the contents of any old pages are copied in and then the old pages 406 + * are released. 407 + * 408 + * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state 409 + * to be able to resume an interrupted restore, and that structure is freed once 410 + * the restoration is complete. If the struct ttm_tt is destroyed while there 411 + * is a valid struct ttm_pool_tt_restore attached, that is also properly taken 412 + * care of. 413 + */ 414 + 415 + /* Is restore ongoing for the currently allocated page? */ 416 + static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore) 417 + { 418 + return restore && restore->restored_pages < (1 << restore->order); 419 + } 420 + 421 + /* DMA unmap and free a multi-order page, either to the relevant pool or to system. */ 422 + static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page, 423 + const dma_addr_t *dma_addr, enum ttm_caching caching) 424 + { 425 + struct ttm_pool_type *pt = NULL; 426 + unsigned int order; 427 + pgoff_t nr; 428 + 429 + if (pool) { 430 + order = ttm_pool_page_order(pool, page); 431 + nr = (1UL << order); 432 + if (dma_addr) 433 + ttm_pool_unmap(pool, *dma_addr, nr); 434 + 435 + pt = ttm_pool_select_type(pool, caching, order); 436 + } else { 437 + order = page->private; 438 + nr = (1UL << order); 439 + } 440 + 441 + if (pt) 442 + ttm_pool_type_give(pt, page); 443 + else 444 + ttm_pool_free_page(pool, caching, order, page); 445 + 446 + return nr; 447 + } 448 + 449 + /* Populate the page-array using the most recent allocated multi-order page. 
*/ 450 + static void ttm_pool_allocated_page_commit(struct page *allocated, 451 + dma_addr_t first_dma, 452 + struct ttm_pool_alloc_state *alloc, 453 + pgoff_t nr) 454 + { 455 + pgoff_t i; 456 + 457 + for (i = 0; i < nr; ++i) 458 + *alloc->pages++ = allocated++; 459 + 460 + alloc->remaining_pages -= nr; 461 + 462 + if (!alloc->dma_addr) 463 + return; 464 + 465 + for (i = 0; i < nr; ++i) { 466 + *alloc->dma_addr++ = first_dma; 467 + first_dma += PAGE_SIZE; 468 + } 469 + } 470 + 471 + /* 472 + * When restoring, restore backed-up content to the newly allocated page and 473 + * if successful, populate the page-table and dma-address arrays. 474 + */ 475 + static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore, 476 + struct ttm_backup *backup, 477 + const struct ttm_operation_ctx *ctx, 478 + struct ttm_pool_alloc_state *alloc) 479 + 480 + { 481 + pgoff_t i, nr = 1UL << restore->order; 482 + struct page **first_page = alloc->pages; 483 + struct page *p; 484 + int ret = 0; 485 + 486 + for (i = restore->restored_pages; i < nr; ++i) { 487 + p = first_page[i]; 488 + if (ttm_backup_page_ptr_is_handle(p)) { 489 + unsigned long handle = ttm_backup_page_ptr_to_handle(p); 490 + 491 + if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible && 492 + should_fail(&backup_fault_inject, 1)) { 493 + ret = -EINTR; 494 + break; 495 + } 496 + 497 + if (handle == 0) { 498 + restore->restored_pages++; 499 + continue; 500 + } 501 + 502 + ret = ttm_backup_copy_page(backup, restore->alloced_page + i, 503 + handle, ctx->interruptible); 504 + if (ret) 505 + break; 506 + 507 + ttm_backup_drop(backup, handle); 508 + } else if (p) { 509 + /* 510 + * We could probably avoid splitting the old page 511 + * using clever logic, but ATM we don't care, as 512 + * we prioritize releasing memory ASAP. Note that 513 + * here, the old retained page is always write-back 514 + * cached. 
515 + */ 516 + ttm_pool_split_for_swap(restore->pool, p); 517 + copy_highpage(restore->alloced_page + i, p); 518 + __free_pages(p, 0); 519 + } 520 + 521 + restore->restored_pages++; 522 + first_page[i] = ttm_backup_handle_to_page_ptr(0); 523 + } 524 + 525 + if (ret) { 526 + if (!restore->restored_pages) { 527 + dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL; 528 + 529 + ttm_pool_unmap_and_free(restore->pool, restore->alloced_page, 530 + dma_addr, restore->page_caching); 531 + restore->restored_pages = nr; 532 + } 533 + return ret; 534 + } 535 + 536 + ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma, 537 + alloc, nr); 538 + if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page)) 539 + alloc->caching_divide = alloc->pages; 540 + restore->snapshot_alloc = *alloc; 541 + restore->alloced_pages += nr; 542 + 543 + return 0; 544 + } 545 + 546 + /* If restoring, save information needed for ttm_pool_restore_commit(). */ 547 + static void 548 + ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order, 549 + struct page *p, 550 + enum ttm_caching page_caching, 551 + dma_addr_t first_dma, 552 + struct ttm_pool_tt_restore *restore, 553 + const struct ttm_pool_alloc_state *alloc) 554 + { 555 + restore->pool = pool; 556 + restore->order = order; 557 + restore->restored_pages = 0; 558 + restore->page_caching = page_caching; 559 + restore->first_dma = first_dma; 560 + restore->alloced_page = p; 561 + restore->snapshot_alloc = *alloc; 562 + } 563 + 564 + /* 408 565 * Called when we got a page, either from a pool or newly allocated. 409 566 * if needed, dma map the page and populate the dma address array. 410 567 * Populate the page address array. 
··· 603 380 */ 604 381 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, 605 382 struct page *p, enum ttm_caching page_caching, 606 - struct ttm_pool_alloc_state *alloc) 383 + struct ttm_pool_alloc_state *alloc, 384 + struct ttm_pool_tt_restore *restore) 607 385 { 608 - pgoff_t i, nr = 1UL << order; 609 386 bool caching_consistent; 387 + dma_addr_t first_dma; 610 388 int r = 0; 611 389 612 390 caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p); ··· 619 395 } 620 396 621 397 if (alloc->dma_addr) { 622 - r = ttm_pool_map(pool, order, p, &alloc->dma_addr); 398 + r = ttm_pool_map(pool, order, p, &first_dma); 623 399 if (r) 624 400 return r; 625 401 } 626 402 627 - alloc->remaining_pages -= nr; 628 - for (i = 0; i < nr; ++i) 629 - *alloc->pages++ = p++; 403 + if (restore) { 404 + ttm_pool_page_allocated_restore(pool, order, p, page_caching, 405 + first_dma, restore, alloc); 406 + } else { 407 + ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order); 630 408 631 - if (caching_consistent) 632 - alloc->caching_divide = alloc->pages; 409 + if (caching_consistent) 410 + alloc->caching_divide = alloc->pages; 411 + } 633 412 634 413 return 0; 635 414 } ··· 655 428 pgoff_t start_page, pgoff_t end_page) 656 429 { 657 430 struct page **pages = &tt->pages[start_page]; 658 - unsigned int order; 431 + struct ttm_backup *backup = tt->backup; 659 432 pgoff_t i, nr; 660 433 661 434 for (i = start_page; i < end_page; i += nr, pages += nr) { 662 - struct ttm_pool_type *pt = NULL; 435 + struct page *p = *pages; 663 436 664 - order = ttm_pool_page_order(pool, *pages); 665 - nr = (1UL << order); 666 - if (tt->dma_address) 667 - ttm_pool_unmap(pool, tt->dma_address[i], nr); 437 + nr = 1; 438 + if (ttm_backup_page_ptr_is_handle(p)) { 439 + unsigned long handle = ttm_backup_page_ptr_to_handle(p); 668 440 669 - pt = ttm_pool_select_type(pool, caching, order); 670 - if (pt) 671 - ttm_pool_type_give(pt, *pages); 672 - else 673 - 
ttm_pool_free_page(pool, caching, order, *pages); 441 + if (handle != 0) 442 + ttm_backup_drop(backup, handle); 443 + } else if (p) { 444 + dma_addr_t *dma_addr = tt->dma_address ? 445 + tt->dma_address + i : NULL; 446 + 447 + nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching); 448 + } 674 449 } 675 450 } 676 451 ··· 696 467 return min_t(unsigned int, highest, __fls(alloc->remaining_pages)); 697 468 } 698 469 699 - /** 700 - * ttm_pool_alloc - Fill a ttm_tt object 701 - * 702 - * @pool: ttm_pool to use 703 - * @tt: ttm_tt object to fill 704 - * @ctx: operation context 705 - * 706 - * Fill the ttm_tt object with pages and also make sure to DMA map them when 707 - * necessary. 708 - * 709 - * Returns: 0 on successe, negative error code otherwise. 710 - */ 711 - int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, 712 - struct ttm_operation_ctx *ctx) 470 + static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, 471 + const struct ttm_operation_ctx *ctx, 472 + struct ttm_pool_alloc_state *alloc, 473 + struct ttm_pool_tt_restore *restore) 713 474 { 714 - struct ttm_pool_alloc_state alloc; 715 475 enum ttm_caching page_caching; 716 476 gfp_t gfp_flags = GFP_USER; 717 477 pgoff_t caching_divide; ··· 709 491 struct page *p; 710 492 int r; 711 493 712 - ttm_pool_alloc_state_init(tt, &alloc); 713 - 714 - WARN_ON(!alloc.remaining_pages || ttm_tt_is_populated(tt)); 715 - WARN_ON(alloc.dma_addr && !pool->dev); 494 + WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt)); 495 + WARN_ON(alloc->dma_addr && !pool->dev); 716 496 717 497 if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC) 718 498 gfp_flags |= __GFP_ZERO; ··· 725 509 726 510 page_caching = tt->caching; 727 511 allow_pools = true; 728 - for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, &alloc); 729 - alloc.remaining_pages; 730 - order = ttm_pool_alloc_find_order(order, &alloc)) { 512 + for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc); 513 + alloc->remaining_pages; 514 + order 
= ttm_pool_alloc_find_order(order, alloc)) { 731 515 struct ttm_pool_type *pt; 732 516 733 517 /* First, try to allocate a page from a pool if one exists. */ ··· 757 541 r = -ENOMEM; 758 542 goto error_free_all; 759 543 } 760 - r = ttm_pool_page_allocated(pool, order, p, page_caching, &alloc); 544 + r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc, 545 + restore); 761 546 if (r) 762 547 goto error_free_page; 548 + 549 + if (ttm_pool_restore_valid(restore)) { 550 + r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc); 551 + if (r) 552 + goto error_free_all; 553 + } 763 554 } 764 555 765 - r = ttm_pool_apply_caching(&alloc); 556 + r = ttm_pool_apply_caching(alloc); 766 557 if (r) 767 558 goto error_free_all; 559 + 560 + kfree(tt->restore); 561 + tt->restore = NULL; 768 562 769 563 return 0; 770 564 ··· 782 556 ttm_pool_free_page(pool, page_caching, order, p); 783 557 784 558 error_free_all: 785 - caching_divide = alloc.caching_divide - tt->pages; 559 + if (tt->restore) 560 + return r; 561 + 562 + caching_divide = alloc->caching_divide - tt->pages; 786 563 ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide); 787 564 ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, 788 - tt->num_pages - alloc.remaining_pages); 565 + tt->num_pages - alloc->remaining_pages); 789 566 790 567 return r; 791 568 } 569 + 570 + /** 571 + * ttm_pool_alloc - Fill a ttm_tt object 572 + * 573 + * @pool: ttm_pool to use 574 + * @tt: ttm_tt object to fill 575 + * @ctx: operation context 576 + * 577 + * Fill the ttm_tt object with pages and also make sure to DMA map them when 578 + * necessary. 579 + * 580 + * Returns: 0 on successe, negative error code otherwise. 
581 + */ 582 + int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, 583 + struct ttm_operation_ctx *ctx) 584 + { 585 + struct ttm_pool_alloc_state alloc; 586 + 587 + if (WARN_ON(ttm_tt_is_backed_up(tt))) 588 + return -EINVAL; 589 + 590 + ttm_pool_alloc_state_init(tt, &alloc); 591 + 592 + return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL); 593 + } 792 594 EXPORT_SYMBOL(ttm_pool_alloc); 595 + 596 + /** 597 + * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up 598 + * content. 599 + * 600 + * @pool: ttm_pool to use 601 + * @tt: ttm_tt object to fill 602 + * @ctx: operation context 603 + * 604 + * Fill the ttm_tt object with pages and also make sure to DMA map them when 605 + * necessary. Read in backed-up content. 606 + * 607 + * Returns: 0 on successe, negative error code otherwise. 608 + */ 609 + int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt, 610 + const struct ttm_operation_ctx *ctx) 611 + { 612 + struct ttm_pool_alloc_state alloc; 613 + 614 + if (WARN_ON(!ttm_tt_is_backed_up(tt))) 615 + return -EINVAL; 616 + 617 + if (!tt->restore) { 618 + gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; 619 + 620 + ttm_pool_alloc_state_init(tt, &alloc); 621 + if (ctx->gfp_retry_mayfail) 622 + gfp |= __GFP_RETRY_MAYFAIL; 623 + 624 + tt->restore = kzalloc(sizeof(*tt->restore), gfp); 625 + if (!tt->restore) 626 + return -ENOMEM; 627 + 628 + tt->restore->snapshot_alloc = alloc; 629 + tt->restore->pool = pool; 630 + tt->restore->restored_pages = 1; 631 + } else { 632 + struct ttm_pool_tt_restore *restore = tt->restore; 633 + int ret; 634 + 635 + alloc = restore->snapshot_alloc; 636 + if (ttm_pool_restore_valid(tt->restore)) { 637 + ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc); 638 + if (ret) 639 + return ret; 640 + } 641 + if (!alloc.remaining_pages) 642 + return 0; 643 + } 644 + 645 + return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore); 646 + } 793 647 794 648 /** 795 649 * ttm_pool_free - Free the backing 
pages from a ttm_tt object ··· 887 581 ttm_pool_shrink(); 888 582 } 889 583 EXPORT_SYMBOL(ttm_pool_free); 584 + 585 + /** 586 + * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt 587 + * @tt: The struct ttm_tt. 588 + * 589 + * Release handles with associated content or any remaining pages of 590 + * a backed-up struct ttm_tt. 591 + */ 592 + void ttm_pool_drop_backed_up(struct ttm_tt *tt) 593 + { 594 + struct ttm_pool_tt_restore *restore; 595 + pgoff_t start_page = 0; 596 + 597 + WARN_ON(!ttm_tt_is_backed_up(tt)); 598 + 599 + restore = tt->restore; 600 + 601 + /* 602 + * Unmap and free any uncommitted restore page. 603 + * any tt page-array backup entries already read back has 604 + * been cleared already 605 + */ 606 + if (ttm_pool_restore_valid(restore)) { 607 + dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL; 608 + 609 + ttm_pool_unmap_and_free(restore->pool, restore->alloced_page, 610 + dma_addr, restore->page_caching); 611 + restore->restored_pages = 1UL << restore->order; 612 + } 613 + 614 + /* 615 + * If a restore is ongoing, part of the tt pages may have a 616 + * caching different than writeback. 617 + */ 618 + if (restore) { 619 + pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages; 620 + 621 + start_page = restore->alloced_pages; 622 + WARN_ON(mid > start_page); 623 + /* Pages that might be dma-mapped and non-cached */ 624 + ttm_pool_free_range(restore->pool, tt, tt->caching, 625 + 0, mid); 626 + /* Pages that might be dma-mapped but cached */ 627 + ttm_pool_free_range(restore->pool, tt, ttm_cached, 628 + mid, restore->alloced_pages); 629 + kfree(restore); 630 + tt->restore = NULL; 631 + } 632 + 633 + ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages); 634 + } 635 + 636 + /** 637 + * ttm_pool_backup() - Back up or purge a struct ttm_tt 638 + * @pool: The pool used when allocating the struct ttm_tt. 639 + * @tt: The struct ttm_tt. 
640 + * @flags: Flags to govern the backup behaviour. 641 + * 642 + * Back up or purge a struct ttm_tt. If @purge is true, then 643 + * all pages will be freed directly to the system rather than to the pool 644 + * they were allocated from, making the function behave similarly to 645 + * ttm_pool_free(). If @purge is false the pages will be backed up instead, 646 + * exchanged for handles. 647 + * A subsequent call to ttm_pool_restore_and_alloc() will then read back the content and 648 + * a subsequent call to ttm_pool_drop_backed_up() will drop it. 649 + * If backup of a page fails for whatever reason, @ttm will still be 650 + * partially backed up, retaining those pages for which backup fails. 651 + * In that case, this function can be retried, possibly after freeing up 652 + * memory resources. 653 + * 654 + * Return: Number of pages actually backed up or freed, or negative 655 + * error code on error. 656 + */ 657 + long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt, 658 + const struct ttm_backup_flags *flags) 659 + { 660 + struct ttm_backup *backup = tt->backup; 661 + struct page *page; 662 + unsigned long handle; 663 + gfp_t alloc_gfp; 664 + gfp_t gfp; 665 + int ret = 0; 666 + pgoff_t shrunken = 0; 667 + pgoff_t i, num_pages; 668 + 669 + if (WARN_ON(ttm_tt_is_backed_up(tt))) 670 + return -EINVAL; 671 + 672 + if ((!ttm_backup_bytes_avail() && !flags->purge) || 673 + pool->use_dma_alloc || ttm_tt_is_backed_up(tt)) 674 + return -EBUSY; 675 + 676 + #ifdef CONFIG_X86 677 + /* Anything returned to the system needs to be cached. 
*/ 678 + if (tt->caching != ttm_cached) 679 + set_pages_array_wb(tt->pages, tt->num_pages); 680 + #endif 681 + 682 + if (tt->dma_address || flags->purge) { 683 + for (i = 0; i < tt->num_pages; i += num_pages) { 684 + unsigned int order; 685 + 686 + page = tt->pages[i]; 687 + if (unlikely(!page)) { 688 + num_pages = 1; 689 + continue; 690 + } 691 + 692 + order = ttm_pool_page_order(pool, page); 693 + num_pages = 1UL << order; 694 + if (tt->dma_address) 695 + ttm_pool_unmap(pool, tt->dma_address[i], 696 + num_pages); 697 + if (flags->purge) { 698 + shrunken += num_pages; 699 + page->private = 0; 700 + __free_pages(page, order); 701 + memset(tt->pages + i, 0, 702 + num_pages * sizeof(*tt->pages)); 703 + } 704 + } 705 + } 706 + 707 + if (flags->purge) 708 + return shrunken; 709 + 710 + if (pool->use_dma32) 711 + gfp = GFP_DMA32; 712 + else 713 + gfp = GFP_HIGHUSER; 714 + 715 + alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL; 716 + 717 + num_pages = tt->num_pages; 718 + 719 + /* Pretend doing fault injection by shrinking only half of the pages. */ 720 + if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1)) 721 + num_pages = DIV_ROUND_UP(num_pages, 2); 722 + 723 + for (i = 0; i < num_pages; ++i) { 724 + s64 shandle; 725 + 726 + page = tt->pages[i]; 727 + if (unlikely(!page)) 728 + continue; 729 + 730 + ttm_pool_split_for_swap(pool, page); 731 + 732 + shandle = ttm_backup_backup_page(backup, page, flags->writeback, i, 733 + gfp, alloc_gfp); 734 + if (shandle < 0) { 735 + /* We allow partially shrunken tts */ 736 + ret = shandle; 737 + break; 738 + } 739 + handle = shandle; 740 + tt->pages[i] = ttm_backup_handle_to_page_ptr(handle); 741 + put_page(page); 742 + shrunken++; 743 + } 744 + 745 + return shrunken ? 
shrunken : ret; 746 + } 890 747 891 748 /** 892 749 * ttm_pool_init - Initialize a pool ··· 1312 843 &ttm_pool_debugfs_globals_fops); 1313 844 debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL, 1314 845 &ttm_pool_debugfs_shrink_fops); 846 + #ifdef CONFIG_FAULT_INJECTION 847 + fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root, 848 + &backup_fault_inject); 849 + #endif 1315 850 #endif 1316 851 1317 852 mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
+83
drivers/gpu/drm/ttm/ttm_tt.c
··· 40 40 #include <drm/drm_cache.h> 41 41 #include <drm/drm_device.h> 42 42 #include <drm/drm_util.h> 43 + #include <drm/ttm/ttm_backup.h> 43 44 #include <drm/ttm/ttm_bo.h> 44 45 #include <drm/ttm/ttm_tt.h> 45 46 ··· 159 158 ttm->swap_storage = NULL; 160 159 ttm->sg = bo->sg; 161 160 ttm->caching = caching; 161 + ttm->restore = NULL; 162 + ttm->backup = NULL; 162 163 } 163 164 164 165 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, ··· 184 181 if (ttm->swap_storage) 185 182 fput(ttm->swap_storage); 186 183 ttm->swap_storage = NULL; 184 + 185 + if (ttm_tt_is_backed_up(ttm)) 186 + ttm_pool_drop_backed_up(ttm); 187 + if (ttm->backup) { 188 + ttm_backup_fini(ttm->backup); 189 + ttm->backup = NULL; 190 + } 187 191 188 192 if (ttm->pages) 189 193 kvfree(ttm->pages); ··· 262 252 return ret; 263 253 } 264 254 EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin); 255 + 256 + /** 257 + * ttm_tt_backup() - Helper to back up a struct ttm_tt. 258 + * @bdev: The TTM device. 259 + * @tt: The struct ttm_tt. 260 + * @flags: Flags that govern the backup behaviour. 261 + * 262 + * Update the page accounting and call ttm_pool_shrink_tt to free pages 263 + * or back them up. 264 + * 265 + * Return: Number of pages freed or swapped out, or negative error code on 266 + * error. 
267 + */ 268 + long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt, 269 + const struct ttm_backup_flags flags) 270 + { 271 + long ret; 272 + 273 + if (WARN_ON(IS_ERR_OR_NULL(tt->backup))) 274 + return 0; 275 + 276 + ret = ttm_pool_backup(&bdev->pool, tt, &flags); 277 + if (ret > 0) { 278 + tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED; 279 + tt->page_flags |= TTM_TT_FLAG_BACKED_UP; 280 + } 281 + 282 + return ret; 283 + } 284 + 285 + int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt, 286 + const struct ttm_operation_ctx *ctx) 287 + { 288 + int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx); 289 + 290 + if (ret) 291 + return ret; 292 + 293 + tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP; 294 + 295 + return 0; 296 + } 297 + EXPORT_SYMBOL(ttm_tt_restore); 265 298 266 299 /** 267 300 * ttm_tt_swapout - swap out tt object ··· 401 348 goto error; 402 349 403 350 ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED; 351 + ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP; 404 352 if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) { 405 353 ret = ttm_tt_swapin(ttm); 406 354 if (unlikely(ret != 0)) { ··· 531 477 return ttm_pages_limit; 532 478 } 533 479 EXPORT_SYMBOL(ttm_tt_pages_limit); 480 + 481 + /** 482 + * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt 483 + * @tt: The ttm_tt for wich to allocate and assign a backup structure. 484 + * 485 + * Assign a backup structure to be used for tt backup. This should 486 + * typically be done at bo creation, to avoid allocations at shrinking 487 + * time. 488 + * 489 + * Return: 0 on success, negative error code on failure. 
490 + */ 491 + int ttm_tt_setup_backup(struct ttm_tt *tt) 492 + { 493 + struct ttm_backup *backup = 494 + ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT); 495 + 496 + if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))) 497 + return -EINVAL; 498 + 499 + if (IS_ERR(backup)) 500 + return PTR_ERR(backup); 501 + 502 + if (tt->backup) 503 + ttm_backup_fini(tt->backup); 504 + 505 + tt->backup = backup; 506 + return 0; 507 + } 508 + EXPORT_SYMBOL(ttm_tt_setup_backup);
-37
drivers/gpu/drm/vboxvideo/hgsmi_base.c
··· 181 181 182 182 return rc; 183 183 } 184 - 185 - /** 186 - * hgsmi_cursor_position - Report the guest cursor position. The host may 187 - * wish to use this information to re-position its 188 - * own cursor (though this is currently unlikely). 189 - * The current host cursor position is returned. 190 - * Return: 0 or negative errno value. 191 - * @ctx: The context containing the heap used. 192 - * @report_position: Are we reporting a position? 193 - * @x: Guest cursor X position. 194 - * @y: Guest cursor Y position. 195 - * @x_host: Host cursor X position is stored here. Optional. 196 - * @y_host: Host cursor Y position is stored here. Optional. 197 - */ 198 - int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, 199 - u32 x, u32 y, u32 *x_host, u32 *y_host) 200 - { 201 - struct vbva_cursor_position *p; 202 - 203 - p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, 204 - VBVA_CURSOR_POSITION); 205 - if (!p) 206 - return -ENOMEM; 207 - 208 - p->report_position = report_position; 209 - p->x = x; 210 - p->y = y; 211 - 212 - hgsmi_buffer_submit(ctx, p); 213 - 214 - *x_host = p->x; 215 - *y_host = p->y; 216 - 217 - hgsmi_buffer_free(ctx, p); 218 - 219 - return 0; 220 - }
-2
drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
··· 34 34 int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, 35 35 u32 hot_x, u32 hot_y, u32 width, u32 height, 36 36 u8 *pixels, u32 len); 37 - int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, 38 - u32 x, u32 y, u32 *x_host, u32 *y_host); 39 37 40 38 bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, 41 39 struct vbva_buffer *vbva, s32 screen);
+27 -19
drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
··· 724 724 static int vc4_pv_muxing_test_init(struct kunit *test) 725 725 { 726 726 const struct pv_muxing_param *params = test->param_value; 727 - struct drm_modeset_acquire_ctx *ctx; 727 + struct drm_modeset_acquire_ctx ctx; 728 728 struct pv_muxing_priv *priv; 729 729 struct drm_device *drm; 730 730 struct vc4_dev *vc4; ··· 737 737 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); 738 738 priv->vc4 = vc4; 739 739 740 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 741 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 740 + drm_modeset_acquire_init(&ctx, 0); 742 741 743 742 drm = &vc4->base; 744 - priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 743 + priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 745 744 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state); 745 + 746 + drm_modeset_drop_locks(&ctx); 747 + drm_modeset_acquire_fini(&ctx); 746 748 747 749 return 0; 748 750 } ··· 784 782 */ 785 783 static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test) 786 784 { 787 - struct drm_modeset_acquire_ctx *ctx; 785 + struct drm_modeset_acquire_ctx ctx; 788 786 struct drm_atomic_state *state; 789 787 struct vc4_crtc_state *new_vc4_crtc_state; 790 788 struct vc4_hvs_state *new_hvs_state; ··· 797 795 vc4 = vc5_mock_device(test); 798 796 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); 799 797 800 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 801 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 798 + drm_modeset_acquire_init(&ctx, 0); 802 799 803 800 drm = &vc4->base; 804 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 801 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 805 802 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 806 803 807 804 ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); ··· 823 822 ret = drm_atomic_helper_swap_state(state, false); 824 823 KUNIT_ASSERT_EQ(test, ret, 0); 825 824 826 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 825 + state = 
drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 827 826 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 828 827 829 828 ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1); ··· 844 843 KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use); 845 844 846 845 KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel); 846 + 847 + drm_modeset_drop_locks(&ctx); 848 + drm_modeset_acquire_fini(&ctx); 847 849 } 848 850 849 851 /* ··· 858 854 */ 859 855 static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test) 860 856 { 861 - struct drm_modeset_acquire_ctx *ctx; 857 + struct drm_modeset_acquire_ctx ctx; 862 858 struct drm_atomic_state *state; 863 859 struct vc4_crtc_state *new_vc4_crtc_state; 864 860 struct vc4_hvs_state *new_hvs_state; ··· 871 867 vc4 = vc5_mock_device(test); 872 868 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); 873 869 874 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 875 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 870 + drm_modeset_acquire_init(&ctx, 0); 876 871 877 872 drm = &vc4->base; 878 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 873 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 879 874 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 880 875 881 876 ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); ··· 908 905 ret = drm_atomic_helper_swap_state(state, false); 909 906 KUNIT_ASSERT_EQ(test, ret, 0); 910 907 911 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 908 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 912 909 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 913 910 914 911 ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0); ··· 932 929 933 930 KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel); 934 931 } 932 + 933 + drm_modeset_drop_locks(&ctx); 934 + drm_modeset_acquire_fini(&ctx); 935 935 } 936 936 937 937 /* ··· 955 949 static void 956 950 
drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test) 957 951 { 958 - struct drm_modeset_acquire_ctx *ctx; 952 + struct drm_modeset_acquire_ctx ctx; 959 953 struct drm_atomic_state *state; 960 954 struct vc4_crtc_state *new_vc4_crtc_state; 961 955 struct drm_device *drm; ··· 965 959 vc4 = vc5_mock_device(test); 966 960 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); 967 961 968 - ctx = drm_kunit_helper_acquire_ctx_alloc(test); 969 - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 962 + drm_modeset_acquire_init(&ctx, 0); 970 963 971 964 drm = &vc4->base; 972 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 965 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 973 966 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 974 967 975 968 ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); ··· 980 975 ret = drm_atomic_helper_swap_state(state, false); 981 976 KUNIT_ASSERT_EQ(test, ret, 0); 982 977 983 - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); 978 + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 984 979 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); 985 980 986 981 ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1); ··· 992 987 new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state, 993 988 VC4_ENCODER_TYPE_HDMI0); 994 989 KUNIT_EXPECT_NULL(test, new_vc4_crtc_state); 990 + 991 + drm_modeset_drop_locks(&ctx); 992 + drm_modeset_acquire_fini(&ctx); 995 993 } 996 994 997 995 static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
+14 -14
drivers/gpu/drm/vc4/vc4_hdmi.c
··· 2928 2928 2929 2929 vc4_hdmi->hdmicore_regs = devm_platform_ioremap_resource_byname(pdev, 2930 2930 "hdmi"); 2931 - if (!vc4_hdmi->hdmicore_regs) 2932 - return -ENOMEM; 2931 + if (IS_ERR(vc4_hdmi->hdmicore_regs)) 2932 + return PTR_ERR(vc4_hdmi->hdmicore_regs); 2933 2933 2934 2934 /* This is shared between both HDMI controllers. Cannot 2935 2935 * claim for both instances. Lets not convert to using ··· 2946 2946 2947 2947 vc4_hdmi->cec_regs = devm_platform_ioremap_resource_byname(pdev, 2948 2948 "cec"); 2949 - if (!vc4_hdmi->cec_regs) 2950 - return -ENOMEM; 2949 + if (IS_ERR(vc4_hdmi->cec_regs)) 2950 + return PTR_ERR(vc4_hdmi->cec_regs); 2951 2951 2952 2952 vc4_hdmi->csc_regs = devm_platform_ioremap_resource_byname(pdev, 2953 2953 "csc"); 2954 - if (!vc4_hdmi->csc_regs) 2955 - return -ENOMEM; 2954 + if (IS_ERR(vc4_hdmi->csc_regs)) 2955 + return PTR_ERR(vc4_hdmi->csc_regs); 2956 2956 2957 2957 vc4_hdmi->dvp_regs = devm_platform_ioremap_resource_byname(pdev, 2958 2958 "dvp"); 2959 - if (!vc4_hdmi->dvp_regs) 2960 - return -ENOMEM; 2959 + if (IS_ERR(vc4_hdmi->dvp_regs)) 2960 + return PTR_ERR(vc4_hdmi->dvp_regs); 2961 2961 2962 2962 vc4_hdmi->phy_regs = devm_platform_ioremap_resource_byname(pdev, 2963 2963 "phy"); 2964 2964 2965 - if (!vc4_hdmi->phy_regs) 2966 - return -ENOMEM; 2965 + if (IS_ERR(vc4_hdmi->phy_regs)) 2966 + return PTR_ERR(vc4_hdmi->phy_regs); 2967 2967 2968 2968 vc4_hdmi->ram_regs = devm_platform_ioremap_resource_byname(pdev, 2969 2969 "packet"); 2970 - if (!vc4_hdmi->ram_regs) 2971 - return -ENOMEM; 2970 + if (IS_ERR(vc4_hdmi->ram_regs)) 2971 + return PTR_ERR(vc4_hdmi->ram_regs); 2972 2972 2973 2973 vc4_hdmi->rm_regs = devm_platform_ioremap_resource_byname(pdev, "rm"); 2974 - if (!vc4_hdmi->rm_regs) 2975 - return -ENOMEM; 2974 + if (IS_ERR(vc4_hdmi->rm_regs)) 2975 + return PTR_ERR(vc4_hdmi->rm_regs); 2976 2976 2977 2977 vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi"); 2978 2978 if (IS_ERR(vc4_hdmi->hsm_clock)) {
+1
drivers/gpu/drm/xe/Makefile
··· 98 98 xe_rtp.o \ 99 99 xe_sa.o \ 100 100 xe_sched_job.o \ 101 + xe_shrinker.o \ 101 102 xe_step.o \ 102 103 xe_survivability_mode.o \ 103 104 xe_sync.o \
+5 -1
drivers/gpu/drm/xe/tests/xe_bo.c
··· 514 514 * other way around, they may not be subject to swapping... 515 515 */ 516 516 if (alloced < purgeable) { 517 + xe_ttm_tt_account_subtract(&xe_tt->ttm); 517 518 xe_tt->purgeable = true; 519 + xe_ttm_tt_account_add(&xe_tt->ttm); 518 520 bo->ttm.priority = 0; 521 + spin_lock(&bo->ttm.bdev->lru_lock); 522 + ttm_bo_move_to_lru_tail(&bo->ttm); 523 + spin_unlock(&bo->ttm.bdev->lru_lock); 519 524 } else { 520 525 int ret = shrink_test_fill_random(bo, &prng, link); 521 526 ··· 575 570 if (ret == -EINTR) 576 571 intr = true; 577 572 } while (ret == -EINTR && !signal_pending(current)); 578 - 579 573 if (!ret && !purgeable) 580 574 failed = shrink_test_verify(test, bo, count, &prng, link); 581 575
+185 -17
drivers/gpu/drm/xe/xe_bo.c
··· 11 11 #include <drm/drm_drv.h> 12 12 #include <drm/drm_gem_ttm_helper.h> 13 13 #include <drm/drm_managed.h> 14 + #include <drm/ttm/ttm_backup.h> 14 15 #include <drm/ttm/ttm_device.h> 15 16 #include <drm/ttm/ttm_placement.h> 16 17 #include <drm/ttm/ttm_tt.h> ··· 30 29 #include "xe_preempt_fence.h" 31 30 #include "xe_pxp.h" 32 31 #include "xe_res_cursor.h" 32 + #include "xe_shrinker.h" 33 33 #include "xe_trace_bo.h" 34 34 #include "xe_ttm_stolen_mgr.h" 35 35 #include "xe_vm.h" ··· 309 307 } 310 308 } 311 309 310 + /* struct xe_ttm_tt - Subclassed ttm_tt for xe */ 312 311 struct xe_ttm_tt { 313 312 struct ttm_tt ttm; 314 - struct device *dev; 313 + /** @xe - The xe device */ 314 + struct xe_device *xe; 315 315 struct sg_table sgt; 316 316 struct sg_table *sg; 317 317 /** @purgeable: Whether the content of the pages of @ttm is purgeable. */ ··· 326 322 unsigned long num_pages = tt->num_pages; 327 323 int ret; 328 324 329 - XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL); 325 + XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && 326 + !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)); 330 327 331 328 if (xe_tt->sg) 332 329 return 0; ··· 335 330 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, 336 331 num_pages, 0, 337 332 (u64)num_pages << PAGE_SHIFT, 338 - xe_sg_segment_size(xe_tt->dev), 333 + xe_sg_segment_size(xe_tt->xe->drm.dev), 339 334 GFP_KERNEL); 340 335 if (ret) 341 336 return ret; 342 337 343 338 xe_tt->sg = &xe_tt->sgt; 344 - ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL, 339 + ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, 345 340 DMA_ATTR_SKIP_CPU_SYNC); 346 341 if (ret) { 347 342 sg_free_table(xe_tt->sg); ··· 357 352 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 358 353 359 354 if (xe_tt->sg) { 360 - dma_unmap_sgtable(xe_tt->dev, xe_tt->sg, 355 + dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, 361 356 DMA_BIDIRECTIONAL, 0); 362 357 sg_free_table(xe_tt->sg); 363 358 
xe_tt->sg = NULL; ··· 372 367 return xe_tt->sg; 373 368 } 374 369 370 + /* 371 + * Account ttm pages against the device shrinker's shrinkable and 372 + * purgeable counts. 373 + */ 374 + static void xe_ttm_tt_account_add(struct ttm_tt *tt) 375 + { 376 + struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 377 + 378 + if (xe_tt->purgeable) 379 + xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages); 380 + else 381 + xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0); 382 + } 383 + 384 + static void xe_ttm_tt_account_subtract(struct ttm_tt *tt) 385 + { 386 + struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 387 + 388 + if (xe_tt->purgeable) 389 + xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages); 390 + else 391 + xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0); 392 + } 393 + 375 394 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, 376 395 u32 page_flags) 377 396 { 378 397 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 379 398 struct xe_device *xe = xe_bo_device(bo); 380 - struct xe_ttm_tt *tt; 399 + struct xe_ttm_tt *xe_tt; 400 + struct ttm_tt *tt; 381 401 unsigned long extra_pages; 382 402 enum ttm_caching caching = ttm_cached; 383 403 int err; 384 404 385 - tt = kzalloc(sizeof(*tt), GFP_KERNEL); 386 - if (!tt) 405 + xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL); 406 + if (!xe_tt) 387 407 return NULL; 388 408 389 - tt->dev = xe->drm.dev; 409 + tt = &xe_tt->ttm; 410 + xe_tt->xe = xe; 390 411 391 412 extra_pages = 0; 392 413 if (xe_bo_needs_ccs_pages(bo)) ··· 458 427 caching = ttm_uncached; 459 428 } 460 429 461 - err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); 430 + if (ttm_bo->type != ttm_bo_type_sg) 431 + page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE; 432 + 433 + err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages); 462 434 if (err) { 463 - kfree(tt); 435 + kfree(xe_tt); 464 436 return NULL; 
465 437 } 466 438 467 - return &tt->ttm; 439 + if (ttm_bo->type != ttm_bo_type_sg) { 440 + err = ttm_tt_setup_backup(tt); 441 + if (err) { 442 + ttm_tt_fini(tt); 443 + kfree(xe_tt); 444 + return NULL; 445 + } 446 + } 447 + 448 + return tt; 468 449 } 469 450 470 451 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, 471 452 struct ttm_operation_ctx *ctx) 472 453 { 454 + struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 473 455 int err; 474 456 475 457 /* 476 458 * dma-bufs are not populated with pages, and the dma- 477 459 * addresses are set up when moved to XE_PL_TT. 478 460 */ 479 - if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) 461 + if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && 462 + !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) 480 463 return 0; 481 464 482 - err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); 465 + if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) { 466 + err = ttm_tt_restore(ttm_dev, tt, ctx); 467 + } else { 468 + ttm_tt_clear_backed_up(tt); 469 + err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); 470 + } 483 471 if (err) 484 472 return err; 485 473 486 - return err; 474 + xe_tt->purgeable = false; 475 + xe_ttm_tt_account_add(tt); 476 + 477 + return 0; 487 478 } 488 479 489 480 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) 490 481 { 491 - if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) 482 + if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && 483 + !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) 492 484 return; 493 485 494 486 xe_tt_unmap_sg(tt); 495 487 496 - return ttm_pool_free(&ttm_dev->pool, tt); 488 + ttm_pool_free(&ttm_dev->pool, tt); 489 + xe_ttm_tt_account_subtract(tt); 497 490 } 498 491 499 492 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) ··· 965 910 } 966 911 967 912 return ret; 913 + } 914 + 915 + static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx, 916 + struct ttm_buffer_object *bo, 917 + unsigned long *scanned) 918 + { 919 + 
long lret; 920 + 921 + /* Fake move to system, without copying data. */ 922 + if (bo->resource->mem_type != XE_PL_SYSTEM) { 923 + struct ttm_resource *new_resource; 924 + 925 + lret = ttm_bo_wait_ctx(bo, ctx); 926 + if (lret) 927 + return lret; 928 + 929 + lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx); 930 + if (lret) 931 + return lret; 932 + 933 + xe_tt_unmap_sg(bo->ttm); 934 + ttm_bo_move_null(bo, new_resource); 935 + } 936 + 937 + *scanned += bo->ttm->num_pages; 938 + lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags) 939 + {.purge = true, 940 + .writeback = false, 941 + .allow_move = false}); 942 + 943 + if (lret > 0) 944 + xe_ttm_tt_account_subtract(bo->ttm); 945 + 946 + return lret; 947 + } 948 + 949 + /** 950 + * xe_bo_shrink() - Try to shrink an xe bo. 951 + * @ctx: The struct ttm_operation_ctx used for shrinking. 952 + * @bo: The TTM buffer object whose pages to shrink. 953 + * @flags: Flags governing the shrink behaviour. 954 + * @scanned: Pointer to a counter of the number of pages 955 + * attempted to shrink. 956 + * 957 + * Try to shrink- or purge a bo, and if it succeeds, unmap dma. 958 + * Note that we need to be able to handle also non xe bos 959 + * (ghost bos), but only if the struct ttm_tt is embedded in 960 + * a struct xe_ttm_tt. When the function attempts to shrink 961 + * the pages of a buffer object, The value pointed to by @scanned 962 + * is updated. 963 + * 964 + * Return: The number of pages shrunken or purged, or negative error 965 + * code on failure. 
966 + */ 967 + long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, 968 + const struct xe_bo_shrink_flags flags, 969 + unsigned long *scanned) 970 + { 971 + struct ttm_tt *tt = bo->ttm; 972 + struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 973 + struct ttm_place place = {.mem_type = bo->resource->mem_type}; 974 + struct xe_bo *xe_bo = ttm_to_xe_bo(bo); 975 + struct xe_device *xe = xe_tt->xe; 976 + bool needs_rpm; 977 + long lret = 0L; 978 + 979 + if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) || 980 + (flags.purge && !xe_tt->purgeable)) 981 + return -EBUSY; 982 + 983 + if (!ttm_bo_eviction_valuable(bo, &place)) 984 + return -EBUSY; 985 + 986 + if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo)) 987 + return xe_bo_shrink_purge(ctx, bo, scanned); 988 + 989 + if (xe_tt->purgeable) { 990 + if (bo->resource->mem_type != XE_PL_SYSTEM) 991 + lret = xe_bo_move_notify(xe_bo, ctx); 992 + if (!lret) 993 + lret = xe_bo_shrink_purge(ctx, bo, scanned); 994 + goto out_unref; 995 + } 996 + 997 + /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */ 998 + needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM && 999 + xe_bo_needs_ccs_pages(xe_bo)); 1000 + if (needs_rpm && !xe_pm_runtime_get_if_active(xe)) 1001 + goto out_unref; 1002 + 1003 + *scanned += tt->num_pages; 1004 + lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags) 1005 + {.purge = false, 1006 + .writeback = flags.writeback, 1007 + .allow_move = true}); 1008 + if (needs_rpm) 1009 + xe_pm_runtime_put(xe); 1010 + 1011 + if (lret > 0) 1012 + xe_ttm_tt_account_subtract(tt); 1013 + 1014 + out_unref: 1015 + xe_bo_put(xe_bo); 1016 + 1017 + return lret; 968 1018 } 969 1019 970 1020 /** ··· 2089 1929 } 2090 1930 2091 1931 ttm_bo_pin(&bo->ttm); 1932 + if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) 1933 + xe_ttm_tt_account_subtract(bo->ttm.ttm); 2092 1934 2093 1935 /* 2094 1936 * FIXME: If we always use the reserve / unreserve functions for 
locking ··· 2150 1988 } 2151 1989 2152 1990 ttm_bo_pin(&bo->ttm); 1991 + if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) 1992 + xe_ttm_tt_account_subtract(bo->ttm.ttm); 2153 1993 2154 1994 /* 2155 1995 * FIXME: If we always use the reserve / unreserve functions for locking ··· 2186 2022 spin_unlock(&xe->pinned.lock); 2187 2023 2188 2024 ttm_bo_unpin(&bo->ttm); 2025 + if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) 2026 + xe_ttm_tt_account_add(bo->ttm.ttm); 2189 2027 2190 2028 /* 2191 2029 * FIXME: If we always use the reserve / unreserve functions for locking ··· 2211 2045 spin_unlock(&xe->pinned.lock); 2212 2046 } 2213 2047 ttm_bo_unpin(&bo->ttm); 2048 + if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) 2049 + xe_ttm_tt_account_add(bo->ttm.ttm); 2214 2050 } 2215 2051 2216 2052 /**
+36
drivers/gpu/drm/xe/xe_bo.h
··· 148 148 149 149 void xe_bo_put(struct xe_bo *bo); 150 150 151 + /* 152 + * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an 153 + * xe bo 154 + * @bo: The bo for which we want to obtain a refcount. 155 + * 156 + * There is a short window between where the bo's GEM object refcount reaches 157 + * zero and where we put the final ttm_bo reference. Code in the eviction- and 158 + * shrinking path should therefore attempt to grab a gem object reference before 159 + * trying to use members outside of the base class ttm object. This function is 160 + * intended for that purpose. On successful return, this function must be paired 161 + * with an xe_bo_put(). 162 + * 163 + * Return: @bo on success, NULL on failure. 164 + */ 165 + static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo) 166 + { 167 + if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount)) 168 + return NULL; 169 + 170 + return bo; 171 + } 172 + 151 173 static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo) 152 174 { 153 175 if (bo) ··· 370 348 */ 371 349 return round_down(max / 2, PAGE_SIZE); 372 350 } 351 + 352 + /** 353 + * struct xe_bo_shrink_flags - flags governing the shrink behaviour. 354 + * @purge: Only purging allowed. Don't shrink if bo not purgeable. 355 + * @writeback: Attempt to immediately move content to swap. 356 + */ 357 + struct xe_bo_shrink_flags { 358 + u32 purge : 1; 359 + u32 writeback : 1; 360 + }; 361 + 362 + long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, 363 + const struct xe_bo_shrink_flags flags, 364 + unsigned long *scanned); 373 365 374 366 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) 375 367 /**
+8
drivers/gpu/drm/xe/xe_device.c
··· 52 52 #include "xe_pmu.h" 53 53 #include "xe_pxp.h" 54 54 #include "xe_query.h" 55 + #include "xe_shrinker.h" 55 56 #include "xe_sriov.h" 56 57 #include "xe_survivability_mode.h" 57 58 #include "xe_tile.h" ··· 404 403 if (xe->unordered_wq) 405 404 destroy_workqueue(xe->unordered_wq); 406 405 406 + if (!IS_ERR_OR_NULL(xe->mem.shrinker)) 407 + xe_shrinker_destroy(xe->mem.shrinker); 408 + 407 409 if (xe->destroy_wq) 408 410 destroy_workqueue(xe->destroy_wq); 409 411 ··· 438 434 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); 439 435 if (err) 440 436 goto err; 437 + 438 + xe->mem.shrinker = xe_shrinker_create(xe); 439 + if (IS_ERR(xe->mem.shrinker)) 440 + return ERR_CAST(xe->mem.shrinker); 441 441 442 442 xe->info.devid = pdev->device; 443 443 xe->info.revid = pdev->revision;
+2
drivers/gpu/drm/xe/xe_device_types.h
··· 372 372 struct xe_vram_region vram; 373 373 /** @mem.sys_mgr: system TTM manager */ 374 374 struct ttm_resource_manager sys_mgr; 375 + /** @mem.sys_mgr: system memory shrinker. */ 376 + struct xe_shrinker *shrinker; 375 377 } mem; 376 378 377 379 /** @sriov: device level virtualization data */
+258
drivers/gpu/drm/xe/xe_shrinker.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2024 Intel Corporation 4 + */ 5 + 6 + #include <linux/shrinker.h> 7 + 8 + #include <drm/ttm/ttm_backup.h> 9 + #include <drm/ttm/ttm_bo.h> 10 + #include <drm/ttm/ttm_tt.h> 11 + 12 + #include "xe_bo.h" 13 + #include "xe_pm.h" 14 + #include "xe_shrinker.h" 15 + 16 + /** 17 + * struct xe_shrinker - per-device shrinker 18 + * @xe: Back pointer to the device. 19 + * @lock: Lock protecting accounting. 20 + * @shrinkable_pages: Number of pages that are currently shrinkable. 21 + * @purgeable_pages: Number of pages that are currently purgeable. 22 + * @shrink: Pointer to the mm shrinker. 23 + * @pm_worker: Worker to wake up the device if required. 24 + */ 25 + struct xe_shrinker { 26 + struct xe_device *xe; 27 + rwlock_t lock; 28 + long shrinkable_pages; 29 + long purgeable_pages; 30 + struct shrinker *shrink; 31 + struct work_struct pm_worker; 32 + }; 33 + 34 + static struct xe_shrinker *to_xe_shrinker(struct shrinker *shrink) 35 + { 36 + return shrink->private_data; 37 + } 38 + 39 + /** 40 + * xe_shrinker_mod_pages() - Modify shrinker page accounting 41 + * @shrinker: Pointer to the struct xe_shrinker. 42 + * @shrinkable: Shrinkable pages delta. May be negative. 43 + * @purgeable: Purgeable page delta. May be negative. 44 + * 45 + * Modifies the shrinkable and purgeable pages accounting. 
46 + */ 47 + void 48 + xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable) 49 + { 50 + write_lock(&shrinker->lock); 51 + shrinker->shrinkable_pages += shrinkable; 52 + shrinker->purgeable_pages += purgeable; 53 + write_unlock(&shrinker->lock); 54 + } 55 + 56 + static s64 xe_shrinker_walk(struct xe_device *xe, 57 + struct ttm_operation_ctx *ctx, 58 + const struct xe_bo_shrink_flags flags, 59 + unsigned long to_scan, unsigned long *scanned) 60 + { 61 + unsigned int mem_type; 62 + s64 freed = 0, lret; 63 + 64 + for (mem_type = XE_PL_SYSTEM; mem_type <= XE_PL_TT; ++mem_type) { 65 + struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type); 66 + struct ttm_bo_lru_cursor curs; 67 + struct ttm_buffer_object *ttm_bo; 68 + 69 + if (!man || !man->use_tt) 70 + continue; 71 + 72 + ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) { 73 + if (!ttm_bo_shrink_suitable(ttm_bo, ctx)) 74 + continue; 75 + 76 + lret = xe_bo_shrink(ctx, ttm_bo, flags, scanned); 77 + if (lret < 0) 78 + return lret; 79 + 80 + freed += lret; 81 + if (*scanned >= to_scan) 82 + break; 83 + } 84 + } 85 + 86 + return freed; 87 + } 88 + 89 + static unsigned long 90 + xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) 91 + { 92 + struct xe_shrinker *shrinker = to_xe_shrinker(shrink); 93 + unsigned long num_pages; 94 + bool can_backup = !!(sc->gfp_mask & __GFP_FS); 95 + 96 + num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT; 97 + read_lock(&shrinker->lock); 98 + 99 + if (can_backup) 100 + num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages); 101 + else 102 + num_pages = 0; 103 + 104 + num_pages += shrinker->purgeable_pages; 105 + read_unlock(&shrinker->lock); 106 + 107 + return num_pages ? num_pages : SHRINK_EMPTY; 108 + } 109 + 110 + /* 111 + * Check if we need runtime pm, and if so try to grab a reference if 112 + * already active. 
If grabbing a reference fails, queue a worker that 113 + * does it for us outside of reclaim, but don't wait for it to complete. 114 + * If bo shrinking needs an rpm reference and we don't have it (yet), 115 + * that bo will be skipped anyway. 116 + */ 117 + static bool xe_shrinker_runtime_pm_get(struct xe_shrinker *shrinker, bool force, 118 + unsigned long nr_to_scan, bool can_backup) 119 + { 120 + struct xe_device *xe = shrinker->xe; 121 + 122 + if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) || 123 + !ttm_backup_bytes_avail()) 124 + return false; 125 + 126 + if (!force) { 127 + read_lock(&shrinker->lock); 128 + force = (nr_to_scan > shrinker->purgeable_pages && can_backup); 129 + read_unlock(&shrinker->lock); 130 + if (!force) 131 + return false; 132 + } 133 + 134 + if (!xe_pm_runtime_get_if_active(xe)) { 135 + if (xe_rpm_reclaim_safe(xe) && !ttm_bo_shrink_avoid_wait()) { 136 + xe_pm_runtime_get(xe); 137 + return true; 138 + } 139 + queue_work(xe->unordered_wq, &shrinker->pm_worker); 140 + return false; 141 + } 142 + 143 + return true; 144 + } 145 + 146 + static void xe_shrinker_runtime_pm_put(struct xe_shrinker *shrinker, bool runtime_pm) 147 + { 148 + if (runtime_pm) 149 + xe_pm_runtime_put(shrinker->xe); 150 + } 151 + 152 + static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) 153 + { 154 + struct xe_shrinker *shrinker = to_xe_shrinker(shrink); 155 + struct ttm_operation_ctx ctx = { 156 + .interruptible = false, 157 + .no_wait_gpu = ttm_bo_shrink_avoid_wait(), 158 + }; 159 + unsigned long nr_to_scan, nr_scanned = 0, freed = 0; 160 + struct xe_bo_shrink_flags shrink_flags = { 161 + .purge = true, 162 + /* Don't request writeback without __GFP_IO. 
*/ 163 + .writeback = !ctx.no_wait_gpu && (sc->gfp_mask & __GFP_IO), 164 + }; 165 + bool runtime_pm; 166 + bool purgeable; 167 + bool can_backup = !!(sc->gfp_mask & __GFP_FS); 168 + s64 lret; 169 + 170 + nr_to_scan = sc->nr_to_scan; 171 + 172 + read_lock(&shrinker->lock); 173 + purgeable = !!shrinker->purgeable_pages; 174 + read_unlock(&shrinker->lock); 175 + 176 + /* Might need runtime PM. Try to wake early if it looks like it. */ 177 + runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup); 178 + 179 + if (purgeable && nr_scanned < nr_to_scan) { 180 + lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags, 181 + nr_to_scan, &nr_scanned); 182 + if (lret >= 0) 183 + freed += lret; 184 + } 185 + 186 + sc->nr_scanned = nr_scanned; 187 + if (nr_scanned >= nr_to_scan || !can_backup) 188 + goto out; 189 + 190 + /* If we didn't wake before, try to do it now if needed. */ 191 + if (!runtime_pm) 192 + runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup); 193 + 194 + shrink_flags.purge = false; 195 + lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags, 196 + nr_to_scan, &nr_scanned); 197 + if (lret >= 0) 198 + freed += lret; 199 + 200 + sc->nr_scanned = nr_scanned; 201 + out: 202 + xe_shrinker_runtime_pm_put(shrinker, runtime_pm); 203 + return nr_scanned ? freed : SHRINK_STOP; 204 + } 205 + 206 + /* Wake up the device for shrinking. */ 207 + static void xe_shrinker_pm(struct work_struct *work) 208 + { 209 + struct xe_shrinker *shrinker = 210 + container_of(work, typeof(*shrinker), pm_worker); 211 + 212 + xe_pm_runtime_get(shrinker->xe); 213 + xe_pm_runtime_put(shrinker->xe); 214 + } 215 + 216 + /** 217 + * xe_shrinker_create() - Create an xe per-device shrinker 218 + * @xe: Pointer to the xe device. 219 + * 220 + * Returns: A pointer to the created shrinker on success, 221 + * Negative error code on failure. 
222 + */ 223 + struct xe_shrinker *xe_shrinker_create(struct xe_device *xe) 224 + { 225 + struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL); 226 + 227 + if (!shrinker) 228 + return ERR_PTR(-ENOMEM); 229 + 230 + shrinker->shrink = shrinker_alloc(0, "xe system shrinker"); 231 + if (!shrinker->shrink) { 232 + kfree(shrinker); 233 + return ERR_PTR(-ENOMEM); 234 + } 235 + 236 + INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm); 237 + shrinker->xe = xe; 238 + rwlock_init(&shrinker->lock); 239 + shrinker->shrink->count_objects = xe_shrinker_count; 240 + shrinker->shrink->scan_objects = xe_shrinker_scan; 241 + shrinker->shrink->private_data = shrinker; 242 + shrinker_register(shrinker->shrink); 243 + 244 + return shrinker; 245 + } 246 + 247 + /** 248 + * xe_shrinker_destroy() - Destroy an xe per-device shrinker 249 + * @shrinker: Pointer to the shrinker to destroy. 250 + */ 251 + void xe_shrinker_destroy(struct xe_shrinker *shrinker) 252 + { 253 + xe_assert(shrinker->xe, !shrinker->shrinkable_pages); 254 + xe_assert(shrinker->xe, !shrinker->purgeable_pages); 255 + shrinker_free(shrinker->shrink); 256 + flush_work(&shrinker->pm_worker); 257 + kfree(shrinker); 258 + }
+18
drivers/gpu/drm/xe/xe_shrinker.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2024 Intel Corporation 4 + */ 5 + 6 + #ifndef _XE_SHRINKER_H_ 7 + #define _XE_SHRINKER_H_ 8 + 9 + struct xe_shrinker; 10 + struct xe_device; 11 + 12 + void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable); 13 + 14 + struct xe_shrinker *xe_shrinker_create(struct xe_device *xe); 15 + 16 + void xe_shrinker_destroy(struct xe_shrinker *shrinker); 17 + 18 + #endif
+1 -2
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
··· 108 108 u64 gtt_size; 109 109 110 110 si_meminfo(&si); 111 + /* Potentially restrict amount of TT memory here. */ 111 112 gtt_size = (u64)si.totalram * si.mem_unit; 112 - /* TTM limits allocation of all TTM devices by 50% of system memory */ 113 - gtt_size /= 2; 114 113 115 114 man->use_tt = true; 116 115 man->func = &xe_ttm_sys_mgr_func;
-9
drivers/gpu/host1x/debug.c
··· 216 216 217 217 show_all(host1x, &o, true); 218 218 } 219 - 220 - void host1x_debug_dump_syncpts(struct host1x *host1x) 221 - { 222 - struct output o = { 223 - .fn = write_to_printk 224 - }; 225 - 226 - show_syncpts(host1x, &o, false); 227 - }
-1
drivers/gpu/host1x/debug.h
··· 41 41 void host1x_debug_init(struct host1x *host1x); 42 42 void host1x_debug_deinit(struct host1x *host1x); 43 43 void host1x_debug_dump(struct host1x *host1x); 44 - void host1x_debug_dump_syncpts(struct host1x *host1x); 45 44 46 45 #endif
-38
drivers/gpu/ipu-v3/ipu-common.c
··· 165 165 } 166 166 EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode); 167 167 168 - int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode, 169 - bool hflip, bool vflip) 170 - { 171 - u32 r90, vf, hf; 172 - 173 - r90 = ((u32)mode >> 2) & 0x1; 174 - hf = ((u32)mode >> 1) & 0x1; 175 - vf = ((u32)mode >> 0) & 0x1; 176 - hf ^= (u32)hflip; 177 - vf ^= (u32)vflip; 178 - 179 - switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) { 180 - case IPU_ROTATE_NONE: 181 - *degrees = 0; 182 - break; 183 - case IPU_ROTATE_90_RIGHT: 184 - *degrees = 90; 185 - break; 186 - case IPU_ROTATE_180: 187 - *degrees = 180; 188 - break; 189 - case IPU_ROTATE_90_LEFT: 190 - *degrees = 270; 191 - break; 192 - default: 193 - return -EINVAL; 194 - } 195 - 196 - return 0; 197 - } 198 - EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees); 199 - 200 168 struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num) 201 169 { 202 170 struct ipuv3_channel *channel; ··· 483 515 return 0; 484 516 } 485 517 EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel); 486 - 487 - bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno) 488 - { 489 - return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno)); 490 - } 491 - EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy); 492 518 493 519 int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms) 494 520 {
-23
drivers/gpu/ipu-v3/ipu-cpmem.c
··· 337 337 } 338 338 EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id); 339 339 340 - int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch) 341 - { 342 - return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1; 343 - } 344 - EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize); 345 - 346 340 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize) 347 341 { 348 342 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1); ··· 445 451 return 0; 446 452 } 447 453 EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough); 448 - 449 - void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format) 450 - { 451 - switch (pixel_format) { 452 - case V4L2_PIX_FMT_UYVY: 453 - ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */ 454 - ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */ 455 - ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */ 456 - break; 457 - case V4L2_PIX_FMT_YUYV: 458 - ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */ 459 - ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */ 460 - ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */ 461 - break; 462 - } 463 - } 464 - EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved); 465 454 466 455 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, 467 456 unsigned int uv_stride,
-108
drivers/gpu/ipu-v3/ipu-csi.c
··· 186 186 } 187 187 188 188 /* 189 - * Set mclk division ratio for generating test mode mclk. Only used 190 - * for test generator. 191 - */ 192 - static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk, 193 - u32 ipu_clk) 194 - { 195 - u32 temp; 196 - int div_ratio; 197 - 198 - div_ratio = (ipu_clk / pixel_clk) - 1; 199 - 200 - if (div_ratio > 0xFF || div_ratio < 0) { 201 - dev_err(csi->ipu->dev, 202 - "value of pixel_clk extends normal range\n"); 203 - return -EINVAL; 204 - } 205 - 206 - temp = ipu_csi_read(csi, CSI_SENS_CONF); 207 - temp &= ~CSI_SENS_CONF_DIVRATIO_MASK; 208 - ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT), 209 - CSI_SENS_CONF); 210 - 211 - return 0; 212 - } 213 - 214 - /* 215 189 * Find the CSI data format and data width for the given V4L2 media 216 190 * bus pixel format code. 217 191 */ ··· 512 538 } 513 539 EXPORT_SYMBOL_GPL(ipu_csi_init_interface); 514 540 515 - bool ipu_csi_is_interlaced(struct ipu_csi *csi) 516 - { 517 - unsigned long flags; 518 - u32 sensor_protocol; 519 - 520 - spin_lock_irqsave(&csi->lock, flags); 521 - sensor_protocol = 522 - (ipu_csi_read(csi, CSI_SENS_CONF) & 523 - CSI_SENS_CONF_SENS_PRTCL_MASK) >> 524 - CSI_SENS_CONF_SENS_PRTCL_SHIFT; 525 - spin_unlock_irqrestore(&csi->lock, flags); 526 - 527 - switch (sensor_protocol) { 528 - case IPU_CSI_CLK_MODE_GATED_CLK: 529 - case IPU_CSI_CLK_MODE_NONGATED_CLK: 530 - case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE: 531 - case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR: 532 - case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR: 533 - return false; 534 - case IPU_CSI_CLK_MODE_CCIR656_INTERLACED: 535 - case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR: 536 - case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR: 537 - return true; 538 - default: 539 - dev_err(csi->ipu->dev, 540 - "CSI %d sensor protocol unsupported\n", csi->id); 541 - return false; 542 - } 543 - } 544 - EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced); 545 - 546 - void ipu_csi_get_window(struct ipu_csi 
*csi, struct v4l2_rect *w) 547 - { 548 - unsigned long flags; 549 - u32 reg; 550 - 551 - spin_lock_irqsave(&csi->lock, flags); 552 - 553 - reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE); 554 - w->width = (reg & 0xFFFF) + 1; 555 - w->height = (reg >> 16 & 0xFFFF) + 1; 556 - 557 - reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL); 558 - w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT; 559 - w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT; 560 - 561 - spin_unlock_irqrestore(&csi->lock, flags); 562 - } 563 - EXPORT_SYMBOL_GPL(ipu_csi_get_window); 564 - 565 541 void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w) 566 542 { 567 543 unsigned long flags; ··· 547 623 spin_unlock_irqrestore(&csi->lock, flags); 548 624 } 549 625 EXPORT_SYMBOL_GPL(ipu_csi_set_downsize); 550 - 551 - void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active, 552 - u32 r_value, u32 g_value, u32 b_value, 553 - u32 pix_clk) 554 - { 555 - unsigned long flags; 556 - u32 ipu_clk = clk_get_rate(csi->clk_ipu); 557 - u32 temp; 558 - 559 - spin_lock_irqsave(&csi->lock, flags); 560 - 561 - temp = ipu_csi_read(csi, CSI_TST_CTRL); 562 - 563 - if (!active) { 564 - temp &= ~CSI_TEST_GEN_MODE_EN; 565 - ipu_csi_write(csi, temp, CSI_TST_CTRL); 566 - } else { 567 - /* Set sensb_mclk div_ratio */ 568 - ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk); 569 - 570 - temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK | 571 - CSI_TEST_GEN_B_MASK); 572 - temp |= CSI_TEST_GEN_MODE_EN; 573 - temp |= (r_value << CSI_TEST_GEN_R_SHIFT) | 574 - (g_value << CSI_TEST_GEN_G_SHIFT) | 575 - (b_value << CSI_TEST_GEN_B_SHIFT); 576 - ipu_csi_write(csi, temp, CSI_TST_CTRL); 577 - } 578 - 579 - spin_unlock_irqrestore(&csi->lock, flags); 580 - } 581 - EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator); 582 626 583 627 int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc, 584 628 struct v4l2_mbus_framefmt *mbus_fmt)
-73
drivers/gpu/ipu-v3/ipu-ic.c
··· 321 321 } 322 322 EXPORT_SYMBOL_GPL(ipu_ic_task_disable); 323 323 324 - int ipu_ic_task_graphics_init(struct ipu_ic *ic, 325 - const struct ipu_ic_colorspace *g_in_cs, 326 - bool galpha_en, u32 galpha, 327 - bool colorkey_en, u32 colorkey) 328 - { 329 - struct ipu_ic_priv *priv = ic->priv; 330 - struct ipu_ic_csc csc2; 331 - unsigned long flags; 332 - u32 reg, ic_conf; 333 - int ret = 0; 334 - 335 - if (ic->task == IC_TASK_ENCODER) 336 - return -EINVAL; 337 - 338 - spin_lock_irqsave(&priv->lock, flags); 339 - 340 - ic_conf = ipu_ic_read(ic, IC_CONF); 341 - 342 - if (!(ic_conf & ic->bit->ic_conf_csc1_en)) { 343 - struct ipu_ic_csc csc1; 344 - 345 - ret = ipu_ic_calc_csc(&csc1, 346 - V4L2_YCBCR_ENC_601, 347 - V4L2_QUANTIZATION_FULL_RANGE, 348 - IPUV3_COLORSPACE_RGB, 349 - V4L2_YCBCR_ENC_601, 350 - V4L2_QUANTIZATION_FULL_RANGE, 351 - IPUV3_COLORSPACE_RGB); 352 - if (ret) 353 - goto unlock; 354 - 355 - /* need transparent CSC1 conversion */ 356 - ret = init_csc(ic, &csc1, 0); 357 - if (ret) 358 - goto unlock; 359 - } 360 - 361 - ic->g_in_cs = *g_in_cs; 362 - csc2.in_cs = ic->g_in_cs; 363 - csc2.out_cs = ic->out_cs; 364 - 365 - ret = __ipu_ic_calc_csc(&csc2); 366 - if (ret) 367 - goto unlock; 368 - 369 - ret = init_csc(ic, &csc2, 1); 370 - if (ret) 371 - goto unlock; 372 - 373 - if (galpha_en) { 374 - ic_conf |= IC_CONF_IC_GLB_LOC_A; 375 - reg = ipu_ic_read(ic, IC_CMBP_1); 376 - reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit); 377 - reg |= (galpha << ic->bit->ic_cmb_galpha_bit); 378 - ipu_ic_write(ic, reg, IC_CMBP_1); 379 - } else 380 - ic_conf &= ~IC_CONF_IC_GLB_LOC_A; 381 - 382 - if (colorkey_en) { 383 - ic_conf |= IC_CONF_KEY_COLOR_EN; 384 - ipu_ic_write(ic, colorkey, IC_CMBP_2); 385 - } else 386 - ic_conf &= ~IC_CONF_KEY_COLOR_EN; 387 - 388 - ipu_ic_write(ic, ic_conf, IC_CONF); 389 - 390 - ic->graphics = true; 391 - unlock: 392 - spin_unlock_irqrestore(&priv->lock, flags); 393 - return ret; 394 - } 395 - EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init); 396 - 397 324 
int ipu_ic_task_init_rsc(struct ipu_ic *ic, 398 325 const struct ipu_ic_csc *csc, 399 326 int in_width, int in_height,
-48
drivers/gpu/ipu-v3/ipu-image-convert.c
··· 355 355 (ic_image->fmt->fourcc >> 24) & 0xff); 356 356 } 357 357 358 - int ipu_image_convert_enum_format(int index, u32 *fourcc) 359 - { 360 - const struct ipu_image_pixfmt *fmt; 361 - 362 - if (index >= (int)ARRAY_SIZE(image_convert_formats)) 363 - return -EINVAL; 364 - 365 - /* Format found */ 366 - fmt = &image_convert_formats[index]; 367 - *fourcc = fmt->fourcc; 368 - return 0; 369 - } 370 - EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format); 371 - 372 358 static void free_dma_buf(struct ipu_image_convert_priv *priv, 373 359 struct ipu_image_convert_dma_buf *buf) 374 360 { ··· 2422 2436 return run; 2423 2437 } 2424 2438 EXPORT_SYMBOL_GPL(ipu_image_convert); 2425 - 2426 - /* "Canned" synchronous single image conversion */ 2427 - static void image_convert_sync_complete(struct ipu_image_convert_run *run, 2428 - void *data) 2429 - { 2430 - struct completion *comp = data; 2431 - 2432 - complete(comp); 2433 - } 2434 - 2435 - int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 2436 - struct ipu_image *in, struct ipu_image *out, 2437 - enum ipu_rotate_mode rot_mode) 2438 - { 2439 - struct ipu_image_convert_run *run; 2440 - struct completion comp; 2441 - int ret; 2442 - 2443 - init_completion(&comp); 2444 - 2445 - run = ipu_image_convert(ipu, ic_task, in, out, rot_mode, 2446 - image_convert_sync_complete, &comp); 2447 - if (IS_ERR(run)) 2448 - return PTR_ERR(run); 2449 - 2450 - ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000)); 2451 - ret = (ret == 0) ? -ETIMEDOUT : 0; 2452 - 2453 - ipu_image_convert_unprepare(run->ctx); 2454 - kfree(run); 2455 - 2456 - return ret; 2457 - } 2458 - EXPORT_SYMBOL_GPL(ipu_image_convert_sync); 2459 2439 2460 2440 int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev) 2461 2441 {
-2
drivers/gpu/ipu-v3/ipu-prv.h
··· 216 216 int ipu_module_enable(struct ipu_soc *ipu, u32 mask); 217 217 int ipu_module_disable(struct ipu_soc *ipu, u32 mask); 218 218 219 - bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno); 220 - 221 219 int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id, 222 220 unsigned long base, u32 module, struct clk *clk_ipu); 223 221 void ipu_csi_exit(struct ipu_soc *ipu, int id);
-11
drivers/gpu/ipu-v3/ipu-vdi.c
··· 150 150 } 151 151 EXPORT_SYMBOL_GPL(ipu_vdi_setup); 152 152 153 - void ipu_vdi_unsetup(struct ipu_vdi *vdi) 154 - { 155 - unsigned long flags; 156 - 157 - spin_lock_irqsave(&vdi->lock, flags); 158 - ipu_vdi_write(vdi, 0, VDI_FSIZE); 159 - ipu_vdi_write(vdi, 0, VDI_C); 160 - spin_unlock_irqrestore(&vdi->lock, flags); 161 - } 162 - EXPORT_SYMBOL_GPL(ipu_vdi_unsetup); 163 - 164 153 int ipu_vdi_enable(struct ipu_vdi *vdi) 165 154 { 166 155 unsigned long flags;
+1 -3
drivers/staging/fbtft/fbtft-core.c
··· 337 337 list_for_each_entry(pageref, pagereflist, list) { 338 338 y_low = pageref->offset / info->fix.line_length; 339 339 y_high = (pageref->offset + PAGE_SIZE - 1) / info->fix.line_length; 340 - dev_dbg(info->device, 341 - "page->index=%lu y_low=%d y_high=%d\n", 342 - pageref->page->index, y_low, y_high); 340 + dev_dbg(info->device, "y_low=%d y_high=%d\n", y_low, y_high); 343 341 if (y_high > info->var.yres - 1) 344 342 y_high = info->var.yres - 1; 345 343 if (y_low < dirty_lines_start)
+3
include/drm/drm_format_helper.h
··· 96 96 void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, 97 97 const struct iosys_map *src, const struct drm_framebuffer *fb, 98 98 const struct drm_rect *clip, struct drm_format_conv_state *state); 99 + void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch, 100 + const struct iosys_map *src, const struct drm_framebuffer *fb, 101 + const struct drm_rect *clip, struct drm_format_conv_state *state); 99 102 void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, 100 103 const struct iosys_map *src, const struct drm_framebuffer *fb, 101 104 const struct drm_rect *clip, struct drm_format_conv_state *state);
+14
include/drm/drm_gem.h
··· 35 35 */ 36 36 37 37 #include <linux/kref.h> 38 + #include <linux/dma-buf.h> 38 39 #include <linux/dma-resv.h> 39 40 #include <linux/list.h> 40 41 #include <linux/mutex.h> ··· 574 573 static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj) 575 574 { 576 575 return (obj->handle_count > 1) || obj->dma_buf; 576 + } 577 + 578 + /** 579 + * drm_gem_is_imported() - Tests if GEM object's buffer has been imported 580 + * @obj: the GEM object 581 + * 582 + * Returns: 583 + * True if the GEM object's buffer has been imported, false otherwise 584 + */ 585 + static inline bool drm_gem_is_imported(const struct drm_gem_object *obj) 586 + { 587 + /* The dma-buf's priv field points to the original GEM object. */ 588 + return obj->dma_buf && (obj->dma_buf->priv != obj); 577 589 } 578 590 579 591 #ifdef CONFIG_LOCKDEP
+1 -1
include/drm/drm_gem_shmem_helper.h
··· 120 120 { 121 121 return (shmem->madv > 0) && 122 122 !shmem->vmap_use_count && shmem->sgt && 123 - !shmem->base.dma_buf && !shmem->base.import_attach; 123 + !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base); 124 124 } 125 125 126 126 void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
-2
include/drm/drm_kunit_helpers.h
··· 95 95 sizeof(_type), \ 96 96 offsetof(_type, _member), \ 97 97 _feat)) 98 - struct drm_modeset_acquire_ctx * 99 - drm_kunit_helper_acquire_ctx_alloc(struct kunit *test); 100 98 101 99 struct drm_atomic_state * 102 100 drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+23 -18
include/drm/drm_print.h
··· 584 584 * Prefer drm_device based logging over device or prink based logging. 585 585 */ 586 586 587 + /* Helper to enforce struct drm_device type */ 588 + static inline struct device *__drm_to_dev(const struct drm_device *drm) 589 + { 590 + return drm ? drm->dev : NULL; 591 + } 592 + 587 593 /* Helper for struct drm_device based logging. */ 588 594 #define __drm_printk(drm, level, type, fmt, ...) \ 589 - dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__) 595 + dev_##level##type(__drm_to_dev(drm), "[drm] " fmt, ##__VA_ARGS__) 590 596 591 597 592 598 #define drm_info(drm, fmt, ...) \ ··· 626 620 627 621 628 622 #define drm_dbg_core(drm, fmt, ...) \ 629 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__) 630 - #define drm_dbg_driver(drm, fmt, ...) \ 631 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) 623 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_CORE, fmt, ##__VA_ARGS__) 624 + #define drm_dbg_driver(drm, fmt, ...) \ 625 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRIVER, fmt, ##__VA_ARGS__) 632 626 #define drm_dbg_kms(drm, fmt, ...) \ 633 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__) 627 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_KMS, fmt, ##__VA_ARGS__) 634 628 #define drm_dbg_prime(drm, fmt, ...) \ 635 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__) 629 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_PRIME, fmt, ##__VA_ARGS__) 636 630 #define drm_dbg_atomic(drm, fmt, ...) \ 637 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) 631 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) 638 632 #define drm_dbg_vbl(drm, fmt, ...) \ 639 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__) 633 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_VBL, fmt, ##__VA_ARGS__) 640 634 #define drm_dbg_state(drm, fmt, ...) \ 641 - drm_dev_dbg((drm) ? 
(drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__) 635 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_STATE, fmt, ##__VA_ARGS__) 642 636 #define drm_dbg_lease(drm, fmt, ...) \ 643 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__) 637 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_LEASE, fmt, ##__VA_ARGS__) 644 638 #define drm_dbg_dp(drm, fmt, ...) \ 645 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__) 639 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DP, fmt, ##__VA_ARGS__) 646 640 #define drm_dbg_drmres(drm, fmt, ...) \ 647 - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) 641 + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRMRES, fmt, ##__VA_ARGS__) 648 642 649 643 #define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__) 650 644 ··· 733 727 #define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \ 734 728 ({ \ 735 729 static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\ 736 - const struct drm_device *drm_ = (drm); \ 737 730 \ 738 731 if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \ 739 - drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \ 732 + drm_dev_printk(__drm_to_dev(drm), KERN_DEBUG, fmt, ## __VA_ARGS__); \ 740 733 }) 741 734 742 735 #define drm_dbg_ratelimited(drm, fmt, ...) \ ··· 757 752 /* Helper for struct drm_device based WARNs */ 758 753 #define drm_WARN(drm, condition, format, arg...) \ 759 754 WARN(condition, "%s %s: [drm] " format, \ 760 - dev_driver_string((drm)->dev), \ 761 - dev_name((drm)->dev), ## arg) 755 + dev_driver_string(__drm_to_dev(drm)), \ 756 + dev_name(__drm_to_dev(drm)), ## arg) 762 757 763 758 #define drm_WARN_ONCE(drm, condition, format, arg...) 
\ 764 759 WARN_ONCE(condition, "%s %s: [drm] " format, \ 765 - dev_driver_string((drm)->dev), \ 766 - dev_name((drm)->dev), ## arg) 760 + dev_driver_string(__drm_to_dev(drm)), \ 761 + dev_name(__drm_to_dev(drm)), ## arg) 767 762 768 763 #define drm_WARN_ON(drm, x) \ 769 764 drm_WARN((drm), (x), "%s", \
+74
include/drm/ttm/ttm_backup.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2024 Intel Corporation 4 + */ 5 + 6 + #ifndef _TTM_BACKUP_H_ 7 + #define _TTM_BACKUP_H_ 8 + 9 + #include <linux/mm_types.h> 10 + #include <linux/shmem_fs.h> 11 + 12 + struct ttm_backup; 13 + 14 + /** 15 + * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer 16 + * @handle: The handle to convert. 17 + * 18 + * Converts an opaque handle received from the 19 + * struct ttm_backoup_ops::backup_page() function to an (invalid) 20 + * struct page pointer suitable for a struct page array. 21 + * 22 + * Return: An (invalid) struct page pointer. 23 + */ 24 + static inline struct page * 25 + ttm_backup_handle_to_page_ptr(unsigned long handle) 26 + { 27 + return (struct page *)(handle << 1 | 1); 28 + } 29 + 30 + /** 31 + * ttm_backup_page_ptr_is_handle() - Whether a struct page pointer is a handle 32 + * @page: The struct page pointer to check. 33 + * 34 + * Return: true if the struct page pointer is a handld returned from 35 + * ttm_backup_handle_to_page_ptr(). False otherwise. 36 + */ 37 + static inline bool ttm_backup_page_ptr_is_handle(const struct page *page) 38 + { 39 + return (unsigned long)page & 1; 40 + } 41 + 42 + /** 43 + * ttm_backup_page_ptr_to_handle() - Convert a struct page pointer to a handle 44 + * @page: The struct page pointer to convert 45 + * 46 + * Return: The handle that was previously used in 47 + * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable 48 + * for use as argument in the struct ttm_backup_ops drop() or 49 + * copy_backed_up_page() functions. 
50 + */ 51 + static inline unsigned long 52 + ttm_backup_page_ptr_to_handle(const struct page *page) 53 + { 54 + WARN_ON(!ttm_backup_page_ptr_is_handle(page)); 55 + return (unsigned long)page >> 1; 56 + } 57 + 58 + void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle); 59 + 60 + int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, 61 + pgoff_t handle, bool intr); 62 + 63 + s64 64 + ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, 65 + bool writeback, pgoff_t idx, gfp_t page_gfp, 66 + gfp_t alloc_gfp); 67 + 68 + void ttm_backup_fini(struct ttm_backup *backup); 69 + 70 + u64 ttm_backup_bytes_avail(void); 71 + 72 + struct ttm_backup *ttm_backup_shmem_create(loff_t size); 73 + 74 + #endif
+93
include/drm/ttm/ttm_bo.h
··· 226 226 struct ttm_resource_manager *man, s64 target); 227 227 228 228 /** 229 + * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour 230 + * @purge: Purge the content rather than backing it up. 231 + * @writeback: Attempt to immediately write content to swap space. 232 + * @allow_move: Allow moving to system before shrinking. This is typically 233 + * not desired for zombie- or ghost objects (with zombie object meaning 234 + * objects with a zero gem object refcount) 235 + */ 236 + struct ttm_bo_shrink_flags { 237 + u32 purge : 1; 238 + u32 writeback : 1; 239 + u32 allow_move : 1; 240 + }; 241 + 242 + long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, 243 + const struct ttm_bo_shrink_flags flags); 244 + 245 + bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx); 246 + 247 + bool ttm_bo_shrink_avoid_wait(void); 248 + 249 + /** 229 250 * ttm_bo_get - reference a struct ttm_buffer_object 230 251 * 231 252 * @bo: The buffer object. ··· 487 466 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); 488 467 int ttm_bo_populate(struct ttm_buffer_object *bo, 489 468 struct ttm_operation_ctx *ctx); 469 + 470 + /* Driver LRU walk helpers initially targeted for shrinking. */ 471 + 472 + /** 473 + * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping 474 + */ 475 + struct ttm_bo_lru_cursor { 476 + /** @res_curs: Embedded struct ttm_resource_cursor. */ 477 + struct ttm_resource_cursor res_curs; 478 + /** 479 + * @ctx: The struct ttm_operation_ctx used while looping. 480 + * governs the locking mode. 481 + */ 482 + struct ttm_operation_ctx *ctx; 483 + /** 484 + * @bo: Buffer object pointer if a buffer object is refcounted, 485 + * NULL otherwise. 486 + */ 487 + struct ttm_buffer_object *bo; 488 + /** 489 + * @needs_unlock: Valid iff @bo != NULL. The bo resv needs 490 + * unlock before the next iteration or after loop exit. 
491 + */ 492 + bool needs_unlock; 493 + }; 494 + 495 + void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs); 496 + 497 + struct ttm_bo_lru_cursor * 498 + ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs, 499 + struct ttm_resource_manager *man, 500 + struct ttm_operation_ctx *ctx); 501 + 502 + struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs); 503 + 504 + struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs); 505 + 506 + /* 507 + * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor. 508 + */ 509 + DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *, 510 + if (_T) {ttm_bo_lru_cursor_fini(_T); }, 511 + ttm_bo_lru_cursor_init(curs, man, ctx), 512 + struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man, 513 + struct ttm_operation_ctx *ctx); 514 + static inline void * 515 + class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T) 516 + { return *_T; } 517 + #define class_ttm_bo_lru_cursor_is_conditional false 518 + 519 + /** 520 + * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning 521 + * resources on LRU lists. 522 + * @_cursor: struct ttm_bo_lru_cursor to use for the iteration. 523 + * @_man: The resource manager whose LRU lists to iterate over. 524 + * @_ctx: The struct ttm_operation_context to govern the @_bo locking. 525 + * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object 526 + * for the current iteration. 527 + * 528 + * Iterate over all resources of @_man and for each resource, attempt to 529 + * reference and lock (using the locking mode detailed in @_ctx) the buffer 530 + * object it points to. If successful, assign @_bo to the address of the 531 + * buffer object and update @_cursor. 
The iteration is guarded in the 532 + * sense that @_cursor will be initialized before looping start and cleaned 533 + * up at looping termination, even if terminated prematurely by, for 534 + * example a return or break statement. Exiting the loop will also unlock 535 + * (if needed) and unreference @_bo. 536 + */ 537 + #define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo) \ 538 + scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx) \ 539 + for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo); \ 540 + (_bo) = ttm_bo_lru_cursor_next(_cursor)) 490 541 491 542 #endif
+8
include/drm/ttm/ttm_pool.h
··· 33 33 34 34 struct device; 35 35 struct seq_file; 36 + struct ttm_backup_flags; 36 37 struct ttm_operation_ctx; 37 38 struct ttm_pool; 38 39 struct ttm_tt; ··· 89 88 void ttm_pool_fini(struct ttm_pool *pool); 90 89 91 90 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m); 91 + 92 + void ttm_pool_drop_backed_up(struct ttm_tt *tt); 93 + 94 + long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *ttm, 95 + const struct ttm_backup_flags *flags); 96 + int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt, 97 + const struct ttm_operation_ctx *ctx); 92 98 93 99 int ttm_pool_mgr_init(unsigned long num_pages); 94 100 void ttm_pool_mgr_fini(void);
+67 -2
include/drm/ttm/ttm_tt.h
··· 32 32 #include <drm/ttm/ttm_caching.h> 33 33 #include <drm/ttm/ttm_kmap_iter.h> 34 34 35 + struct ttm_backup; 35 36 struct ttm_device; 36 37 struct ttm_tt; 37 38 struct ttm_resource; 38 39 struct ttm_buffer_object; 39 40 struct ttm_operation_ctx; 41 + struct ttm_pool_tt_restore; 40 42 41 43 /** 42 44 * struct ttm_tt - This is a structure holding the pages, caching- and aperture ··· 87 85 * fault handling abuses the DMA api a bit and dma_map_attrs can't be 88 86 * used to assure pgprot always matches. 89 87 * 88 + * TTM_TT_FLAG_BACKED_UP: TTM internal only. This is set if the 89 + * struct ttm_tt has been (possibly partially) backed up. 90 + * 90 91 * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is 91 92 * set by TTM after ttm_tt_populate() has successfully returned, and is 92 93 * then unset when TTM calls ttm_tt_unpopulate(). 94 + * 93 95 */ 94 96 #define TTM_TT_FLAG_SWAPPED BIT(0) 95 97 #define TTM_TT_FLAG_ZERO_ALLOC BIT(1) 96 98 #define TTM_TT_FLAG_EXTERNAL BIT(2) 97 99 #define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3) 98 100 #define TTM_TT_FLAG_DECRYPTED BIT(4) 101 + #define TTM_TT_FLAG_BACKED_UP BIT(5) 99 102 100 - #define TTM_TT_FLAG_PRIV_POPULATED BIT(5) 103 + #define TTM_TT_FLAG_PRIV_POPULATED BIT(6) 101 104 uint32_t page_flags; 102 105 /** @num_pages: Number of pages in the page array. */ 103 106 uint32_t num_pages; ··· 113 106 /** @swap_storage: Pointer to shmem struct file for swap storage. */ 114 107 struct file *swap_storage; 115 108 /** 109 + * @backup: Pointer to backup struct for backed up tts. 110 + * Could be unified with @swap_storage. Meanwhile, the driver's 111 + * ttm_tt_create() callback is responsible for assigning 112 + * this field. 113 + */ 114 + struct ttm_backup *backup; 115 + /** 116 116 * @caching: The current caching state of the pages, see enum 117 117 * ttm_caching. 118 118 */ 119 119 enum ttm_caching caching; 120 + /** @restore: Partial restoration from backup state. 
TTM private */ 121 + struct ttm_pool_tt_restore *restore; 120 122 }; 121 123 122 124 /** ··· 145 129 return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED; 146 130 } 147 131 132 + /** 133 + * ttm_tt_is_swapped() - Whether the ttm_tt is swapped out or backed up 134 + * @tt: The struct ttm_tt. 135 + * 136 + * Return: true if swapped or backed up, false otherwise. 137 + */ 148 138 static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt) 149 139 { 150 - return tt->page_flags & TTM_TT_FLAG_SWAPPED; 140 + return tt->page_flags & (TTM_TT_FLAG_SWAPPED | TTM_TT_FLAG_BACKED_UP); 141 + } 142 + 143 + /** 144 + * ttm_tt_is_backed_up() - Whether the ttm_tt backed up 145 + * @tt: The struct ttm_tt. 146 + * 147 + * Return: true if swapped or backed up, false otherwise. 148 + */ 149 + static inline bool ttm_tt_is_backed_up(const struct ttm_tt *tt) 150 + { 151 + return tt->page_flags & TTM_TT_FLAG_BACKED_UP; 152 + } 153 + 154 + /** 155 + * ttm_tt_clear_backed_up() - Clear the ttm_tt backed-up status 156 + * @tt: The struct ttm_tt. 157 + * 158 + * Drivers can use this functionto clear the backed-up status, 159 + * for example before destroying or re-validating a purged tt. 160 + */ 161 + static inline void ttm_tt_clear_backed_up(struct ttm_tt *tt) 162 + { 163 + tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP; 151 164 } 152 165 153 166 /** ··· 280 235 struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt, 281 236 struct ttm_tt *tt); 282 237 unsigned long ttm_tt_pages_limit(void); 238 + 239 + /** 240 + * struct ttm_backup_flags - Flags to govern backup behaviour. 241 + * @purge: Free pages without backing up. Bypass pools. 242 + * @writeback: Attempt to copy contents directly to swap space, even 243 + * if that means blocking on writes to external memory. 
244 + */ 245 + struct ttm_backup_flags { 246 + u32 purge : 1; 247 + u32 writeback : 1; 248 + }; 249 + 250 + long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt, 251 + const struct ttm_backup_flags flags); 252 + 253 + int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt, 254 + const struct ttm_operation_ctx *ctx); 255 + 256 + int ttm_tt_setup_backup(struct ttm_tt *tt); 257 + 283 258 #if IS_ENABLED(CONFIG_AGP) 284 259 #include <linux/agp_backend.h> 285 260
+3 -1
include/linux/component.h
··· 3 3 #define COMPONENT_H 4 4 5 5 #include <linux/stddef.h> 6 - 6 + #include <linux/types.h> 7 7 8 8 struct device; 9 9 ··· 90 90 91 91 void component_master_del(struct device *, 92 92 const struct component_master_ops *); 93 + bool component_master_is_bound(struct device *parent, 94 + const struct component_master_ops *ops); 93 95 94 96 struct component_match; 95 97
-32
include/video/imx-ipu-image-convert.h
··· 41 41 void *ctx); 42 42 43 43 /** 44 - * ipu_image_convert_enum_format() - enumerate the image converter's 45 - * supported input and output pixel formats. 46 - * 47 - * @index: pixel format index 48 - * @fourcc: v4l2 fourcc for this index 49 - * 50 - * Returns 0 with a valid index and fills in v4l2 fourcc, -EINVAL otherwise. 51 - * 52 - * In V4L2, drivers can call ipu_image_enum_format() in .enum_fmt. 53 - */ 54 - int ipu_image_convert_enum_format(int index, u32 *fourcc); 55 - 56 - /** 57 44 * ipu_image_convert_adjust() - adjust input/output images to IPU restrictions. 58 45 * 59 46 * @in: input image format, adjusted on return ··· 162 175 enum ipu_rotate_mode rot_mode, 163 176 ipu_image_convert_cb_t complete, 164 177 void *complete_context); 165 - 166 - /** 167 - * ipu_image_convert_sync() - synchronous single image conversion request 168 - * 169 - * @ipu: the IPU handle to use for the conversion 170 - * @ic_task: the IC task to use for the conversion 171 - * @in: input image format 172 - * @out: output image format 173 - * @rot_mode: rotation mode 174 - * 175 - * Carry out a single image conversion. Returns when the conversion 176 - * completes. The input/output formats and rotation mode must already 177 - * meet IPU retrictions. The created context is automatically unprepared 178 - * and the run freed on return. 179 - */ 180 - int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 181 - struct ipu_image *in, struct ipu_image *out, 182 - enum ipu_rotate_mode rot_mode); 183 - 184 178 185 179 #endif /* __IMX_IPU_IMAGE_CONVERT_H__ */
-14
include/video/imx-ipu-v3.h
··· 262 262 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride, 263 263 u32 pixelformat); 264 264 void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id); 265 - int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch); 266 265 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize); 267 266 void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch); 268 267 void ipu_cpmem_set_rotation(struct ipuv3_channel *ch, ··· 269 270 int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch, 270 271 const struct ipu_rgb *rgb); 271 272 int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width); 272 - void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format); 273 273 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, 274 274 unsigned int uv_stride, 275 275 unsigned int u_offset, ··· 359 361 const struct v4l2_mbus_config *mbus_cfg, 360 362 const struct v4l2_mbus_framefmt *infmt, 361 363 const struct v4l2_mbus_framefmt *outfmt); 362 - bool ipu_csi_is_interlaced(struct ipu_csi *csi); 363 - void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w); 364 364 void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w); 365 365 void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert); 366 - void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active, 367 - u32 r_value, u32 g_value, u32 b_value, 368 - u32 pix_clk); 369 366 int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc, 370 367 struct v4l2_mbus_framefmt *mbus_fmt); 371 368 int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip, ··· 438 445 int in_width, int in_height, 439 446 int out_width, int out_height, 440 447 u32 rsc); 441 - int ipu_ic_task_graphics_init(struct ipu_ic *ic, 442 - const struct ipu_ic_colorspace *g_in_cs, 443 - bool galpha_en, u32 galpha, 444 - bool colorkey_en, u32 colorkey); 445 448 void ipu_ic_task_enable(struct ipu_ic *ic); 446 449 void ipu_ic_task_disable(struct ipu_ic *ic); 447 450 int 
ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel, ··· 456 467 void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field); 457 468 void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel); 458 469 void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres); 459 - void ipu_vdi_unsetup(struct ipu_vdi *vdi); 460 470 int ipu_vdi_enable(struct ipu_vdi *vdi); 461 471 int ipu_vdi_disable(struct ipu_vdi *vdi); 462 472 struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu); ··· 475 487 enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); 476 488 enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); 477 489 int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees, 478 - bool hflip, bool vflip); 479 - int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode, 480 490 bool hflip, bool vflip); 481 491 482 492 struct ipu_client_platformdata {