Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2025-10-02' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for v6.19:

UAPI Changes:

Cross-subsystem Changes:
- fbcon cleanups.
- Make drivers depend on FB_TILEBLITTING instead of selecting it,
and hide FB_MODE_HELPERS.

Core Changes:
- More preparations for Rust.
- Throttle dirty worker with vblank
- Use drm_for_each_bridge_in_chain_scoped in drm's bridge code and
assorted fixes.
- Ensure drm_client_modeset tests are enabled in UML.
- Rename ttm_bo_put to ttm_bo_fini, as a further step in removing the
TTM bo refcount.
- Add POST_LT_ADJ_REQ training sequence.
- Show list of removed but still allocated bridges.
- Add a simulated vblank interrupt for hardware without it,
and add some helpers to use them in vkms and hypervdrm.

Driver Changes:
- Assorted small fixes, cleanups and updates to host1x, tegra,
panthor, amdxdna, gud, vc4, ssd130x, ivpu, panfrost,
sysfb, bridge/sn65dsi86, solomon, ast, tidss.
- Convert drivers from using .round_rate() to .determine_rate()
- Add support for KD116N3730A07/A12, chromebook mt8189, JT101TM023,
LQ079L1SX01, raspberrypi 5" panels.
- Improve reclocking on tegra186+ with nouveau.
- Improve runtime pm in amdxdna.
- Add support for HTX_PAI in imx.
- Use a helper to calculate dumb buffer sizes in most drivers.

Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://lore.kernel.org/r/b412fb91-8545-466a-8102-d89c0f2758a7@linux.intel.com

+6738 -2831
+1 -1
.clang-format
··· 167 167 - 'drm_connector_for_each_possible_encoder' 168 168 - 'drm_exec_for_each_locked_object' 169 169 - 'drm_exec_for_each_locked_object_reverse' 170 - - 'drm_for_each_bridge_in_chain' 170 + - 'drm_for_each_bridge_in_chain_scoped' 171 171 - 'drm_for_each_connector_iter' 172 172 - 'drm_for_each_crtc' 173 173 - 'drm_for_each_crtc_reverse'
+12
Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml
··· 49 49 $ref: /schemas/graph.yaml#/properties/port 50 50 description: HDMI output port 51 51 52 + port@2: 53 + $ref: /schemas/graph.yaml#/properties/port 54 + description: Parallel audio input port 55 + 52 56 required: 53 57 - port@0 54 58 - port@1 ··· 100 96 reg = <1>; 101 97 endpoint { 102 98 remote-endpoint = <&hdmi0_con>; 99 + }; 100 + }; 101 + 102 + port@2 { 103 + reg = <2>; 104 + 105 + endpoint { 106 + remote-endpoint = <&pai_to_hdmi_tx>; 103 107 }; 104 108 }; 105 109 };
+69
Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/imx/fsl,imx8mp-hdmi-pai.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Freescale i.MX8MP HDMI Parallel Audio Interface 8 + 9 + maintainers: 10 + - Shengjiu Wang <shengjiu.wang@nxp.com> 11 + 12 + description: 13 + The HDMI TX Parallel Audio Interface (HTX_PAI) is a bridge between the 14 + Audio Subsystem to the HDMI TX Controller. 15 + 16 + properties: 17 + compatible: 18 + const: fsl,imx8mp-hdmi-pai 19 + 20 + reg: 21 + maxItems: 1 22 + 23 + interrupts: 24 + maxItems: 1 25 + 26 + clocks: 27 + maxItems: 1 28 + 29 + clock-names: 30 + const: apb 31 + 32 + power-domains: 33 + maxItems: 1 34 + 35 + port: 36 + $ref: /schemas/graph.yaml#/properties/port 37 + description: Output to the HDMI TX controller. 38 + 39 + required: 40 + - compatible 41 + - reg 42 + - interrupts 43 + - clocks 44 + - clock-names 45 + - power-domains 46 + - port 47 + 48 + additionalProperties: false 49 + 50 + examples: 51 + - | 52 + #include <dt-bindings/clock/imx8mp-clock.h> 53 + #include <dt-bindings/power/imx8mp-power.h> 54 + 55 + audio-bridge@32fc4800 { 56 + compatible = "fsl,imx8mp-hdmi-pai"; 57 + reg = <0x32fc4800 0x800>; 58 + interrupt-parent = <&irqsteer_hdmi>; 59 + interrupts = <14>; 60 + clocks = <&clk IMX8MP_CLK_HDMI_APB>; 61 + clock-names = "apb"; 62 + power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_PAI>; 63 + 64 + port { 65 + pai_to_hdmi_tx: endpoint { 66 + remote-endpoint = <&hdmi_tx_from_pai>; 67 + }; 68 + }; 69 + };
+2
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
··· 20 20 - bananapi,lhr050h41 21 21 - bestar,bsd1218-a101kl68 22 22 - feixin,k101-im2byl02 23 + - raspberrypi,dsi-5inch 23 24 - raspberrypi,dsi-7inch 24 25 - startek,kd050hdfia020 25 26 - tdo,tl050hdv35 ··· 31 30 maxItems: 1 32 31 33 32 backlight: true 33 + port: true 34 34 power-supply: true 35 35 reset-gpios: true 36 36 rotation: true
+2
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
··· 184 184 - innolux,n156bge-l21 185 185 # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel 186 186 - innolux,zj070na-01p 187 + # JuTouch Technology Co., Ltd. 10" JT101TM023 WXGA (1280 x 800) LVDS panel 188 + - jutouch,jt101tm023 187 189 # Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel 188 190 - koe,tx14d24vm1bpa 189 191 # Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
+99
Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/sharp,lq079l1sx01.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Sharp Microelectronics 7.9" WQXGA TFT LCD panel 8 + 9 + maintainers: 10 + - Svyatoslav Ryhel <clamor95@gmail.com> 11 + 12 + description: > 13 + This panel requires a dual-channel DSI host to operate and it supports 14 + only left-right split mode, where each channel drives the left or right 15 + half of the screen and only video mode. 16 + 17 + Each of the DSI channels controls a separate DSI peripheral. 18 + The peripheral driven by the first link (DSI-LINK1), left one, is 19 + considered the primary peripheral and controls the device. 20 + 21 + allOf: 22 + - $ref: panel-common-dual.yaml# 23 + 24 + properties: 25 + compatible: 26 + const: sharp,lq079l1sx01 27 + 28 + reg: 29 + maxItems: 1 30 + 31 + avdd-supply: 32 + description: regulator that supplies the analog voltage 33 + 34 + vddio-supply: 35 + description: regulator that supplies the I/O voltage 36 + 37 + vsp-supply: 38 + description: positive boost supply regulator 39 + 40 + vsn-supply: 41 + description: negative boost supply regulator 42 + 43 + reset-gpios: 44 + maxItems: 1 45 + 46 + backlight: true 47 + ports: true 48 + 49 + required: 50 + - compatible 51 + - reg 52 + - avdd-supply 53 + - vddio-supply 54 + - ports 55 + 56 + additionalProperties: false 57 + 58 + examples: 59 + - | 60 + #include <dt-bindings/gpio/gpio.h> 61 + 62 + dsi { 63 + #address-cells = <1>; 64 + #size-cells = <0>; 65 + 66 + panel@0 { 67 + compatible = "sharp,lq079l1sx01"; 68 + reg = <0>; 69 + 70 + reset-gpios = <&gpio 59 GPIO_ACTIVE_LOW>; 71 + 72 + avdd-supply = <&avdd_lcd>; 73 + vddio-supply = <&vdd_lcd_io>; 74 + vsp-supply = <&vsp_5v5_lcd>; 75 + vsn-supply = <&vsn_5v5_lcd>; 76 + 77 + backlight = <&backlight>; 78 + 79 + ports { 80 + #address-cells = <1>; 81 + #size-cells = <0>; 82 + 83 + port@0 { 84 
+ reg = <0>; 85 + panel_in0: endpoint { 86 + remote-endpoint = <&dsi0_out>; 87 + }; 88 + }; 89 + 90 + port@1 { 91 + reg = <1>; 92 + panel_in1: endpoint { 93 + remote-endpoint = <&dsi1_out>; 94 + }; 95 + }; 96 + }; 97 + }; 98 + }; 99 + ...
+2
Documentation/devicetree/bindings/vendor-prefixes.yaml
··· 835 835 description: JOZ BV 836 836 "^jty,.*": 837 837 description: JTY 838 + "^jutouch,.*": 839 + description: JuTouch Technology Co., Ltd. 838 840 "^kam,.*": 839 841 description: Kamstrup A/S 840 842 "^karo,.*":
+12
Documentation/gpu/drm-kms-helpers.rst
··· 92 92 .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c 93 93 :export: 94 94 95 + VBLANK Helper Reference 96 + ----------------------- 97 + 98 + .. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c 99 + :doc: overview 100 + 101 + .. kernel-doc:: include/drm/drm_vblank_helper.h 102 + :internal: 103 + 104 + .. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c 105 + :export: 106 + 95 107 Simple KMS Helper Reference 96 108 =========================== 97 109
+37
Documentation/gpu/todo.rst
··· 623 623 624 624 Level: Advanced 625 625 626 + Implement a new DUMB_CREATE2 ioctl 627 + ---------------------------------- 628 + 629 + The current DUMB_CREATE ioctl is not well defined. Instead of a pixel and 630 + framebuffer format, it only accepts a color mode of vague semantics. Assuming 631 + a linear framebuffer, the color mode gives an idea of the supported pixel 632 + format. But userspace effectively has to guess the correct values. It really 633 + only works reliably with framebuffers in XRGB8888. Userspace has begun to 634 + work around these limitations by computing arbitrary format's buffer sizes and 635 + calculating their sizes in terms of XRGB8888 pixels. 636 + 637 + One possible solution is a new ioctl DUMB_CREATE2. It should accept a DRM 638 + format and a format modifier to resolve the color mode's ambiguity. As 639 + framebuffers can be multi-planar, the new ioctl has to return the buffer size, 640 + pitch and GEM handle for each individual color plane. 641 + 642 + In the first step, the new ioctl can be limited to the current features of 643 + the existing DUMB_CREATE. Individual drivers can then be extended to support 644 + multi-planar formats. Rockchip might require this and would be a good candidate. 645 + 646 + It might also be helpful to userspace to query information about the size of 647 + a potential buffer, if allocated. Userspace would supply geometry and format; 648 + the kernel would return minimal allocation sizes and scanline pitch. There is 649 + interest to allocate that memory from another device and provide it to the 650 + DRM driver (say via dma-buf). 651 + 652 + Another requested feature is the ability to allocate a buffer by size, without 653 + format. Accelerators use this for their buffer allocation and it could likely be 654 + generalized. 655 + 656 + In addition to the kernel implementation, there must be user-space support 657 + for the new ioctl. There's code in Mesa that might be able to use the new 658 + call. 
659 + 660 + Contact: Thomas Zimmermann <tzimmermann@suse.de> 661 + 662 + Level: Advanced 626 663 627 664 Better Testing 628 665 ==============
+1
drivers/accel/amdxdna/Makefile
··· 14 14 amdxdna_mailbox.o \ 15 15 amdxdna_mailbox_helper.o \ 16 16 amdxdna_pci_drv.o \ 17 + amdxdna_pm.o \ 17 18 amdxdna_sysfs.o \ 18 19 amdxdna_ubuf.o \ 19 20 npu1_regs.o \
+31 -11
drivers/accel/amdxdna/aie2_ctx.c
··· 21 21 #include "amdxdna_gem.h" 22 22 #include "amdxdna_mailbox.h" 23 23 #include "amdxdna_pci_drv.h" 24 + #include "amdxdna_pm.h" 24 25 25 26 static bool force_cmdlist; 26 27 module_param(force_cmdlist, bool, 0600); ··· 89 88 goto out; 90 89 } 91 90 92 - ret = aie2_config_cu(hwctx); 91 + ret = aie2_config_cu(hwctx, NULL); 93 92 if (ret) { 94 93 XDNA_ERR(xdna, "Config cu failed, ret %d", ret); 95 94 goto out; ··· 168 167 169 168 int aie2_hwctx_resume(struct amdxdna_client *client) 170 169 { 171 - struct amdxdna_dev *xdna = client->xdna; 172 - 173 170 /* 174 171 * The resume path cannot guarantee that mailbox channel can be 175 172 * regenerated. If this happen, when submit message to this 176 173 * mailbox channel, error will return. 177 174 */ 178 - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); 179 175 return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb); 180 176 } 181 177 ··· 182 184 struct dma_fence *fence = job->fence; 183 185 184 186 trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq); 187 + 188 + amdxdna_pm_suspend_put(job->hwctx->client->xdna); 185 189 job->hwctx->priv->completed++; 186 190 dma_fence_signal(fence); 187 191 ··· 531 531 .num_rqs = DRM_SCHED_PRIORITY_COUNT, 532 532 .credit_limit = HWCTX_MAX_CMDS, 533 533 .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT), 534 - .name = hwctx->name, 534 + .name = "amdxdna_js", 535 535 .dev = xdna->ddev.dev, 536 536 }; 537 537 struct drm_gpu_scheduler *sched; ··· 697 697 kfree(hwctx->cus); 698 698 } 699 699 700 + static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size) 701 + { 702 + struct amdxdna_hwctx *hwctx = handle; 703 + 704 + amdxdna_pm_suspend_put(hwctx->client->xdna); 705 + return 0; 706 + } 707 + 700 708 static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size) 701 709 { 702 710 struct amdxdna_hwctx_param_config_cu *config = buf; ··· 736 728 if (!hwctx->cus) 737 729 return -ENOMEM; 738 730 739 - ret = 
aie2_config_cu(hwctx); 731 + ret = amdxdna_pm_resume_get(xdna); 732 + if (ret) 733 + goto free_cus; 734 + 735 + ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler); 740 736 if (ret) { 741 737 XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret); 742 - goto free_cus; 738 + goto pm_suspend_put; 743 739 } 744 740 745 741 wmb(); /* To avoid locking in command submit when check status */ ··· 751 739 752 740 return 0; 753 741 742 + pm_suspend_put: 743 + amdxdna_pm_suspend_put(xdna); 754 744 free_cus: 755 745 kfree(hwctx->cus); 756 746 hwctx->cus = NULL; ··· 876 862 goto free_chain; 877 863 } 878 864 865 + ret = amdxdna_pm_resume_get(xdna); 866 + if (ret) 867 + goto cleanup_job; 868 + 879 869 retry: 880 870 ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx); 881 871 if (ret) { 882 872 XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret); 883 - goto cleanup_job; 873 + goto suspend_put; 884 874 } 885 875 886 876 for (i = 0; i < job->bo_cnt; i++) { ··· 892 874 if (ret) { 893 875 XDNA_WARN(xdna, "Failed to reserve fences %d", ret); 894 876 drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx); 895 - goto cleanup_job; 877 + goto suspend_put; 896 878 } 897 879 } 898 880 ··· 907 889 msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); 908 890 } else if (time_after(jiffies, timeout)) { 909 891 ret = -ETIME; 910 - goto cleanup_job; 892 + goto suspend_put; 911 893 } 912 894 913 895 ret = aie2_populate_range(abo); 914 896 if (ret) 915 - goto cleanup_job; 897 + goto suspend_put; 916 898 goto retry; 917 899 } 918 900 } ··· 938 920 939 921 return 0; 940 922 923 + suspend_put: 924 + amdxdna_pm_suspend_put(xdna); 941 925 cleanup_job: 942 926 drm_sched_job_cleanup(&job->base); 943 927 free_chain:
+12 -16
drivers/accel/amdxdna/aie2_message.c
··· 37 37 if (!ndev->mgmt_chann) 38 38 return -ENODEV; 39 39 40 - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); 40 + drm_WARN_ON(&xdna->ddev, xdna->rpm_on && !mutex_is_locked(&xdna->dev_lock)); 41 41 ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg); 42 42 if (ret == -ETIME) { 43 43 xdna_mailbox_stop_channel(ndev->mgmt_chann); ··· 377 377 return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT); 378 378 } 379 379 380 - int aie2_config_cu(struct amdxdna_hwctx *hwctx) 380 + int aie2_config_cu(struct amdxdna_hwctx *hwctx, 381 + int (*notify_cb)(void *, void __iomem *, size_t)) 381 382 { 382 383 struct mailbox_channel *chann = hwctx->priv->mbox_chann; 383 384 struct amdxdna_dev *xdna = hwctx->client->xdna; 384 385 u32 shift = xdna->dev_info->dev_mem_buf_shift; 385 - DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU); 386 + struct config_cu_req req = { 0 }; 387 + struct xdna_mailbox_msg msg; 386 388 struct drm_gem_object *gobj; 387 389 struct amdxdna_gem_obj *abo; 388 - int ret, i; 390 + int i; 389 391 390 392 if (!chann) 391 393 return -ENODEV; ··· 425 423 } 426 424 req.num_cus = hwctx->cus->num_cus; 427 425 428 - ret = xdna_send_msg_wait(xdna, chann, &msg); 429 - if (ret == -ETIME) 430 - aie2_destroy_context(xdna->dev_handle, hwctx); 431 - 432 - if (resp.status == AIE2_STATUS_SUCCESS) { 433 - XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret); 434 - return 0; 435 - } 436 - 437 - XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d", 438 - msg.opcode, resp.status, ret); 439 - return ret; 426 + msg.send_data = (u8 *)&req; 427 + msg.send_size = sizeof(req); 428 + msg.handle = hwctx; 429 + msg.opcode = MSG_OP_CONFIG_CU; 430 + msg.notify_cb = notify_cb; 431 + return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT); 440 432 } 441 433 442 434 int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+44 -41
drivers/accel/amdxdna/aie2_pci.c
··· 25 25 #include "amdxdna_gem.h" 26 26 #include "amdxdna_mailbox.h" 27 27 #include "amdxdna_pci_drv.h" 28 + #include "amdxdna_pm.h" 28 29 29 30 static int aie2_max_col = XRS_MAX_COL; 30 31 module_param(aie2_max_col, uint, 0600); ··· 224 223 return ret; 225 224 } 226 225 227 - if (!ndev->async_events) 228 - return 0; 229 - 230 - ret = aie2_error_async_events_send(ndev); 231 - if (ret) { 232 - XDNA_ERR(ndev->xdna, "Send async events failed"); 233 - return ret; 234 - } 235 - 236 226 return 0; 237 227 } 238 228 ··· 248 256 XDNA_ERR(ndev->xdna, "Query AIE metadata failed"); 249 257 return ret; 250 258 } 259 + 260 + ndev->total_col = min(aie2_max_col, ndev->metadata.cols); 251 261 252 262 return 0; 253 263 } ··· 332 338 ndev->mbox = NULL; 333 339 aie2_psp_stop(ndev->psp_hdl); 334 340 aie2_smu_fini(ndev); 341 + aie2_error_async_events_free(ndev); 335 342 pci_disable_device(pdev); 336 343 337 344 ndev->dev_status = AIE2_DEV_INIT; ··· 419 424 goto destroy_mgmt_chann; 420 425 } 421 426 427 + ret = aie2_mgmt_fw_query(ndev); 428 + if (ret) { 429 + XDNA_ERR(xdna, "failed to query fw, ret %d", ret); 430 + goto destroy_mgmt_chann; 431 + } 432 + 433 + ret = aie2_error_async_events_alloc(ndev); 434 + if (ret) { 435 + XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret); 436 + goto destroy_mgmt_chann; 437 + } 438 + 422 439 ndev->dev_status = AIE2_DEV_START; 423 440 424 441 return 0; ··· 466 459 struct amdxdna_client *client; 467 460 int ret; 468 461 469 - guard(mutex)(&xdna->dev_lock); 470 462 ret = aie2_hw_start(xdna); 471 463 if (ret) { 472 464 XDNA_ERR(xdna, "Start hardware failed, %d", ret); ··· 571 565 goto release_fw; 572 566 } 573 567 574 - ret = aie2_mgmt_fw_query(ndev); 575 - if (ret) { 576 - XDNA_ERR(xdna, "Query firmware failed, ret %d", ret); 577 - goto stop_hw; 578 - } 579 - ndev->total_col = min(aie2_max_col, ndev->metadata.cols); 580 - 581 568 xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1; 582 569 for (i = 0; i < xrs_cfg.clk_list.num_levels; i++) 583 
570 xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk; ··· 586 587 goto stop_hw; 587 588 } 588 589 589 - ret = aie2_error_async_events_alloc(ndev); 590 - if (ret) { 591 - XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret); 592 - goto stop_hw; 593 - } 594 - 595 - ret = aie2_error_async_events_send(ndev); 596 - if (ret) { 597 - XDNA_ERR(xdna, "Send async events failed, ret %d", ret); 598 - goto async_event_free; 599 - } 600 - 601 - /* Issue a command to make sure firmware handled async events */ 602 - ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver); 603 - if (ret) { 604 - XDNA_ERR(xdna, "Re-query firmware version failed"); 605 - goto async_event_free; 606 - } 607 - 608 590 release_firmware(fw); 591 + amdxdna_pm_init(xdna); 609 592 return 0; 610 593 611 - async_event_free: 612 - aie2_error_async_events_free(ndev); 613 594 stop_hw: 614 595 aie2_hw_stop(xdna); 615 596 release_fw: ··· 600 621 601 622 static void aie2_fini(struct amdxdna_dev *xdna) 602 623 { 603 - struct amdxdna_dev_hdl *ndev = xdna->dev_handle; 604 - 624 + amdxdna_pm_fini(xdna); 605 625 aie2_hw_stop(xdna); 606 - aie2_error_async_events_free(ndev); 607 626 } 608 627 609 628 static int aie2_get_aie_status(struct amdxdna_client *client, ··· 833 856 if (!drm_dev_enter(&xdna->ddev, &idx)) 834 857 return -ENODEV; 835 858 859 + ret = amdxdna_pm_resume_get(xdna); 860 + if (ret) 861 + goto dev_exit; 862 + 836 863 switch (args->param) { 837 864 case DRM_AMDXDNA_QUERY_AIE_STATUS: 838 865 ret = aie2_get_aie_status(client, args); ··· 863 882 XDNA_ERR(xdna, "Not supported request parameter %u", args->param); 864 883 ret = -EOPNOTSUPP; 865 884 } 885 + 886 + amdxdna_pm_suspend_put(xdna); 866 887 XDNA_DBG(xdna, "Got param %d", args->param); 867 888 889 + dev_exit: 868 890 drm_dev_exit(idx); 869 891 return ret; 870 892 } ··· 881 897 int ret; 882 898 883 899 drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); 900 + 901 + if (args->element_size > SZ_4K || args->num_element > 
SZ_1K) { 902 + XDNA_DBG(xdna, "Invalid element size %d or number of element %d", 903 + args->element_size, args->num_element); 904 + return -EINVAL; 905 + } 884 906 885 907 array_args.element_size = min(args->element_size, 886 908 sizeof(struct amdxdna_drm_hwctx_entry)); ··· 916 926 if (!drm_dev_enter(&xdna->ddev, &idx)) 917 927 return -ENODEV; 918 928 929 + ret = amdxdna_pm_resume_get(xdna); 930 + if (ret) 931 + goto dev_exit; 932 + 919 933 switch (args->param) { 920 934 case DRM_AMDXDNA_HW_CONTEXT_ALL: 921 935 ret = aie2_query_ctx_status_array(client, args); ··· 928 934 XDNA_ERR(xdna, "Not supported request parameter %u", args->param); 929 935 ret = -EOPNOTSUPP; 930 936 } 937 + 938 + amdxdna_pm_suspend_put(xdna); 931 939 XDNA_DBG(xdna, "Got param %d", args->param); 932 940 941 + dev_exit: 933 942 drm_dev_exit(idx); 934 943 return ret; 935 944 } ··· 971 974 if (!drm_dev_enter(&xdna->ddev, &idx)) 972 975 return -ENODEV; 973 976 977 + ret = amdxdna_pm_resume_get(xdna); 978 + if (ret) 979 + goto dev_exit; 980 + 974 981 switch (args->param) { 975 982 case DRM_AMDXDNA_SET_POWER_MODE: 976 983 ret = aie2_set_power_mode(client, args); ··· 985 984 break; 986 985 } 987 986 987 + amdxdna_pm_suspend_put(xdna); 988 + dev_exit: 988 989 drm_dev_exit(idx); 989 990 return ret; 990 991 }
+2 -1
drivers/accel/amdxdna/aie2_pci.h
··· 272 272 int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled); 273 273 int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size, 274 274 void *handle, int (*cb)(void*, void __iomem *, size_t)); 275 - int aie2_config_cu(struct amdxdna_hwctx *hwctx); 275 + int aie2_config_cu(struct amdxdna_hwctx *hwctx, 276 + int (*notify_cb)(void *, void __iomem *, size_t)); 276 277 int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, 277 278 int (*notify_cb)(void *, void __iomem *, size_t)); 278 279 int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+24 -4
drivers/accel/amdxdna/aie2_smu.c
··· 11 11 12 12 #include "aie2_pci.h" 13 13 #include "amdxdna_pci_drv.h" 14 + #include "amdxdna_pm.h" 14 15 15 16 #define SMU_RESULT_OK 1 16 17 ··· 60 59 u32 freq; 61 60 int ret; 62 61 62 + ret = amdxdna_pm_resume_get(ndev->xdna); 63 + if (ret) 64 + return ret; 65 + 63 66 ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ, 64 67 ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq); 65 68 if (ret) { 66 69 XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n", 67 70 ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret); 68 - return ret; 71 + goto suspend_put; 69 72 } 70 73 ndev->npuclk_freq = freq; 71 74 ··· 78 73 if (ret) { 79 74 XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n", 80 75 ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret); 81 - return ret; 76 + goto suspend_put; 82 77 } 78 + 79 + amdxdna_pm_suspend_put(ndev->xdna); 83 80 ndev->hclk_freq = freq; 84 81 ndev->dpm_level = dpm_level; 85 82 ··· 89 82 ndev->npuclk_freq, ndev->hclk_freq); 90 83 91 84 return 0; 85 + 86 + suspend_put: 87 + amdxdna_pm_suspend_put(ndev->xdna); 88 + return ret; 92 89 } 93 90 94 91 int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) 95 92 { 96 93 int ret; 97 94 95 + ret = amdxdna_pm_resume_get(ndev->xdna); 96 + if (ret) 97 + return ret; 98 + 98 99 ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL); 99 100 if (ret) { 100 101 XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ", 101 102 dpm_level, ret); 102 - return ret; 103 + goto suspend_put; 103 104 } 104 105 105 106 ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL); 106 107 if (ret) { 107 108 XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d", 108 109 dpm_level, ret); 109 - return ret; 110 + goto suspend_put; 110 111 } 111 112 113 + amdxdna_pm_suspend_put(ndev->xdna); 112 114 ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk; 113 115 ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk; 114 116 ndev->dpm_level = dpm_level; ··· 126 110 
ndev->npuclk_freq, ndev->hclk_freq); 127 111 128 112 return 0; 113 + 114 + suspend_put: 115 + amdxdna_pm_suspend_put(ndev->xdna); 116 + return ret; 129 117 } 130 118 131 119 int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
+29 -33
drivers/accel/amdxdna/amdxdna_ctx.c
··· 161 161 if (args->ext || args->ext_flags) 162 162 return -EINVAL; 163 163 164 - if (!drm_dev_enter(dev, &idx)) 165 - return -ENODEV; 166 - 167 164 hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL); 168 - if (!hwctx) { 169 - ret = -ENOMEM; 170 - goto exit; 171 - } 165 + if (!hwctx) 166 + return -ENOMEM; 172 167 173 168 if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) { 174 169 XDNA_ERR(xdna, "Access QoS info failed"); 175 - ret = -EFAULT; 176 - goto free_hwctx; 170 + kfree(hwctx); 171 + return -EFAULT; 177 172 } 178 173 179 174 hwctx->client = client; ··· 176 181 hwctx->num_tiles = args->num_tiles; 177 182 hwctx->mem_size = args->mem_size; 178 183 hwctx->max_opc = args->max_opc; 184 + 185 + guard(mutex)(&xdna->dev_lock); 186 + 187 + if (!drm_dev_enter(dev, &idx)) { 188 + ret = -ENODEV; 189 + goto free_hwctx; 190 + } 191 + 192 + ret = xdna->dev_info->ops->hwctx_init(hwctx); 193 + if (ret) { 194 + XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret); 195 + goto dev_exit; 196 + } 197 + 198 + hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id); 199 + if (!hwctx->name) { 200 + ret = -ENOMEM; 201 + goto fini_hwctx; 202 + } 203 + 179 204 ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx, 180 205 XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID), 181 206 &client->next_hwctxid, GFP_KERNEL); 182 207 if (ret < 0) { 183 208 XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret); 184 - goto free_hwctx; 185 - } 186 - 187 - hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id); 188 - if (!hwctx->name) { 189 - ret = -ENOMEM; 190 - goto rm_id; 191 - } 192 - 193 - mutex_lock(&xdna->dev_lock); 194 - ret = xdna->dev_info->ops->hwctx_init(hwctx); 195 - if (ret) { 196 - mutex_unlock(&xdna->dev_lock); 197 - XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret); 198 209 goto free_name; 199 210 } 211 + 200 212 args->handle = hwctx->id; 201 213 args->syncobj_handle = hwctx->syncobj_hdl; 202 - 
mutex_unlock(&xdna->dev_lock); 203 214 204 215 atomic64_set(&hwctx->job_submit_cnt, 0); 205 216 atomic64_set(&hwctx->job_free_cnt, 0); ··· 215 214 216 215 free_name: 217 216 kfree(hwctx->name); 218 - rm_id: 219 - xa_erase(&client->hwctx_xa, hwctx->id); 217 + fini_hwctx: 218 + xdna->dev_info->ops->hwctx_fini(hwctx); 219 + dev_exit: 220 + drm_dev_exit(idx); 220 221 free_hwctx: 221 222 kfree(hwctx); 222 - exit: 223 - drm_dev_exit(idx); 224 223 return ret; 225 224 } 226 225 ··· 432 431 goto unlock_srcu; 433 432 } 434 433 435 - if (hwctx->status != HWCTX_STAT_READY) { 436 - XDNA_ERR(xdna, "HW Context is not ready"); 437 - ret = -EINVAL; 438 - goto unlock_srcu; 439 - } 440 434 441 435 job->hwctx = hwctx; 442 436 job->mm = current->mm;
+20 -27
drivers/accel/amdxdna/amdxdna_gem.c
··· 392 392 .vunmap = drm_gem_dmabuf_vunmap, 393 393 }; 394 394 395 - static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map) 395 + static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr) 396 396 { 397 - struct amdxdna_gem_obj *abo = to_xdna_obj(obj); 398 - 399 - iosys_map_clear(map); 400 - 401 - dma_resv_assert_held(obj->resv); 397 + struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); 398 + int ret; 402 399 403 400 if (is_import_bo(abo)) 404 - dma_buf_vmap(abo->dma_buf, map); 401 + ret = dma_buf_vmap_unlocked(abo->dma_buf, &map); 405 402 else 406 - drm_gem_shmem_object_vmap(obj, map); 403 + ret = drm_gem_vmap(to_gobj(abo), &map); 407 404 408 - if (!map->vaddr) 409 - return -ENOMEM; 410 - 411 - return 0; 405 + *vaddr = map.vaddr; 406 + return ret; 412 407 } 413 408 414 - static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map) 409 + static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo) 415 410 { 416 - struct amdxdna_gem_obj *abo = to_xdna_obj(obj); 411 + struct iosys_map map; 417 412 418 - dma_resv_assert_held(obj->resv); 413 + if (!abo->mem.kva) 414 + return; 415 + 416 + iosys_map_set_vaddr(&map, abo->mem.kva); 419 417 420 418 if (is_import_bo(abo)) 421 - dma_buf_vunmap(abo->dma_buf, map); 419 + dma_buf_vunmap_unlocked(abo->dma_buf, &map); 422 420 else 423 - drm_gem_shmem_object_vunmap(obj, map); 421 + drm_gem_vunmap(to_gobj(abo), &map); 424 422 } 425 423 426 424 static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags) ··· 453 455 { 454 456 struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); 455 457 struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); 456 - struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva); 457 458 458 459 XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr); 459 460 ··· 465 468 if (abo->type == AMDXDNA_BO_DEV_HEAP) 466 469 drm_mm_takedown(&abo->mm); 467 470 468 - drm_gem_vunmap(gobj, &map); 471 + 
amdxdna_gem_obj_vunmap(abo); 469 472 mutex_destroy(&abo->lock); 470 473 471 474 if (is_import_bo(abo)) { ··· 486 489 .pin = drm_gem_shmem_object_pin, 487 490 .unpin = drm_gem_shmem_object_unpin, 488 491 .get_sg_table = drm_gem_shmem_object_get_sg_table, 489 - .vmap = amdxdna_gem_obj_vmap, 490 - .vunmap = amdxdna_gem_obj_vunmap, 492 + .vmap = drm_gem_shmem_object_vmap, 493 + .vunmap = drm_gem_shmem_object_vunmap, 491 494 .mmap = amdxdna_gem_obj_mmap, 492 495 .vm_ops = &drm_gem_shmem_vm_ops, 493 496 .export = amdxdna_gem_prime_export, ··· 660 663 struct drm_file *filp) 661 664 { 662 665 struct amdxdna_client *client = filp->driver_priv; 663 - struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); 664 666 struct amdxdna_dev *xdna = to_xdna_dev(dev); 665 667 struct amdxdna_gem_obj *abo; 666 668 int ret; ··· 688 692 abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base; 689 693 drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size); 690 694 691 - ret = drm_gem_vmap(to_gobj(abo), &map); 695 + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); 692 696 if (ret) { 693 697 XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret); 694 698 goto release_obj; 695 699 } 696 - abo->mem.kva = map.vaddr; 697 700 698 701 client->dev_heap = abo; 699 702 drm_gem_object_get(to_gobj(abo)); ··· 743 748 struct amdxdna_drm_create_bo *args, 744 749 struct drm_file *filp) 745 750 { 746 - struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); 747 751 struct amdxdna_dev *xdna = to_xdna_dev(dev); 748 752 struct amdxdna_gem_obj *abo; 749 753 int ret; ··· 764 770 abo->type = AMDXDNA_BO_CMD; 765 771 abo->client = filp->driver_priv; 766 772 767 - ret = drm_gem_vmap(to_gobj(abo), &map); 773 + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); 768 774 if (ret) { 769 775 XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); 770 776 goto release_obj; 771 777 } 772 - abo->mem.kva = map.vaddr; 773 778 774 779 return abo; 775 780
+8 -5
drivers/accel/amdxdna/amdxdna_mailbox.c
··· 194 194 { 195 195 MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x", 196 196 mb_msg->pkg.header.id, mb_msg->pkg.header.opcode); 197 - mb_msg->notify_cb(mb_msg->handle, NULL, 0); 197 + if (mb_msg->notify_cb) 198 + mb_msg->notify_cb(mb_msg->handle, NULL, 0); 198 199 kfree(mb_msg); 199 200 } 200 201 ··· 249 248 { 250 249 struct mailbox_msg *mb_msg; 251 250 int msg_id; 252 - int ret; 251 + int ret = 0; 253 252 254 253 msg_id = header->id; 255 254 if (!mailbox_validate_msgid(msg_id)) { ··· 266 265 267 266 MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x", 268 267 header->opcode, header->total_size, header->id); 269 - ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size); 270 - if (unlikely(ret)) 271 - MB_ERR(mb_chann, "Message callback ret %d", ret); 268 + if (mb_msg->notify_cb) { 269 + ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size); 270 + if (unlikely(ret)) 271 + MB_ERR(mb_chann, "Message callback ret %d", ret); 272 + } 272 273 273 274 kfree(mb_msg); 274 275 return ret;
+5 -51
drivers/accel/amdxdna/amdxdna_pci_drv.c
··· 13 13 #include <drm/gpu_scheduler.h> 14 14 #include <linux/iommu.h> 15 15 #include <linux/pci.h> 16 - #include <linux/pm_runtime.h> 17 16 18 17 #include "amdxdna_ctx.h" 19 18 #include "amdxdna_gem.h" 20 19 #include "amdxdna_pci_drv.h" 21 - 22 - #define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */ 20 + #include "amdxdna_pm.h" 23 21 24 22 MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin"); 25 23 MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin"); ··· 59 61 struct amdxdna_client *client; 60 62 int ret; 61 63 62 - ret = pm_runtime_resume_and_get(ddev->dev); 63 - if (ret) { 64 - XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret); 65 - return ret; 66 - } 67 - 68 64 client = kzalloc(sizeof(*client), GFP_KERNEL); 69 - if (!client) { 70 - ret = -ENOMEM; 71 - goto put_rpm; 72 - } 65 + if (!client) 66 + return -ENOMEM; 73 67 74 68 client->pid = pid_nr(rcu_access_pointer(filp->pid)); 75 69 client->xdna = xdna; ··· 96 106 iommu_sva_unbind_device(client->sva); 97 107 failed: 98 108 kfree(client); 99 - put_rpm: 100 - pm_runtime_mark_last_busy(ddev->dev); 101 - pm_runtime_put_autosuspend(ddev->dev); 102 109 103 110 return ret; 104 111 } ··· 117 130 118 131 XDNA_DBG(xdna, "pid %d closed", client->pid); 119 132 kfree(client); 120 - pm_runtime_mark_last_busy(ddev->dev); 121 - pm_runtime_put_autosuspend(ddev->dev); 122 133 } 123 134 124 135 static int amdxdna_flush(struct file *f, fl_owner_t id) ··· 295 310 goto failed_dev_fini; 296 311 } 297 312 298 - pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY); 299 - pm_runtime_use_autosuspend(dev); 300 - pm_runtime_allow(dev); 301 - 302 313 ret = drm_dev_register(&xdna->ddev, 0); 303 314 if (ret) { 304 315 XDNA_ERR(xdna, "DRM register failed, ret %d", ret); 305 - pm_runtime_forbid(dev); 306 316 goto failed_sysfs_fini; 307 317 } 308 318 309 - pm_runtime_mark_last_busy(dev); 310 - pm_runtime_put_autosuspend(dev); 311 319 return 0; 312 320 313 321 failed_sysfs_fini: ··· 317 339 static void amdxdna_remove(struct pci_dev *pdev) 318 340 { 319 
341 struct amdxdna_dev *xdna = pci_get_drvdata(pdev); 320 - struct device *dev = &pdev->dev; 321 342 struct amdxdna_client *client; 322 343 323 344 destroy_workqueue(xdna->notifier_wq); 324 - 325 - pm_runtime_get_noresume(dev); 326 - pm_runtime_forbid(dev); 327 345 328 346 drm_dev_unplug(&xdna->ddev); 329 347 amdxdna_sysfs_fini(xdna); ··· 339 365 mutex_unlock(&xdna->dev_lock); 340 366 } 341 367 342 - static int amdxdna_pmops_suspend(struct device *dev) 343 - { 344 - struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); 345 - 346 - if (!xdna->dev_info->ops->suspend) 347 - return -EOPNOTSUPP; 348 - 349 - return xdna->dev_info->ops->suspend(xdna); 350 - } 351 - 352 - static int amdxdna_pmops_resume(struct device *dev) 353 - { 354 - struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); 355 - 356 - if (!xdna->dev_info->ops->resume) 357 - return -EOPNOTSUPP; 358 - 359 - return xdna->dev_info->ops->resume(xdna); 360 - } 361 - 362 368 static const struct dev_pm_ops amdxdna_pm_ops = { 363 - SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume) 364 - RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL) 369 + SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume) 370 + RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL) 365 371 }; 366 372 367 373 static struct pci_driver amdxdna_pci_driver = {
+2
drivers/accel/amdxdna/amdxdna_pci_drv.h
··· 6 6 #ifndef _AMDXDNA_PCI_DRV_H_ 7 7 #define _AMDXDNA_PCI_DRV_H_ 8 8 9 + #include <drm/drm_print.h> 9 10 #include <linux/workqueue.h> 10 11 #include <linux/xarray.h> 11 12 ··· 100 99 struct amdxdna_fw_ver fw_ver; 101 100 struct rw_semaphore notifier_lock; /* for mmu notifier*/ 102 101 struct workqueue_struct *notifier_wq; 102 + bool rpm_on; 103 103 }; 104 104 105 105 /*
+94
drivers/accel/amdxdna/amdxdna_pm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2025, Advanced Micro Devices, Inc. 4 + */ 5 + 6 + #include <drm/amdxdna_accel.h> 7 + #include <drm/drm_drv.h> 8 + #include <linux/pm_runtime.h> 9 + 10 + #include "amdxdna_pm.h" 11 + 12 + #define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */ 13 + 14 + int amdxdna_pm_suspend(struct device *dev) 15 + { 16 + struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); 17 + int ret = -EOPNOTSUPP; 18 + bool rpm; 19 + 20 + if (xdna->dev_info->ops->suspend) { 21 + rpm = xdna->rpm_on; 22 + xdna->rpm_on = false; 23 + ret = xdna->dev_info->ops->suspend(xdna); 24 + xdna->rpm_on = rpm; 25 + } 26 + 27 + XDNA_DBG(xdna, "Suspend done ret %d", ret); 28 + return ret; 29 + } 30 + 31 + int amdxdna_pm_resume(struct device *dev) 32 + { 33 + struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); 34 + int ret = -EOPNOTSUPP; 35 + bool rpm; 36 + 37 + if (xdna->dev_info->ops->resume) { 38 + rpm = xdna->rpm_on; 39 + xdna->rpm_on = false; 40 + ret = xdna->dev_info->ops->resume(xdna); 41 + xdna->rpm_on = rpm; 42 + } 43 + 44 + XDNA_DBG(xdna, "Resume done ret %d", ret); 45 + return ret; 46 + } 47 + 48 + int amdxdna_pm_resume_get(struct amdxdna_dev *xdna) 49 + { 50 + struct device *dev = xdna->ddev.dev; 51 + int ret; 52 + 53 + if (!xdna->rpm_on) 54 + return 0; 55 + 56 + ret = pm_runtime_resume_and_get(dev); 57 + if (ret) { 58 + XDNA_ERR(xdna, "Resume failed: %d", ret); 59 + pm_runtime_set_suspended(dev); 60 + } 61 + 62 + return ret; 63 + } 64 + 65 + void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna) 66 + { 67 + struct device *dev = xdna->ddev.dev; 68 + 69 + if (!xdna->rpm_on) 70 + return; 71 + 72 + pm_runtime_put_autosuspend(dev); 73 + } 74 + 75 + void amdxdna_pm_init(struct amdxdna_dev *xdna) 76 + { 77 + struct device *dev = xdna->ddev.dev; 78 + 79 + pm_runtime_set_active(dev); 80 + pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY); 81 + pm_runtime_use_autosuspend(dev); 82 + pm_runtime_allow(dev); 
83 + pm_runtime_put_autosuspend(dev); 84 + xdna->rpm_on = true; 85 + } 86 + 87 + void amdxdna_pm_fini(struct amdxdna_dev *xdna) 88 + { 89 + struct device *dev = xdna->ddev.dev; 90 + 91 + xdna->rpm_on = false; 92 + pm_runtime_get_noresume(dev); 93 + pm_runtime_forbid(dev); 94 + }
+18
drivers/accel/amdxdna/amdxdna_pm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2025, Advanced Micro Devices, Inc. 4 + */ 5 + 6 + #ifndef _AMDXDNA_PM_H_ 7 + #define _AMDXDNA_PM_H_ 8 + 9 + #include "amdxdna_pci_drv.h" 10 + 11 + int amdxdna_pm_suspend(struct device *dev); 12 + int amdxdna_pm_resume(struct device *dev); 13 + int amdxdna_pm_resume_get(struct amdxdna_dev *xdna); 14 + void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna); 15 + void amdxdna_pm_init(struct amdxdna_dev *xdna); 16 + void amdxdna_pm_fini(struct amdxdna_dev *xdna); 17 + 18 + #endif /* _AMDXDNA_PM_H_ */
+14 -24
drivers/accel/ivpu/ivpu_debugfs.c
··· 398 398 399 399 DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n"); 400 400 401 + static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw, 402 + int band, const char *name) 403 + { 404 + seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n", 405 + name, 406 + hw->hws.grace_period[band], 407 + hw->hws.process_grace_period[band], 408 + hw->hws.process_quantum[band]); 409 + } 410 + 401 411 static int priority_bands_show(struct seq_file *s, void *v) 402 412 { 403 413 struct ivpu_device *vdev = s->private; 404 414 struct ivpu_hw_info *hw = vdev->hw; 405 415 406 - for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE; 407 - band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) { 408 - switch (band) { 409 - case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE: 410 - seq_puts(s, "Idle: "); 411 - break; 412 - 413 - case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL: 414 - seq_puts(s, "Normal: "); 415 - break; 416 - 417 - case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS: 418 - seq_puts(s, "Focus: "); 419 - break; 420 - 421 - case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME: 422 - seq_puts(s, "Realtime: "); 423 - break; 424 - } 425 - 426 - seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n", 427 - hw->hws.grace_period[band], hw->hws.process_grace_period[band], 428 - hw->hws.process_quantum[band]); 429 - } 416 + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle"); 417 + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal"); 418 + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus"); 419 + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime"); 430 420 431 421 return 0; 432 422 }
+4 -2
drivers/accel/ivpu/ivpu_drv.c
··· 200 200 case DRM_IVPU_PARAM_CAPABILITIES: 201 201 args->value = ivpu_is_capable(vdev, args->index); 202 202 break; 203 + case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE: 204 + args->value = ivpu_fw_preempt_buf_size(vdev); 205 + break; 203 206 default: 204 207 ret = -EINVAL; 205 208 break; ··· 380 377 drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter)); 381 378 drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); 382 379 383 - /* Update boot params located at first 4KB of FW memory */ 384 - ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem)); 380 + ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp)); 385 381 386 382 ret = ivpu_hw_boot_fw(vdev); 387 383 if (ret) {
+143 -78
drivers/accel/ivpu/ivpu_fw.c
··· 17 17 #include "ivpu_ipc.h" 18 18 #include "ivpu_pm.h" 19 19 20 - #define FW_GLOBAL_MEM_START (2ull * SZ_1G) 21 - #define FW_GLOBAL_MEM_END (3ull * SZ_1G) 22 - #define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */ 23 - #define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */ 24 - #define FW_RUNTIME_MAX_SIZE SZ_512M 25 20 #define FW_SHAVE_NN_MAX_SIZE SZ_2M 26 - #define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START) 27 - #define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE) 28 21 #define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE) 22 + #define FW_PREEMPT_BUF_MIN_SIZE SZ_4K 23 + #define FW_PREEMPT_BUF_MAX_SIZE SZ_32M 29 24 30 25 #define WATCHDOG_MSS_REDIRECT 32 31 26 #define WATCHDOG_NCE_REDIRECT 33 ··· 126 131 return false; 127 132 } 128 133 129 - static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size) 134 + bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range) 130 135 { 131 - if (addr < range_start || addr + size > range_start + range_size) 136 + u64 addr_end; 137 + 138 + if (!range || check_add_overflow(addr, size, &addr_end)) 139 + return false; 140 + 141 + if (addr < range->start || addr_end > range->end) 132 142 return false; 133 143 134 144 return true; ··· 151 151 return VPU_SCHEDULING_MODE_HW; 152 152 } 153 153 154 + static void 155 + ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr) 156 + { 157 + struct ivpu_fw_info *fw = vdev->fw; 158 + u32 primary_preempt_buf_size, secondary_preempt_buf_size; 159 + 160 + if (fw_hdr->preemption_buffer_1_max_size) 161 + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; 162 + else 163 + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; 164 + 165 + if (fw_hdr->preemption_buffer_2_max_size) 166 + secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; 167 + else 168 + secondary_preempt_buf_size = 
fw_hdr->preemption_buffer_2_size; 169 + 170 + ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n", 171 + primary_preempt_buf_size, secondary_preempt_buf_size); 172 + 173 + if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE || 174 + secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) { 175 + ivpu_warn(vdev, "Preemption buffers size too small\n"); 176 + return; 177 + } 178 + 179 + if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE || 180 + secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) { 181 + ivpu_warn(vdev, "Preemption buffers size too big\n"); 182 + return; 183 + } 184 + 185 + if (fw->sched_mode != VPU_SCHEDULING_MODE_HW) 186 + return; 187 + 188 + if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) 189 + return; 190 + 191 + vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE); 192 + vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE); 193 + } 194 + 154 195 static int ivpu_fw_parse(struct ivpu_device *vdev) 155 196 { 156 197 struct ivpu_fw_info *fw = vdev->fw; 157 198 const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data; 158 - u64 runtime_addr, image_load_addr, runtime_size, image_size; 199 + struct ivpu_addr_range fw_image_range; 200 + u64 boot_params_addr, boot_params_size; 201 + u64 fw_version_addr, fw_version_size; 202 + u64 runtime_addr, runtime_size; 203 + u64 image_load_addr, image_size; 159 204 160 205 if (fw->file->size <= FW_FILE_IMAGE_OFFSET) { 161 206 ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size); ··· 212 167 return -EINVAL; 213 168 } 214 169 215 - runtime_addr = fw_hdr->boot_params_load_address; 216 - runtime_size = fw_hdr->runtime_size; 217 - image_load_addr = fw_hdr->image_load_address; 218 - image_size = fw_hdr->image_size; 170 + boot_params_addr = fw_hdr->boot_params_load_address; 171 + boot_params_size = SZ_4K; 219 172 220 - if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) { 221 - 
ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr); 173 + if (!ivpu_is_within_range(boot_params_addr, boot_params_size, &vdev->hw->ranges.runtime)) { 174 + ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr); 222 175 return -EINVAL; 223 176 } 224 177 225 - if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) { 226 - ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size); 178 + fw_version_addr = fw_hdr->firmware_version_load_address; 179 + fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K); 180 + 181 + if (fw_version_size != SZ_4K) { 182 + ivpu_err(vdev, "Invalid firmware version size: %u\n", 183 + fw_hdr->firmware_version_size); 184 + return -EINVAL; 185 + } 186 + 187 + if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) { 188 + ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr); 189 + return -EINVAL; 190 + } 191 + 192 + runtime_addr = fw_hdr->image_load_address; 193 + runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size; 194 + 195 + image_load_addr = fw_hdr->image_load_address; 196 + image_size = fw_hdr->image_size; 197 + 198 + if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) { 199 + ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n", 200 + runtime_addr, runtime_size); 227 201 return -EINVAL; 228 202 } 229 203 ··· 251 187 return -EINVAL; 252 188 } 253 189 254 - if (image_load_addr < runtime_addr || 255 - image_load_addr + image_size > runtime_addr + runtime_size) { 256 - ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n", 190 + if (!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) { 191 + ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n", 257 192 image_load_addr, image_size); 193 + return -EINVAL; 194 + } 195 + 196 + if (ivpu_hw_range_init(vdev, &fw_image_range, 
image_load_addr, image_size)) 197 + return -EINVAL; 198 + 199 + if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) { 200 + ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point); 258 201 return -EINVAL; 259 202 } 260 203 ··· 270 199 return -EINVAL; 271 200 } 272 201 273 - if (fw_hdr->entry_point < image_load_addr || 274 - fw_hdr->entry_point >= image_load_addr + image_size) { 275 - ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point); 276 - return -EINVAL; 277 - } 278 202 ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n", 279 203 fw_hdr->header_version, fw_hdr->image_format); 280 204 ··· 283 217 if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3)) 284 218 return -EINVAL; 285 219 220 + fw->boot_params_addr = boot_params_addr; 221 + fw->boot_params_size = boot_params_size; 222 + fw->fw_version_addr = fw_version_addr; 223 + fw->fw_version_size = fw_version_size; 286 224 fw->runtime_addr = runtime_addr; 287 225 fw->runtime_size = runtime_size; 288 226 fw->image_load_offset = image_load_addr - runtime_addr; ··· 305 235 fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr); 306 236 ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS"); 307 237 308 - if (fw_hdr->preemption_buffer_1_max_size) 309 - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; 310 - else 311 - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; 238 + ivpu_preemption_config_parse(vdev, fw_hdr); 239 + ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n", 240 + ivpu_fw_preempt_buf_size(vdev) ? 
"is" : "is not"); 312 241 313 - if (fw_hdr->preemption_buffer_2_max_size) 314 - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; 315 - else 316 - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; 317 - ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n", 318 - fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size); 319 - 320 - if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address, 321 - fw_hdr->ro_section_size, 322 - fw_hdr->image_load_address, 323 - fw_hdr->image_size)) { 242 + if (fw_hdr->ro_section_start_address && 243 + !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size, 244 + &fw_image_range)) { 324 245 ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n", 325 246 fw_hdr->ro_section_start_address, fw_hdr->ro_section_size); 326 247 return -EINVAL; ··· 320 259 fw->read_only_addr = fw_hdr->ro_section_start_address; 321 260 fw->read_only_size = fw_hdr->ro_section_size; 322 261 323 - ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", 324 - fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); 325 - ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", 326 - fw->runtime_addr, image_load_addr, fw->entry_point); 262 + ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n", 263 + fw->boot_params_addr, fw->boot_params_size); 264 + ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n", 265 + fw->fw_version_addr, fw->fw_version_size); 266 + ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n", 267 + fw->runtime_addr, fw->runtime_size); 268 + ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n", 269 + fw->image_load_offset, fw->image_size); 327 270 ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n", 328 271 fw->read_only_addr, fw->read_only_size); 272 + ivpu_dbg(vdev, FW_BOOT, "FW 
entry point: 0x%llx\n", fw->entry_point); 273 + ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size); 329 274 330 275 return 0; 331 276 } ··· 358 291 IVPU_PRINT_WA(disable_d0i3_msg); 359 292 } 360 293 361 - static int ivpu_fw_update_global_range(struct ivpu_device *vdev) 362 - { 363 - struct ivpu_fw_info *fw = vdev->fw; 364 - u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT); 365 - u64 size = FW_SHARED_MEM_SIZE; 366 - 367 - if (start + size > FW_GLOBAL_MEM_END) { 368 - ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size); 369 - return -EINVAL; 370 - } 371 - 372 - ivpu_hw_range_init(&vdev->hw->ranges.global, start, size); 373 - return 0; 374 - } 375 - 376 294 static int ivpu_fw_mem_init(struct ivpu_device *vdev) 377 295 { 378 296 struct ivpu_fw_info *fw = vdev->fw; 379 - struct ivpu_addr_range fw_range; 380 297 int log_verb_size; 381 298 int ret; 382 299 383 - ret = ivpu_fw_update_global_range(vdev); 384 - if (ret) 385 - return ret; 300 + fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size, 301 + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); 302 + if (!fw->mem_bp) { 303 + ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n"); 304 + return -ENOMEM; 305 + } 386 306 387 - fw_range.start = fw->runtime_addr; 388 - fw_range.end = fw->runtime_addr + fw->runtime_size; 389 - fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size, 390 - DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); 307 + fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size, 308 + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); 309 + if (!fw->mem_fw_ver) { 310 + ivpu_err(vdev, "Failed to create firmware version memory buffer\n"); 311 + ret = -ENOMEM; 312 + goto err_free_bp; 313 + } 314 + 315 + fw->mem = ivpu_bo_create_runtime(vdev, fw->runtime_addr, fw->runtime_size, 316 + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); 391 317 if (!fw->mem) { 392 318 ivpu_err(vdev, 
"Failed to create firmware runtime memory buffer\n"); 393 - return -ENOMEM; 319 + ret = -ENOMEM; 320 + goto err_free_fw_ver; 394 321 } 395 322 396 323 ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr, ··· 433 372 ivpu_bo_free(fw->mem_log_crit); 434 373 err_free_fw_mem: 435 374 ivpu_bo_free(fw->mem); 375 + err_free_fw_ver: 376 + ivpu_bo_free(fw->mem_fw_ver); 377 + err_free_bp: 378 + ivpu_bo_free(fw->mem_bp); 436 379 return ret; 437 380 } 438 381 ··· 452 387 ivpu_bo_free(fw->mem_log_verb); 453 388 ivpu_bo_free(fw->mem_log_crit); 454 389 ivpu_bo_free(fw->mem); 390 + ivpu_bo_free(fw->mem_fw_ver); 391 + ivpu_bo_free(fw->mem_bp); 455 392 456 393 fw->mem_log_verb = NULL; 457 394 fw->mem_log_crit = NULL; 458 395 fw->mem = NULL; 396 + fw->mem_fw_ver = NULL; 397 + fw->mem_bp = NULL; 459 398 } 460 399 461 400 int ivpu_fw_init(struct ivpu_device *vdev) ··· 552 483 ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n", 553 484 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg); 554 485 555 - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n", 556 - boot_params->global_memory_allocator_base); 557 - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n", 558 - boot_params->global_memory_allocator_size); 559 - 560 486 ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n", 561 487 boot_params->shave_nn_fw_base); 562 488 ··· 559 495 boot_params->watchdog_irq_mss); 560 496 ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n", 561 497 boot_params->watchdog_irq_nce); 562 - ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n", 563 - boot_params->host_to_vpu_irq); 564 - ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n", 565 - boot_params->job_done_irq); 566 498 567 499 ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n", 568 500 boot_params->host_version_id); ··· 606 546 boot_params->system_time_us); 607 547 
ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n", 608 548 boot_params->power_profile); 549 + ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n", 550 + boot_params->vpu_uses_ecc_mca_signal); 609 551 } 610 552 611 553 void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params) ··· 634 572 return; 635 573 } 636 574 575 + memset(boot_params, 0, sizeof(*boot_params)); 637 576 vdev->pm->is_warmboot = false; 638 577 639 578 boot_params->magic = VPU_BOOT_PARAMS_MAGIC; ··· 710 647 boot_params->d0i3_entry_vpu_ts = 0; 711 648 if (IVPU_WA(disable_d0i2)) 712 649 boot_params->power_profile |= BIT(1); 650 + boot_params->vpu_uses_ecc_mca_signal = 651 + ivpu_hw_uses_ecc_mca_signal(vdev) ? VPU_BOOT_MCA_ECC_BOTH : 0; 713 652 714 653 boot_params->system_time_us = ktime_to_us(ktime_get_real()); 715 654 wmb(); /* Flush WC buffers after writing bootparams */
+13 -1
drivers/accel/ivpu/ivpu_fw.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (C) 2020-2024 Intel Corporation 3 + * Copyright (C) 2020-2025 Intel Corporation 4 4 */ 5 5 6 6 #ifndef __IVPU_FW_H__ ··· 19 19 const struct firmware *file; 20 20 const char *name; 21 21 char version[FW_VERSION_STR_SIZE]; 22 + struct ivpu_bo *mem_bp; 23 + struct ivpu_bo *mem_fw_ver; 22 24 struct ivpu_bo *mem; 23 25 struct ivpu_bo *mem_shave_nn; 24 26 struct ivpu_bo *mem_log_crit; 25 27 struct ivpu_bo *mem_log_verb; 28 + u64 boot_params_addr; 29 + u64 boot_params_size; 30 + u64 fw_version_addr; 31 + u64 fw_version_size; 26 32 u64 runtime_addr; 27 33 u32 runtime_size; 28 34 u64 image_load_offset; ··· 48 42 u64 last_heartbeat; 49 43 }; 50 44 45 + bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range); 51 46 int ivpu_fw_init(struct ivpu_device *vdev); 52 47 void ivpu_fw_fini(struct ivpu_device *vdev); 53 48 void ivpu_fw_load(struct ivpu_device *vdev); ··· 57 50 static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev) 58 51 { 59 52 return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point; 53 + } 54 + 55 + static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev) 56 + { 57 + return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size; 60 58 } 61 59 62 60 #endif /* __IVPU_FW_H__ */
+86 -29
drivers/accel/ivpu/ivpu_gem.c
··· 15 15 #include <drm/drm_utils.h> 16 16 17 17 #include "ivpu_drv.h" 18 + #include "ivpu_fw.h" 18 19 #include "ivpu_gem.h" 19 20 #include "ivpu_hw.h" 20 21 #include "ivpu_mmu.h" ··· 28 27 static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action) 29 28 { 30 29 ivpu_dbg(vdev, BO, 31 - "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n", 32 - action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id, 30 + "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n", 31 + action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr, 33 32 (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc, 34 33 (bool)drm_gem_is_imported(&bo->base.base)); 35 34 } ··· 44 43 dma_resv_unlock(bo->base.base.resv); 45 44 } 46 45 46 + static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo) 47 + { 48 + struct sg_table *sgt = bo->base.sgt; 49 + 50 + drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach); 51 + 52 + ivpu_bo_lock(bo); 53 + 54 + if (!sgt) { 55 + sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL); 56 + if (IS_ERR(sgt)) 57 + ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt)); 58 + else 59 + bo->base.sgt = sgt; 60 + } 61 + 62 + ivpu_bo_unlock(bo); 63 + 64 + return sgt; 65 + } 66 + 47 67 /* 48 - * ivpu_bo_pin() - pin the backing physical pages and map them to VPU. 68 + * ivpu_bo_bind() - pin the backing physical pages and map them to VPU. 49 69 * 50 70 * This function pins physical memory pages, then maps the physical pages 51 71 * to IOMMU address space and finally updates the VPU MMU page tables 52 72 * to allow the VPU to translate VPU address to IOMMU address. 
53 73 */ 54 - int __must_check ivpu_bo_pin(struct ivpu_bo *bo) 74 + int __must_check ivpu_bo_bind(struct ivpu_bo *bo) 55 75 { 56 76 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); 57 77 struct sg_table *sgt; 58 78 int ret = 0; 59 79 60 - ivpu_dbg_bo(vdev, bo, "pin"); 80 + ivpu_dbg_bo(vdev, bo, "bind"); 61 81 62 - sgt = drm_gem_shmem_get_pages_sgt(&bo->base); 82 + if (bo->base.base.import_attach) 83 + sgt = ivpu_bo_map_attachment(vdev, bo); 84 + else 85 + sgt = drm_gem_shmem_get_pages_sgt(&bo->base); 63 86 if (IS_ERR(sgt)) { 64 87 ret = PTR_ERR(sgt); 65 88 ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); ··· 124 99 ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node); 125 100 if (!ret) { 126 101 bo->ctx = ctx; 102 + bo->ctx_id = ctx->id; 127 103 bo->vpu_addr = bo->mm_node.start; 104 + ivpu_dbg_bo(vdev, bo, "vaddr"); 128 105 } else { 129 106 ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); 130 107 } ··· 142 115 { 143 116 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); 144 117 145 - lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount)); 118 + dma_resv_assert_held(bo->base.base.resv); 146 119 147 120 if (bo->mmu_mapped) { 148 121 drm_WARN_ON(&vdev->drm, !bo->ctx); ··· 161 134 return; 162 135 163 136 if (bo->base.sgt) { 164 - dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); 165 - sg_free_table(bo->base.sgt); 166 - kfree(bo->base.sgt); 137 + if (bo->base.base.import_attach) { 138 + dma_buf_unmap_attachment(bo->base.base.import_attach, 139 + bo->base.sgt, DMA_BIDIRECTIONAL); 140 + } else { 141 + dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); 142 + sg_free_table(bo->base.sgt); 143 + kfree(bo->base.sgt); 144 + } 167 145 bo->base.sgt = NULL; 168 146 } 169 147 } ··· 214 182 struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, 215 183 struct dma_buf *dma_buf) 216 184 { 185 + struct ivpu_device *vdev = to_ivpu_device(dev); 217 186 
struct device *attach_dev = dev->dev; 218 187 struct dma_buf_attachment *attach; 219 - struct sg_table *sgt; 220 188 struct drm_gem_object *obj; 189 + struct ivpu_bo *bo; 221 190 int ret; 222 191 223 192 attach = dma_buf_attach(dma_buf, attach_dev); ··· 227 194 228 195 get_dma_buf(dma_buf); 229 196 230 - sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); 231 - if (IS_ERR(sgt)) { 232 - ret = PTR_ERR(sgt); 233 - goto fail_detach; 234 - } 235 - 236 - obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); 197 + obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL); 237 198 if (IS_ERR(obj)) { 238 199 ret = PTR_ERR(obj); 239 - goto fail_unmap; 200 + goto fail_detach; 240 201 } 241 202 242 203 obj->import_attach = attach; 243 204 obj->resv = dma_buf->resv; 244 205 206 + bo = to_ivpu_bo(obj); 207 + 208 + mutex_lock(&vdev->bo_list_lock); 209 + list_add_tail(&bo->bo_list_node, &vdev->bo_list); 210 + mutex_unlock(&vdev->bo_list_lock); 211 + 212 + ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo)); 213 + 245 214 return obj; 246 215 247 - fail_unmap: 248 - dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); 249 216 fail_detach: 250 217 dma_buf_detach(dma_buf, attach); 251 218 dma_buf_put(dma_buf); ··· 253 220 return ERR_PTR(ret); 254 221 } 255 222 256 - static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id) 223 + static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags) 257 224 { 258 225 struct drm_gem_shmem_object *shmem; 259 226 struct ivpu_bo *bo; ··· 271 238 return ERR_CAST(shmem); 272 239 273 240 bo = to_ivpu_bo(&shmem->base); 274 - bo->ctx_id = ctx_id; 275 241 bo->base.map_wc = flags & DRM_IVPU_BO_WC; 276 242 bo->flags = flags; 277 243 ··· 278 246 list_add_tail(&bo->bo_list_node, &vdev->bo_list); 279 247 mutex_unlock(&vdev->bo_list_lock); 280 248 281 - ivpu_dbg_bo(vdev, bo, "alloc"); 249 + ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, 
size); 282 250 283 251 return bo; 284 252 } ··· 313 281 314 282 ivpu_dbg_bo(vdev, bo, "free"); 315 283 284 + drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node)); 285 + 316 286 mutex_lock(&vdev->bo_list_lock); 317 287 list_del(&bo->bo_list_node); 318 288 mutex_unlock(&vdev->bo_list_lock); ··· 324 290 drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0); 325 291 drm_WARN_ON(&vdev->drm, bo->base.vaddr); 326 292 293 + ivpu_bo_lock(bo); 327 294 ivpu_bo_unbind_locked(bo); 295 + ivpu_bo_unlock(bo); 296 + 328 297 drm_WARN_ON(&vdev->drm, bo->mmu_mapped); 329 298 drm_WARN_ON(&vdev->drm, bo->ctx); 330 299 331 300 drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1); 301 + drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node); 332 302 drm_gem_shmem_free(&bo->base); 333 303 } 334 304 ··· 364 326 if (size == 0) 365 327 return -EINVAL; 366 328 367 - bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id); 329 + bo = ivpu_bo_alloc(vdev, size, args->flags); 368 330 if (IS_ERR(bo)) { 369 331 ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)", 370 332 bo, file_priv->ctx.id, args->size, args->flags); 371 333 return PTR_ERR(bo); 372 334 } 373 335 336 + drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0); 337 + 374 338 ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); 375 - if (ret) 339 + if (ret) { 376 340 ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)", 377 341 bo, file_priv->ctx.id, args->size, args->flags); 378 - else 342 + } else { 379 343 args->vpu_addr = bo->vpu_addr; 344 + drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1); 345 + } 380 346 381 347 drm_gem_object_put(&bo->base.base); 382 348 ··· 402 360 drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end)); 403 361 drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size)); 404 362 405 - bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID); 363 + bo = ivpu_bo_alloc(vdev, size, flags); 406 364 if (IS_ERR(bo)) { 407 365 
ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)", 408 366 bo, range->start, size, flags); ··· 413 371 if (ret) 414 372 goto err_put; 415 373 416 - ret = ivpu_bo_pin(bo); 374 + ret = ivpu_bo_bind(bo); 417 375 if (ret) 418 376 goto err_put; 419 377 ··· 431 389 err_put: 432 390 drm_gem_object_put(&bo->base.base); 433 391 return NULL; 392 + } 393 + 394 + struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags) 395 + { 396 + struct ivpu_addr_range range; 397 + 398 + if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) { 399 + ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size); 400 + return NULL; 401 + } 402 + 403 + if (ivpu_hw_range_init(vdev, &range, addr, size)) 404 + return NULL; 405 + 406 + return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags); 434 407 } 435 408 436 409 struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
+8 -2
drivers/accel/ivpu/ivpu_gem.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (C) 2020-2023 Intel Corporation 3 + * Copyright (C) 2020-2025 Intel Corporation 4 4 */ 5 5 #ifndef __IVPU_GEM_H__ 6 6 #define __IVPU_GEM_H__ ··· 24 24 bool mmu_mapped; 25 25 }; 26 26 27 - int ivpu_bo_pin(struct ivpu_bo *bo); 27 + int ivpu_bo_bind(struct ivpu_bo *bo); 28 28 void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); 29 29 30 30 struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size); 31 31 struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); 32 32 struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, 33 33 struct ivpu_addr_range *range, u64 size, u32 flags); 34 + struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags); 34 35 struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags); 35 36 void ivpu_bo_free(struct ivpu_bo *bo); 36 37 ··· 95 94 return 0; 96 95 97 96 return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo)); 97 + } 98 + 99 + static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo) 100 + { 101 + return bo->flags & DRM_IVPU_BO_MAPPABLE; 98 102 } 99 103 100 104 #endif /* __IVPU_GEM_H__ */
+52 -7
drivers/accel/ivpu/ivpu_hw.c
··· 8 8 #include "ivpu_hw_btrs.h" 9 9 #include "ivpu_hw_ip.h" 10 10 11 + #include <asm/msr-index.h> 12 + #include <asm/msr.h> 11 13 #include <linux/dmi.h> 12 14 #include <linux/fault-inject.h> 13 15 #include <linux/pm_runtime.h> ··· 21 19 module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444); 22 20 MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>"); 23 21 #endif 22 + 23 + #define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */ 24 + 25 + #define ECC_MCA_SIGNAL_ENABLE_MASK 0xff 24 26 25 27 static char *platform_to_str(u32 platform) 26 28 { ··· 153 147 vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000; 154 148 } 155 149 150 + int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size) 151 + { 152 + u64 end; 153 + 154 + if (!range || check_add_overflow(start, size, &end)) { 155 + ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size); 156 + return -EINVAL; 157 + } 158 + 159 + range->start = start; 160 + range->end = end; 161 + 162 + return 0; 163 + } 164 + 156 165 static void memory_ranges_init(struct ivpu_device *vdev) 157 166 { 158 167 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { 159 - ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); 160 - ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M); 161 - ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G); 162 - ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G); 168 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M); 169 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M); 170 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M); 171 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G); 172 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G); 163 173 } else { 164 - ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); 165 
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G); 166 - ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G); 174 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M); 175 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M); 176 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G); 177 + ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G); 167 178 vdev->hw->ranges.dma = vdev->hw->ranges.user; 168 179 } 180 + 181 + drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start, 182 + FW_SHARED_MEM_ALIGNMENT)); 169 183 } 170 184 171 185 static int wp_enable(struct ivpu_device *vdev) ··· 398 372 399 373 pm_runtime_mark_last_busy(vdev->drm.dev); 400 374 return IRQ_HANDLED; 375 + } 376 + 377 + bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev) 378 + { 379 + unsigned long long msr_integrity_caps; 380 + int ret; 381 + 382 + if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX) 383 + return false; 384 + 385 + ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps); 386 + if (ret) { 387 + ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d", ret); 388 + return false; 389 + } 390 + 391 + ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps); 392 + 393 + return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK; 401 394 }
+4 -6
drivers/accel/ivpu/ivpu_hw.h
··· 21 21 bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq); 22 22 } irq; 23 23 struct { 24 + struct ivpu_addr_range runtime; 24 25 struct ivpu_addr_range global; 25 26 struct ivpu_addr_range user; 26 27 struct ivpu_addr_range shave; ··· 52 51 }; 53 52 54 53 int ivpu_hw_init(struct ivpu_device *vdev); 54 + int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, 55 + u64 size); 55 56 int ivpu_hw_power_up(struct ivpu_device *vdev); 56 57 int ivpu_hw_power_down(struct ivpu_device *vdev); 57 58 int ivpu_hw_reset(struct ivpu_device *vdev); ··· 63 60 void ivpu_hw_irq_enable(struct ivpu_device *vdev); 64 61 void ivpu_hw_irq_disable(struct ivpu_device *vdev); 65 62 irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr); 63 + bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev); 66 64 67 65 static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq) 68 66 { ··· 73 69 static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq) 74 70 { 75 71 return vdev->hw->irq.ip_irq_handler(vdev, irq); 76 - } 77 - 78 - static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size) 79 - { 80 - range->start = start; 81 - range->end = start + size; 82 72 } 83 73 84 74 static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
+1 -1
drivers/accel/ivpu/ivpu_hw_btrs.c
··· 752 752 } 753 753 } 754 754 755 - void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent) 755 + void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent) 756 756 { 757 757 u32 val = 0; 758 758 u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;
+1 -1
drivers/accel/ivpu/ivpu_hw_btrs.h
··· 36 36 bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq); 37 37 bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq); 38 38 int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable); 39 - void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent); 39 + void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent); 40 40 u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev); 41 41 u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev); 42 42 u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
+73 -35
drivers/accel/ivpu/ivpu_job.c
··· 34 34 static int ivpu_preemption_buffers_create(struct ivpu_device *vdev, 35 35 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) 36 36 { 37 - u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE); 38 - u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE); 39 - 40 - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW || 41 - ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) 37 + if (ivpu_fw_preempt_buf_size(vdev) == 0) 42 38 return 0; 43 39 44 40 cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user, 45 - primary_size, DRM_IVPU_BO_WC); 41 + vdev->fw->primary_preempt_buf_size, 42 + DRM_IVPU_BO_WC); 46 43 if (!cmdq->primary_preempt_buf) { 47 44 ivpu_err(vdev, "Failed to create primary preemption buffer\n"); 48 45 return -ENOMEM; 49 46 } 50 47 51 48 cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma, 52 - secondary_size, DRM_IVPU_BO_WC); 49 + vdev->fw->secondary_preempt_buf_size, 50 + DRM_IVPU_BO_WC); 53 51 if (!cmdq->secondary_preempt_buf) { 54 52 ivpu_err(vdev, "Failed to create secondary preemption buffer\n"); 55 53 goto err_free_primary; ··· 64 66 static void ivpu_preemption_buffers_free(struct ivpu_device *vdev, 65 67 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) 66 68 { 67 - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW) 68 - return; 69 - 70 69 if (cmdq->primary_preempt_buf) 71 70 ivpu_bo_free(cmdq->primary_preempt_buf); 72 71 if (cmdq->secondary_preempt_buf) 73 72 ivpu_bo_free(cmdq->secondary_preempt_buf); 74 73 } 75 74 75 + static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, 76 + struct ivpu_cmdq *cmdq, struct ivpu_job *job) 77 + { 78 + int ret; 79 + 80 + /* Use preemption buffer provided by the user space */ 81 + if (job->primary_preempt_buf) 82 + return 0; 83 + 84 + if (!cmdq->primary_preempt_buf) { 85 + /* Allocate per command queue preemption buffers */ 86 + ret = 
ivpu_preemption_buffers_create(vdev, file_priv, cmdq); 87 + if (ret) 88 + return ret; 89 + } 90 + 91 + /* Use preemption buffers allocated by the kernel */ 92 + job->primary_preempt_buf = cmdq->primary_preempt_buf; 93 + job->secondary_preempt_buf = cmdq->secondary_preempt_buf; 94 + 95 + return 0; 96 + } 97 + 76 98 static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) 77 99 { 78 100 struct ivpu_device *vdev = file_priv->vdev; 79 101 struct ivpu_cmdq *cmdq; 80 - int ret; 81 102 82 103 cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL); 83 104 if (!cmdq) ··· 105 88 cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); 106 89 if (!cmdq->mem) 107 90 goto err_free_cmdq; 108 - 109 - ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); 110 - if (ret) 111 - ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n"); 112 91 113 92 return cmdq; 114 93 ··· 232 219 ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id, 233 220 cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); 234 221 235 - if (!ret) 222 + if (!ret) { 236 223 ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n", 237 224 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority); 238 - else 225 + } else { 239 226 xa_erase(&vdev->db_xa, cmdq->db_id); 227 + cmdq->db_id = 0; 228 + } 240 229 241 230 return ret; 242 231 } ··· 442 427 if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION)) 443 428 entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK; 444 429 445 - if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { 446 - if (cmdq->primary_preempt_buf) { 447 - entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr; 448 - entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf); 449 - } 430 + if (job->primary_preempt_buf) { 431 + entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr; 432 + entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf); 433 + } 450 
434 451 - if (cmdq->secondary_preempt_buf) { 452 - entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr; 453 - entry->secondary_preempt_buf_size = 454 - ivpu_bo_size(cmdq->secondary_preempt_buf); 455 - } 435 + if (job->secondary_preempt_buf) { 436 + entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr; 437 + entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf); 456 438 } 457 439 458 440 wmb(); /* Ensure that tail is updated after filling entry */ ··· 673 661 goto err_unlock; 674 662 } 675 663 664 + ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job); 665 + if (ret) { 666 + ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n", 667 + job->job_id, ret); 668 + goto err_unlock; 669 + } 670 + 676 671 job->cmdq_id = cmdq->id; 677 672 678 673 is_first_job = xa_empty(&vdev->submitted_jobs_xa); ··· 733 714 734 715 static int 735 716 ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, 736 - u32 buf_count, u32 commands_offset) 717 + u32 buf_count, u32 commands_offset, u32 preempt_buffer_index) 737 718 { 738 719 struct ivpu_file_priv *file_priv = job->file_priv; 739 720 struct ivpu_device *vdev = file_priv->vdev; ··· 751 732 752 733 job->bos[i] = to_ivpu_bo(obj); 753 734 754 - ret = ivpu_bo_pin(job->bos[i]); 735 + ret = ivpu_bo_bind(job->bos[i]); 755 736 if (ret) 756 737 return ret; 757 738 } ··· 768 749 } 769 750 770 751 job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; 752 + 753 + if (preempt_buffer_index) { 754 + struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index]; 755 + 756 + if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) { 757 + ivpu_warn(vdev, "Preemption buffer is too small\n"); 758 + return -EINVAL; 759 + } 760 + if (ivpu_bo_is_mappable(preempt_bo)) { 761 + ivpu_warn(vdev, "Preemption buffer cannot be mappable\n"); 762 + return -EINVAL; 763 + } 764 + job->primary_preempt_buf = preempt_bo; 765 + } 771 766 
772 767 ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, 773 768 &acquire_ctx); ··· 813 780 814 781 static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id, 815 782 u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset, 816 - u8 priority) 783 + u32 preempt_buffer_index, u8 priority) 817 784 { 818 785 struct ivpu_device *vdev = file_priv->vdev; 819 786 struct ivpu_job *job; ··· 845 812 goto err_exit_dev; 846 813 } 847 814 848 - ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset); 815 + ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset, 816 + preempt_buffer_index); 849 817 if (ret) { 850 818 ivpu_err(vdev, "Failed to prepare job: %d\n", ret); 851 819 goto err_destroy_job; ··· 900 866 priority = ivpu_job_to_jsm_priority(args->priority); 901 867 902 868 return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine, 903 - (void __user *)args->buffers_ptr, args->commands_offset, priority); 869 + (void __user *)args->buffers_ptr, args->commands_offset, 0, priority); 904 870 } 905 871 906 872 int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ··· 917 883 if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) 918 884 return -EINVAL; 919 885 886 + if (args->preempt_buffer_index >= args->buffer_count) 887 + return -EINVAL; 888 + 920 889 if (!IS_ALIGNED(args->commands_offset, 8)) 921 890 return -EINVAL; 922 891 ··· 930 893 return -EBADFD; 931 894 932 895 return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE, 933 - (void __user *)args->buffers_ptr, args->commands_offset, 0); 896 + (void __user *)args->buffers_ptr, args->commands_offset, 897 + args->preempt_buffer_index, 0); 934 898 } 935 899 936 900 int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ··· 1050 1012 1051 1013 if 
(vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) 1052 1014 if (ivpu_jsm_reset_engine(vdev, 0)) 1053 - return; 1015 + goto runtime_put; 1054 1016 1055 1017 mutex_lock(&vdev->context_list_lock); 1056 1018 xa_for_each(&vdev->context_xa, ctx_id, file_priv) { ··· 1074 1036 goto runtime_put; 1075 1037 1076 1038 if (ivpu_jsm_hws_resume_engine(vdev, 0)) 1077 - return; 1039 + goto runtime_put; 1078 1040 /* 1079 1041 * In hardware scheduling mode NPU already has stopped processing jobs 1080 1042 * and won't send us any further notifications, thus we have to free job related resources
+29 -17
drivers/accel/ivpu/ivpu_job.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (C) 2020-2024 Intel Corporation 3 + * Copyright (C) 2020-2025 Intel Corporation 4 4 */ 5 5 6 6 #ifndef __IVPU_JOB_H__ ··· 15 15 struct ivpu_file_priv; 16 16 17 17 /** 18 - * struct ivpu_cmdq - Object representing device queue used to send jobs. 19 - * @jobq: Pointer to job queue memory shared with the device 20 - * @mem: Memory allocated for the job queue, shared with device 21 - * @entry_count Number of job entries in the queue 22 - * @db_id: Doorbell assigned to this job queue 23 - * @db_registered: True if doorbell is registered in device 18 + * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU. 19 + * Tracks queue memory, preemption buffers, and metadata for job management. 20 + * @jobq: Pointer to job queue memory shared with the device 21 + * @primary_preempt_buf: Primary preemption buffer for this queue (optional) 22 + * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional) 23 + * @mem: Memory allocated for the job queue, shared with device 24 + * @entry_count: Number of job entries in the queue 25 + * @id: Unique command queue ID 26 + * @db_id: Doorbell ID assigned to this job queue 27 + * @priority: Priority level of the command queue 28 + * @is_legacy: True if this is a legacy command queue 24 29 */ 25 30 struct ivpu_cmdq { 26 31 struct vpu_job_queue *jobq; ··· 40 35 }; 41 36 42 37 /** 43 - * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer. 44 - * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW. 45 - * This is a unit of execution, and be tracked by the job_id for 46 - * any status reporting from VPU FW through IPC JOB RET/DONE message. 
47 - * @file_priv: The client that submitted this job 48 - * @job_id: Job ID for KMD tracking and job status reporting from VPU FW 49 - * @status: Status of the Job from IPC JOB RET/DONE message 50 - * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job 51 - * @submit_status_offset: Offset within batch buffer where job completion handler 52 - will update the job status 38 + * struct ivpu_job - Representing a batch or DMA buffer submitted to the VPU. 39 + * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW. 40 + * The structure holds all resources and metadata needed for job submission, execution, 41 + * and completion handling. 42 + * @vdev: Pointer to the VPU device 43 + * @file_priv: The client context that submitted this job 44 + * @done_fence: Fence signaled when job completes 45 + * @cmd_buf_vpu_addr: VPU address of the command buffer for this job 46 + * @cmdq_id: Command queue ID used for submission 47 + * @job_id: Unique job ID for tracking and status reporting 48 + * @engine_idx: Engine index for job execution 49 + * @primary_preempt_buf: Primary preemption buffer for job 50 + * @secondary_preempt_buf: Secondary preemption buffer for job (optional) 51 + * @bo_count: Number of buffer objects associated with this job 52 + * @bos: Array of buffer objects used by the job (batch buffer is at index 0) 53 53 */ 54 54 struct ivpu_job { 55 55 struct ivpu_device *vdev; ··· 64 54 u32 cmdq_id; 65 55 u32 job_id; 66 56 u32 engine_idx; 57 + struct ivpu_bo *primary_preempt_buf; 58 + struct ivpu_bo *secondary_preempt_buf; 67 59 size_t bo_count; 68 60 struct ivpu_bo *bos[] __counted_by(bo_count); 69 61 };
+1 -1
drivers/accel/ivpu/ivpu_mmu_context.c
··· 568 568 mutex_init(&ctx->lock); 569 569 570 570 if (!context_id) { 571 - start = vdev->hw->ranges.global.start; 571 + start = vdev->hw->ranges.runtime.start; 572 572 end = vdev->hw->ranges.shave.end; 573 573 } else { 574 574 start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
+8 -3
drivers/accel/ivpu/ivpu_pm.c
··· 54 54 static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev) 55 55 { 56 56 struct ivpu_fw_info *fw = vdev->fw; 57 - struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem); 57 + struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp); 58 58 59 59 if (!bp->save_restore_ret_address) { 60 60 ivpu_pm_prepare_cold_boot(vdev); ··· 502 502 else 503 503 ret = ivpu_pm_dct_disable(vdev); 504 504 505 - if (!ret) 506 - ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent); 505 + if (!ret) { 506 + /* Convert percent to U1.7 format */ 507 + u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100); 508 + 509 + ivpu_hw_btrs_dct_set_status(vdev, enable, val); 510 + } 511 + 507 512 }
+326 -187
drivers/accel/ivpu/vpu_jsm_api.h
··· 1 1 /* SPDX-License-Identifier: MIT */ 2 2 /* 3 - * Copyright (c) 2020-2024, Intel Corporation. 3 + * Copyright (c) 2020-2025, Intel Corporation. 4 + */ 5 + 6 + /** 7 + * @addtogroup Jsm 8 + * @{ 4 9 */ 5 10 6 11 /** 7 12 * @file 8 13 * @brief JSM shared definitions 9 - * 10 - * @ingroup Jsm 11 - * @brief JSM shared definitions 12 - * @{ 13 14 */ 14 15 #ifndef VPU_JSM_API_H 15 16 #define VPU_JSM_API_H ··· 23 22 /* 24 23 * Minor version changes when API backward compatibility is preserved. 25 24 */ 26 - #define VPU_JSM_API_VER_MINOR 29 25 + #define VPU_JSM_API_VER_MINOR 32 27 26 28 27 /* 29 28 * API header changed (field names, documentation, formatting) but API itself has not been changed 30 29 */ 31 - #define VPU_JSM_API_VER_PATCH 0 30 + #define VPU_JSM_API_VER_PATCH 5 32 31 33 32 /* 34 33 * Index in the API version table ··· 72 71 #define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU 73 72 #define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU 74 73 #define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU 75 - /* Job status returned when the job was preempted mid-inference */ 74 + /* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */ 76 75 #define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU 76 + /* Job status returned when the job was preempted mid-command */ 77 + #define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU 77 78 #define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU 79 + #define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU 78 80 79 81 /* 80 82 * Host <-> VPU IPC channels. ··· 138 134 * 2. Native fence queues are only supported on VPU 40xx onwards. 139 135 */ 140 136 VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U), 141 - 142 137 /* 143 138 * Enable turbo mode for testing NPU performance; not recommended for regular usage. 
144 139 */ 145 - VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U) 140 + VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U), 141 + /* 142 + * Queue error detection mode flag 143 + * For 'interactive' queues (this bit not set), the FW will identify queues that have not 144 + * completed a job inside the TDR timeout as in error as part of engine reset sequence. 145 + * For 'non-interactive' queues (this bit set), the FW will identify queues that have not 146 + * progressed the heartbeat inside the non-interactive no-progress timeout as in error as 147 + * part of engine reset sequence. Additionally, there is an upper limit applied to these 148 + * queues: even if they progress the heartbeat, if they run longer than non-interactive 149 + * timeout, then the FW will also identify them as in error. 150 + */ 151 + VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U) 146 152 }; 147 153 148 154 /* ··· 223 209 */ 224 210 #define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2 225 211 226 - /* 212 + /** 227 213 * Job scheduling priority bands for both hardware scheduling and OS scheduling. 228 214 */ 229 215 enum vpu_job_scheduling_priority_band { ··· 234 220 VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4, 235 221 }; 236 222 237 - /* 223 + /** 238 224 * Job format. 239 225 * Jobs defines the actual workloads to be executed by a given engine. 240 226 */ 241 227 struct vpu_job_queue_entry { 242 - /**< Address of VPU commands batch buffer */ 228 + /** Address of VPU commands batch buffer */ 243 229 u64 batch_buf_addr; 244 - /**< Job ID */ 230 + /** Job ID */ 245 231 u32 job_id; 246 - /**< Flags bit field, see VPU_JOB_FLAGS_* above */ 232 + /** Flags bit field, see VPU_JOB_FLAGS_* above */ 247 233 u32 flags; 248 234 /** 249 235 * Doorbell ring timestamp taken by KMD from SoC's global system clock, in ··· 251 237 * to match other profiling timestamps. 
252 238 */ 253 239 u64 doorbell_timestamp; 254 - /**< Extra id for job tracking, used only in the firmware perf traces */ 240 + /** Extra id for job tracking, used only in the firmware perf traces */ 255 241 u64 host_tracking_id; 256 - /**< Address of the primary preemption buffer to use for this job */ 242 + /** Address of the primary preemption buffer to use for this job */ 257 243 u64 primary_preempt_buf_addr; 258 - /**< Size of the primary preemption buffer to use for this job */ 244 + /** Size of the primary preemption buffer to use for this job */ 259 245 u32 primary_preempt_buf_size; 260 - /**< Size of secondary preemption buffer to use for this job */ 246 + /** Size of secondary preemption buffer to use for this job */ 261 247 u32 secondary_preempt_buf_size; 262 - /**< Address of secondary preemption buffer to use for this job */ 248 + /** Address of secondary preemption buffer to use for this job */ 263 249 u64 secondary_preempt_buf_addr; 264 250 u64 reserved_0; 265 251 }; 266 252 267 - /* 253 + /** 268 254 * Inline command format. 269 255 * Inline commands are the commands executed at scheduler level (typically, 270 256 * synchronization directives). Inline command and job objects must be of ··· 272 258 */ 273 259 struct vpu_inline_cmd { 274 260 u64 reserved_0; 275 - /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ 261 + /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ 276 262 u32 type; 277 - /* Flags bit field, see VPU_JOB_FLAGS_* above. */ 263 + /** Flags bit field, see VPU_JOB_FLAGS_* above. */ 278 264 u32 flags; 279 - /* Inline command payload. Depends on inline command type. */ 280 - union { 281 - /* Fence (wait and signal) commands' payload. */ 282 - struct { 283 - /* Fence object handle. */ 265 + /** Inline command payload. Depends on inline command type. */ 266 + union payload { 267 + /** Fence (wait and signal) commands' payload. */ 268 + struct fence { 269 + /** Fence object handle. 
*/ 284 270 u64 fence_handle; 285 - /* User VA of the current fence value. */ 271 + /** User VA of the current fence value. */ 286 272 u64 current_value_va; 287 - /* User VA of the monitored fence value (read-only). */ 273 + /** User VA of the monitored fence value (read-only). */ 288 274 u64 monitored_value_va; 289 - /* Value to wait for or write in fence location. */ 275 + /** Value to wait for or write in fence location. */ 290 276 u64 value; 291 - /* User VA of the log buffer in which to add log entry on completion. */ 277 + /** User VA of the log buffer in which to add log entry on completion. */ 292 278 u64 log_buffer_va; 293 - /* NPU private data. */ 279 + /** NPU private data. */ 294 280 u64 npu_private_data; 295 281 } fence; 296 - /* Other commands do not have a payload. */ 297 - /* Payload definition for future inline commands can be inserted here. */ 282 + /** 283 + * Other commands do not have a payload: 284 + * Payload definition for future inline commands can be inserted here. 285 + */ 298 286 u64 reserved_1[6]; 299 287 } payload; 300 288 }; 301 289 302 - /* 290 + /** 303 291 * Job queue slots can be populated either with job objects or inline command objects. 304 292 */ 305 293 union vpu_jobq_slot { ··· 309 293 struct vpu_inline_cmd inline_cmd; 310 294 }; 311 295 312 - /* 296 + /** 313 297 * Job queue control registers. 314 298 */ 315 299 struct vpu_job_queue_header { ··· 317 301 u32 head; 318 302 u32 tail; 319 303 u32 flags; 320 - /* Set to 1 to indicate priority_band field is valid */ 304 + /** Set to 1 to indicate priority_band field is valid */ 321 305 u32 priority_band_valid; 322 - /* 306 + /** 323 307 * Priority for the work of this job queue, valid only if the HWS is NOT used 324 - * and the `priority_band_valid` is set to 1. It is applied only during 325 - * the VPU_JSM_MSG_REGISTER_DB message processing. 326 - * The device firmware might use the `priority_band` to optimize the power 308 + * and the @ref priority_band_valid is set to 1. 
It is applied only during 309 + * the @ref VPU_JSM_MSG_REGISTER_DB message processing. 310 + * The device firmware might use the priority_band to optimize the power 327 311 * management logic, but it will not affect the order of jobs. 328 312 * Available priority bands: @see enum vpu_job_scheduling_priority_band 329 313 */ 330 314 u32 priority_band; 331 - /* Inside realtime band assigns a further priority, limited to 0..31 range */ 315 + /** Inside realtime band assigns a further priority, limited to 0..31 range */ 332 316 u32 realtime_priority_level; 333 317 u32 reserved_0[9]; 334 318 }; ··· 353 337 VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2, 354 338 }; 355 339 356 - /* 340 + /** 357 341 * HWS specific log buffer header details. 358 342 * Total size is 32 bytes. 359 343 */ 360 344 struct vpu_hws_log_buffer_header { 361 - /* Written by VPU after adding a log entry. Initialised by host to 0. */ 345 + /** Written by VPU after adding a log entry. Initialised by host to 0. */ 362 346 u32 first_free_entry_index; 363 - /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ 347 + /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ 364 348 u32 wraparound_count; 365 - /* 349 + /** 366 350 * This is the number of buffers that can be stored in the log buffer provided by the host. 367 351 * It is written by host before passing buffer to VPU. VPU should consider it read-only. 368 352 */ ··· 370 354 u64 reserved[2]; 371 355 }; 372 356 373 - /* 357 + /** 374 358 * HWS specific log buffer entry details. 375 359 * Total size is 32 bytes. 
376 360 */ 377 361 struct vpu_hws_log_buffer_entry { 378 - /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ 362 + /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ 379 363 u64 vpu_timestamp; 380 - /* 364 + /** 381 365 * Operation type: 382 366 * 0 - context state change 383 367 * 1 - queue new work ··· 387 371 */ 388 372 u32 operation_type; 389 373 u32 reserved; 390 - /* Operation data depends on operation type */ 374 + /** Operation data depends on operation type */ 391 375 u64 operation_data[2]; 392 376 }; 393 377 ··· 397 381 VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2 398 382 }; 399 383 400 - /* HWS native fence log buffer header. */ 384 + /** HWS native fence log buffer header. */ 401 385 struct vpu_hws_native_fence_log_header { 402 386 union { 403 387 struct { 404 - /* Index of the first free entry in buffer. */ 388 + /** Index of the first free entry in buffer. */ 405 389 u32 first_free_entry_idx; 406 - /* Incremented each time NPU wraps around the buffer to write next entry. */ 390 + /** 391 + * Incremented each time NPU wraps around 392 + * the buffer to write next entry. 393 + */ 407 394 u32 wraparound_count; 408 395 }; 409 - /* Field allowing atomic update of both fields above. */ 396 + /** Field allowing atomic update of both fields above. */ 410 397 u64 atomic_wraparound_and_entry_idx; 411 398 }; 412 - /* Log buffer type, see enum vpu_hws_native_fence_log_type. */ 399 + /** Log buffer type, see enum vpu_hws_native_fence_log_type. */ 413 400 u64 type; 414 - /* Allocated number of entries in the log buffer. */ 401 + /** Allocated number of entries in the log buffer. */ 415 402 u64 entry_nb; 416 403 u64 reserved[2]; 417 404 }; 418 405 419 - /* Native fence log operation types. */ 406 + /** Native fence log operation types. 
*/ 420 407 enum vpu_hws_native_fence_log_op { 421 408 VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0, 422 409 VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1 423 410 }; 424 411 425 - /* HWS native fence log entry. */ 412 + /** HWS native fence log entry. */ 426 413 struct vpu_hws_native_fence_log_entry { 427 - /* Newly signaled/unblocked fence value. */ 414 + /** Newly signaled/unblocked fence value. */ 428 415 u64 fence_value; 429 - /* Native fence object handle to which this operation belongs. */ 416 + /** Native fence object handle to which this operation belongs. */ 430 417 u64 fence_handle; 431 - /* Operation type, see enum vpu_hws_native_fence_log_op. */ 418 + /** Operation type, see enum vpu_hws_native_fence_log_op. */ 432 419 u64 op_type; 433 420 u64 reserved_0; 434 - /* 421 + /** 435 422 * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence 436 423 * wait was started (in NPU SysTime). 437 424 */ 438 425 u64 fence_wait_start_ts; 439 426 u64 reserved_1; 440 - /* Timestamp at which fence operation was completed (in NPU SysTime). */ 427 + /** Timestamp at which fence operation was completed (in NPU SysTime). */ 441 428 u64 fence_end_ts; 442 429 }; 443 430 444 - /* Native fence log buffer. */ 431 + /** Native fence log buffer. */ 445 432 struct vpu_hws_native_fence_log_buffer { 446 433 struct vpu_hws_native_fence_log_header header; 447 434 struct vpu_hws_native_fence_log_entry entry[]; ··· 469 450 * after preemption or when resubmitting jobs to the queue. 470 451 */ 471 452 VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101, 453 + /** 454 + * OS scheduling doorbell register command 455 + * @see vpu_ipc_msg_payload_register_db 456 + */ 472 457 VPU_JSM_MSG_REGISTER_DB = 0x1102, 458 + /** 459 + * OS scheduling doorbell unregister command 460 + * @see vpu_ipc_msg_payload_unregister_db 461 + */ 473 462 VPU_JSM_MSG_UNREGISTER_DB = 0x1103, 463 + /** 464 + * Query engine heartbeat. 
Heartbeat is expected to increase monotonically 465 + * and increase while work is being progressed by NPU. 466 + * @see vpu_ipc_msg_payload_query_engine_hb 467 + */ 474 468 VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104, 475 469 VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105, 476 470 VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106, ··· 509 477 * aborted and removed from internal scheduling queues. All doorbells assigned 510 478 * to the host_ssid are unregistered and any internal FW resources belonging to 511 479 * the host_ssid are released. 480 + * @see vpu_ipc_msg_payload_ssid_release 512 481 */ 513 482 VPU_JSM_MSG_SSID_RELEASE = 0x110e, 514 483 /** ··· 537 504 * @see vpu_jsm_metric_streamer_start 538 505 */ 539 506 VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112, 540 - /** Control command: Priority band setup */ 507 + /** 508 + * Control command: Priority band setup 509 + * @see vpu_ipc_msg_payload_hws_priority_band_setup 510 + */ 541 511 VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113, 542 - /** Control command: Create command queue */ 512 + /** 513 + * Control command: Create command queue 514 + * @see vpu_ipc_msg_payload_hws_create_cmdq 515 + */ 543 516 VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114, 544 - /** Control command: Destroy command queue */ 517 + /** 518 + * Control command: Destroy command queue 519 + * @see vpu_ipc_msg_payload_hws_destroy_cmdq 520 + */ 545 521 VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115, 546 - /** Control command: Set context scheduling properties */ 522 + /** 523 + * Control command: Set context scheduling properties 524 + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties 525 + */ 547 526 VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116, 548 - /* 527 + /** 549 528 * Register a doorbell to notify VPU of new work. The doorbell may later be 550 529 * deallocated or reassigned to another context. 
530 + * @see vpu_jsm_hws_register_db 551 531 */ 552 532 VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117, 553 - /** Control command: Log buffer setting */ 533 + /** 534 + * Control command: Log buffer setting 535 + * @see vpu_ipc_msg_payload_hws_set_scheduling_log 536 + */ 554 537 VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118, 555 - /* Control command: Suspend command queue. */ 538 + /** 539 + * Control command: Suspend command queue. 540 + * @see vpu_ipc_msg_payload_hws_suspend_cmdq 541 + */ 556 542 VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119, 557 - /* Control command: Resume command queue */ 543 + /** 544 + * Control command: Resume command queue 545 + * @see vpu_ipc_msg_payload_hws_resume_cmdq 546 + */ 558 547 VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a, 559 - /* Control command: Resume engine after reset */ 548 + /** 549 + * Control command: Resume engine after reset 550 + * @see vpu_ipc_msg_payload_hws_resume_engine 551 + */ 560 552 VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b, 561 553 /* Control command: Enable survivability/DCT mode */ 562 554 VPU_JSM_MSG_DCT_ENABLE = 0x111c, ··· 598 540 VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD, 599 541 /** 600 542 * Control dyndbg behavior by executing a dyndbg command; equivalent to 601 - * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`. 
543 + * Linux command: 544 + * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim 602 545 */ 603 546 VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201, 604 547 /** ··· 609 550 610 551 /* IPC Device -> Host, Job completion */ 611 552 VPU_JSM_MSG_JOB_DONE = 0x2100, 612 - /* IPC Device -> Host, Fence signalled */ 553 + /** 554 + * IPC Device -> Host, Fence signalled 555 + * @see vpu_ipc_msg_payload_native_fence_signalled 556 + */ 613 557 VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101, 614 558 615 559 /* IPC Device -> Host, Async command completion */ 616 560 VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200, 561 + /** 562 + * IPC Device -> Host, engine reset complete 563 + * @see vpu_ipc_msg_payload_engine_reset_done 564 + */ 617 565 VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE, 618 566 VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201, 619 567 VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202, 620 568 VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203, 569 + /** 570 + * Response to query engine heartbeat. 571 + * @see vpu_ipc_msg_payload_query_engine_hb_done 572 + */ 621 573 VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204, 622 574 VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205, 623 575 VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206, ··· 645 575 VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c, 646 576 /** Response to VPU_JSM_MSG_TRACE_GET_NAME. */ 647 577 VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d, 648 - /** Response to VPU_JSM_MSG_SSID_RELEASE. */ 578 + /** 579 + * Response to VPU_JSM_MSG_SSID_RELEASE. 580 + * @see vpu_ipc_msg_payload_ssid_release 581 + */ 649 582 VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e, 650 583 /** 651 584 * Response to VPU_JSM_MSG_METRIC_STREAMER_START. ··· 678 605 /** 679 606 * Asynchronous event sent from the VPU to the host either when the current 680 607 * metric buffer is full or when the VPU has collected a multiple of 681 - * @notify_sample_count samples as indicated through the start command 682 - * (VPU_JSM_MSG_METRIC_STREAMER_START). 
Returns information about collected 683 - * metric data. 608 + * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated 609 + * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns 610 + * information about collected metric data. 684 611 * @see vpu_jsm_metric_streamer_done 685 612 */ 686 613 VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213, 687 - /** Response to control command: Priority band setup */ 614 + /** 615 + * Response to control command: Priority band setup 616 + * @see vpu_ipc_msg_payload_hws_priority_band_setup 617 + */ 688 618 VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214, 689 - /** Response to control command: Create command queue */ 619 + /** 620 + * Response to control command: Create command queue 621 + * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp 622 + */ 690 623 VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215, 691 - /** Response to control command: Destroy command queue */ 624 + /** 625 + * Response to control command: Destroy command queue 626 + * @see vpu_ipc_msg_payload_hws_destroy_cmdq 627 + */ 692 628 VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216, 693 - /** Response to control command: Set context scheduling properties */ 629 + /** 630 + * Response to control command: Set context scheduling properties 631 + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties 632 + */ 694 633 VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217, 695 - /** Response to control command: Log buffer setting */ 634 + /** 635 + * Response to control command: Log buffer setting 636 + * @see vpu_ipc_msg_payload_hws_set_scheduling_log 637 + */ 696 638 VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218, 697 - /* IPC Device -> Host, HWS notify index entry of log buffer written */ 639 + /** 640 + * IPC Device -> Host, HWS notify index entry of log buffer written 641 + * @see vpu_ipc_msg_payload_hws_scheduling_log_notification 642 + */ 698 643 VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219, 699 - /* IPC Device -> Host, HWS 
completion of a context suspend request */ 644 + /** 645 + * IPC Device -> Host, HWS completion of a context suspend request 646 + * @see vpu_ipc_msg_payload_hws_suspend_cmdq 647 + */ 700 648 VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a, 701 - /* Response to control command: Resume command queue */ 649 + /** 650 + * Response to control command: Resume command queue 651 + * @see vpu_ipc_msg_payload_hws_resume_cmdq 652 + */ 702 653 VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b, 703 - /* Response to control command: Resume engine command response */ 654 + /** 655 + * Response to control command: Resume engine command response 656 + * @see vpu_ipc_msg_payload_hws_resume_engine 657 + */ 704 658 VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c, 705 659 /* Response to control command: Enable survivability/DCT mode */ 706 660 VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d, ··· 770 670 u32 preempt_id; 771 671 }; 772 672 773 - /* 774 - * @brief Register doorbell command structure. 673 + /** 674 + * Register doorbell command structure. 775 675 * This structure supports doorbell registration for only OS scheduling. 776 676 * @see VPU_JSM_MSG_REGISTER_DB 777 677 */ 778 678 struct vpu_ipc_msg_payload_register_db { 779 - /* Index of the doorbell to register. */ 679 + /** Index of the doorbell to register. */ 780 680 u32 db_idx; 781 - /* Reserved */ 681 + /** Reserved */ 782 682 u32 reserved_0; 783 - /* Virtual address in Global GTT pointing to the start of job queue. */ 683 + /** Virtual address in Global GTT pointing to the start of job queue. */ 784 684 u64 jobq_base; 785 - /* Size of the job queue in bytes. */ 685 + /** Size of the job queue in bytes. */ 786 686 u32 jobq_size; 787 - /* Host sub-stream ID for the context assigned to the doorbell. */ 687 + /** Host sub-stream ID for the context assigned to the doorbell. */ 788 688 u32 host_ssid; 789 689 }; 790 690 791 691 /** 792 - * @brief Unregister doorbell command structure. 692 + * Unregister doorbell command structure. 
793 693 * Request structure to unregister a doorbell for both HW and OS scheduling. 794 694 * @see VPU_JSM_MSG_UNREGISTER_DB 795 695 */ 796 696 struct vpu_ipc_msg_payload_unregister_db { 797 - /* Index of the doorbell to unregister. */ 697 + /** Index of the doorbell to unregister. */ 798 698 u32 db_idx; 799 - /* Reserved */ 699 + /** Reserved */ 800 700 u32 reserved_0; 801 701 }; 802 702 703 + /** 704 + * Heartbeat request structure 705 + * @see VPU_JSM_MSG_QUERY_ENGINE_HB 706 + */ 803 707 struct vpu_ipc_msg_payload_query_engine_hb { 804 - /* Engine to return heartbeat value. */ 708 + /** Engine to return heartbeat value. */ 805 709 u32 engine_idx; 806 - /* Reserved */ 710 + /** Reserved */ 807 711 u32 reserved_0; 808 712 }; 809 713 ··· 827 723 u32 reserved_0; 828 724 }; 829 725 726 + /** 727 + * Structure for requesting ssid release 728 + * @see VPU_JSM_MSG_SSID_RELEASE 729 + */ 830 730 struct vpu_ipc_msg_payload_ssid_release { 831 - /* Host sub-stream ID for the context to be released. */ 731 + /** Host sub-stream ID for the context to be released. */ 832 732 u32 host_ssid; 833 - /* Reserved */ 733 + /** Reserved */ 834 734 u32 reserved_0; 835 735 }; 836 736 ··· 860 752 u64 sampling_rate; 861 753 /** 862 754 * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message 863 - * after every @notify_sample_count samples is collected or dropped by the VPU. 755 + * after every @ref notify_sample_count samples is collected or dropped by the VPU. 864 756 * If set to UINT_MAX the VPU will only generate a notification when the metric 865 757 * buffer is full. If set to 0 the VPU will never generate a notification. 866 758 */ ··· 870 762 * Address and size of the buffer where the VPU will write metric data. The 871 763 * VPU writes all counters from enabled metric groups one after another. 
If 872 764 * there is no space left to write data at the next sample period the VPU 873 - * will switch to the next buffer (@see next_buffer_addr) and will optionally 874 - * send a notification to the host driver if @notify_sample_count is non-zero. 875 - * If @next_buffer_addr is NULL the VPU will stop collecting metric data. 765 + * will switch to the next buffer (@ref next_buffer_addr) and will optionally 766 + * send a notification to the host driver if @ref notify_sample_count is non-zero. 767 + * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data. 876 768 */ 877 769 u64 buffer_addr; 878 770 u64 buffer_size; ··· 952 844 u64 cmdq_id; 953 845 }; 954 846 955 - /* 847 + /** 956 848 * Notification message upon native fence signalling. 957 849 * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED 958 850 */ 959 851 struct vpu_ipc_msg_payload_native_fence_signalled { 960 - /* Engine ID. */ 852 + /** Engine ID. */ 961 853 u32 engine_idx; 962 - /* Host SSID. */ 854 + /** Host SSID. */ 963 855 u32 host_ssid; 964 - /* CMDQ ID */ 856 + /** CMDQ ID */ 965 857 u64 cmdq_id; 966 - /* Fence object handle. */ 858 + /** Fence object handle. */ 967 859 u64 fence_handle; 968 860 }; 969 861 862 + /** 863 + * vpu_ipc_msg_payload_engine_reset_done will contain an array of this structure 864 + * which contains which queues caused reset if FW was able to detect any error. 865 + * @see vpu_ipc_msg_payload_engine_reset_done 866 + */ 970 867 struct vpu_jsm_engine_reset_context { 971 - /* Host SSID */ 868 + /** Host SSID */ 972 869 u32 host_ssid; 973 - /* Zero Padding */ 870 + /** Zero Padding */ 974 871 u32 reserved_0; 975 - /* Command queue id */ 872 + /** Command queue id */ 976 873 u64 cmdq_id; 977 - /* See VPU_ENGINE_RESET_CONTEXT_* defines */ 874 + /** See VPU_ENGINE_RESET_CONTEXT_* defines */ 978 875 u64 flags; 979 876 }; 980 877 878 + /** 879 + * Engine reset response. 
880 + * @see VPU_JSM_MSG_ENGINE_RESET_DONE 881 + */ 981 882 struct vpu_ipc_msg_payload_engine_reset_done { 982 - /* Engine ordinal */ 883 + /** Engine ordinal */ 983 884 u32 engine_idx; 984 - /* Number of impacted contexts */ 885 + /** Number of impacted contexts */ 985 886 u32 num_impacted_contexts; 986 - /* Array of impacted command queue ids and their flags */ 887 + /** Array of impacted command queue ids and their flags */ 987 888 struct vpu_jsm_engine_reset_context 988 889 impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS]; 989 890 }; ··· 1029 912 u32 reserved_0; 1030 913 }; 1031 914 915 + /** 916 + * Structure for heartbeat response 917 + * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE 918 + */ 1032 919 struct vpu_ipc_msg_payload_query_engine_hb_done { 1033 - /* Engine returning heartbeat value. */ 920 + /** Engine returning heartbeat value. */ 1034 921 u32 engine_idx; 1035 - /* Reserved */ 922 + /** Reserved */ 1036 923 u32 reserved_0; 1037 - /* Heartbeat value. */ 924 + /** Heartbeat value. */ 1038 925 u64 heartbeat; 1039 926 }; 1040 927 ··· 1058 937 u8 power_limit[16]; 1059 938 }; 1060 939 1061 - /* HWS priority band setup request / response */ 940 + /** 941 + * HWS priority band setup request / response 942 + * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP 943 + */ 1062 944 struct vpu_ipc_msg_payload_hws_priority_band_setup { 1063 945 /* 1064 946 * Grace period in 100ns units when preempting another priority band for ··· 1088 964 * TDR timeout value in milliseconds. Default value of 0 meaning no timeout. 1089 965 */ 1090 966 u32 tdr_timeout; 967 + /* Non-interactive queue timeout for no progress of heartbeat in milliseconds. 968 + * Default value of 0 meaning no timeout. 969 + */ 970 + u32 non_interactive_no_progress_timeout; 971 + /* 972 + * Non-interactive queue upper limit timeout value in milliseconds. Default 973 + * value of 0 meaning no timeout. 
974 + */ 975 + u32 non_interactive_timeout; 1091 976 }; 1092 977 1093 - /* 978 + /** 1094 979 * @brief HWS create command queue request. 1095 980 * Host will create a command queue via this command. 1096 981 * Note: Cmdq group is a handle of an object which 1097 982 * may contain one or more command queues. 1098 983 * @see VPU_JSM_MSG_CREATE_CMD_QUEUE 1099 - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP 1100 984 */ 1101 985 struct vpu_ipc_msg_payload_hws_create_cmdq { 1102 986 /* Process id */ ··· 1125 993 u32 reserved_0; 1126 994 }; 1127 995 1128 - /* 1129 - * @brief HWS create command queue response. 1130 - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE 996 + /** 997 + * HWS create command queue response. 1131 998 * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP 1132 999 */ 1133 1000 struct vpu_ipc_msg_payload_hws_create_cmdq_rsp { 1134 - /* Process id */ 1001 + /** Process id */ 1135 1002 u64 process_id; 1136 - /* Host SSID */ 1003 + /** Host SSID */ 1137 1004 u32 host_ssid; 1138 - /* Engine for which queue is being created */ 1005 + /** Engine for which queue is being created */ 1139 1006 u32 engine_idx; 1140 - /* Command queue group */ 1007 + /** Command queue group */ 1141 1008 u64 cmdq_group; 1142 - /* Command queue id */ 1009 + /** Command queue id */ 1143 1010 u64 cmdq_id; 1144 1011 }; 1145 1012 1146 - /* HWS destroy command queue request / response */ 1013 + /** 1014 + * HWS destroy command queue request / response 1015 + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE 1016 + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP 1017 + */ 1147 1018 struct vpu_ipc_msg_payload_hws_destroy_cmdq { 1148 - /* Host SSID */ 1019 + /** Host SSID */ 1149 1020 u32 host_ssid; 1150 - /* Zero Padding */ 1021 + /** Zero Padding */ 1151 1022 u32 reserved; 1152 - /* Command queue id */ 1023 + /** Command queue id */ 1153 1024 u64 cmdq_id; 1154 1025 }; 1155 1026 1156 - /* HWS set context scheduling properties request / response */ 1027 + /** 1028 + * HWS set context scheduling properties request / response 1029 + * @see 
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES 1030 + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP 1031 + */ 1157 1032 struct vpu_ipc_msg_payload_hws_set_context_sched_properties { 1158 - /* Host SSID */ 1033 + /** Host SSID */ 1159 1034 u32 host_ssid; 1160 - /* Zero Padding */ 1035 + /** Zero Padding */ 1161 1036 u32 reserved_0; 1162 - /* Command queue id */ 1037 + /** Command queue id */ 1163 1038 u64 cmdq_id; 1164 - /* 1039 + /** 1165 1040 * Priority band to assign to work of this context. 1166 1041 * Available priority bands: @see enum vpu_job_scheduling_priority_band 1167 1042 */ 1168 1043 u32 priority_band; 1169 - /* Inside realtime band assigns a further priority */ 1044 + /** Inside realtime band assigns a further priority */ 1170 1045 u32 realtime_priority_level; 1171 - /* Priority relative to other contexts in the same process */ 1046 + /** Priority relative to other contexts in the same process */ 1172 1047 s32 in_process_priority; 1173 - /* Zero padding / Reserved */ 1048 + /** Zero padding / Reserved */ 1174 1049 u32 reserved_1; 1175 - /* 1050 + /** 1176 1051 * Context quantum relative to other contexts of same priority in the same process 1177 1052 * Minimum value supported by NPU is 1ms (10000 in 100ns units). 1178 1053 */ 1179 1054 u64 context_quantum; 1180 - /* Grace period when preempting context of the same priority within the same process */ 1055 + /** Grace period when preempting context of the same priority within the same process */ 1181 1056 u64 grace_period_same_priority; 1182 - /* Grace period when preempting context of a lower priority within the same process */ 1057 + /** Grace period when preempting context of a lower priority within the same process */ 1183 1058 u64 grace_period_lower_priority; 1184 1059 }; 1185 1060 1186 - /* 1187 - * @brief Register doorbell command structure. 1061 + /** 1062 + * Register doorbell command structure. 1188 1063 * This structure supports doorbell registration for both HW and OS scheduling. 
1189 1064 * Note: Queue base and size are added here so that the same structure can be used for 1190 1065 * OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored ··· 1200 1061 * @see VPU_JSM_MSG_HWS_REGISTER_DB 1201 1062 */ 1202 1063 struct vpu_jsm_hws_register_db { 1203 - /* Index of the doorbell to register. */ 1064 + /** Index of the doorbell to register. */ 1204 1065 u32 db_id; 1205 - /* Host sub-stream ID for the context assigned to the doorbell. */ 1066 + /** Host sub-stream ID for the context assigned to the doorbell. */ 1206 1067 u32 host_ssid; 1207 - /* ID of the command queue associated with the doorbell. */ 1068 + /** ID of the command queue associated with the doorbell. */ 1208 1069 u64 cmdq_id; 1209 - /* Virtual address pointing to the start of command queue. */ 1070 + /** Virtual address pointing to the start of command queue. */ 1210 1071 u64 cmdq_base; 1211 - /* Size of the command queue in bytes. */ 1072 + /** Size of the command queue in bytes. */ 1212 1073 u64 cmdq_size; 1213 1074 }; 1214 1075 1215 - /* 1216 - * @brief Structure to set another buffer to be used for scheduling-related logging. 1076 + /** 1077 + * Structure to set another buffer to be used for scheduling-related logging. 1217 1078 * The size of the logging buffer and the number of entries is defined as part of the 1218 1079 * buffer itself as described next. 1219 1080 * The log buffer received from the host is made up of; 1220 - * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'. 1081 + * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header. 1221 1082 * The header contains the number of log entries in the buffer. 1222 1083 * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in 1223 - * 'struct vpu_hws_log_buffer_entry'. 1084 + * @ref vpu_hws_log_buffer_entry. 1224 1085 * The entry contains the VPU timestamp, operation type and data. 
1225 1086 * The host should provide the notify index value of log buffer to VPU. This is a 1226 1087 * value defined within the log buffer and when written to will generate the ··· 1234 1095 * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION 1235 1096 */ 1236 1097 struct vpu_ipc_msg_payload_hws_set_scheduling_log { 1237 - /* Engine ordinal */ 1098 + /** Engine ordinal */ 1238 1099 u32 engine_idx; 1239 - /* Host SSID */ 1100 + /** Host SSID */ 1240 1101 u32 host_ssid; 1241 - /* 1102 + /** 1242 1103 * VPU log buffer virtual address. 1243 1104 * Set to 0 to disable logging for this engine. 1244 1105 */ 1245 1106 u64 vpu_log_buffer_va; 1246 - /* 1107 + /** 1247 1108 * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION 1248 1109 * is generated when an event log is written to this index. 1249 1110 */ 1250 1111 u64 notify_index; 1251 - /* 1112 + /** 1252 1113 * Field is now deprecated, will be removed when KMD is updated to support removal 1253 1114 */ 1254 1115 u32 enable_extra_events; 1255 - /* Zero Padding */ 1116 + /** Zero Padding */ 1256 1117 u32 reserved_0; 1257 1118 }; 1258 1119 1259 - /* 1260 - * @brief The scheduling log notification is generated by VPU when it writes 1120 + /** 1121 + * The scheduling log notification is generated by VPU when it writes 1261 1122 * an event into the log buffer at the notify_index. VPU notifies host with 1262 1123 * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous 1263 1124 * message from VPU to host. ··· 1265 1126 * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG 1266 1127 */ 1267 1128 struct vpu_ipc_msg_payload_hws_scheduling_log_notification { 1268 - /* Engine ordinal */ 1129 + /** Engine ordinal */ 1269 1130 u32 engine_idx; 1270 - /* Zero Padding */ 1131 + /** Zero Padding */ 1271 1132 u32 reserved_0; 1272 1133 }; 1273 1134 1274 - /* 1275 - * @brief HWS suspend command queue request and done structure. 1135 + /** 1136 + * HWS suspend command queue request and done structure. 
1276 1137 * Host will request the suspend of contexts and VPU will; 1277 1138 * - Suspend all work on this context 1278 1139 * - Preempt any running work ··· 1291 1152 * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE 1292 1153 */ 1293 1154 struct vpu_ipc_msg_payload_hws_suspend_cmdq { 1294 - /* Host SSID */ 1155 + /** Host SSID */ 1295 1156 u32 host_ssid; 1296 - /* Zero Padding */ 1157 + /** Zero Padding */ 1297 1158 u32 reserved_0; 1298 - /* Command queue id */ 1159 + /** Command queue id */ 1299 1160 u64 cmdq_id; 1300 - /* 1161 + /** 1301 1162 * Suspend fence value - reported by the VPU suspend context 1302 1163 * completed once suspend is complete. 1303 1164 */ 1304 1165 u64 suspend_fence_value; 1305 1166 }; 1306 1167 1307 - /* 1308 - * @brief HWS Resume command queue request / response structure. 1168 + /** 1169 + * HWS Resume command queue request / response structure. 1309 1170 * Host will request the resume of a context; 1310 1171 * - VPU will resume all work on this context 1311 1172 * - Scheduler will allow this context to be scheduled ··· 1313 1174 * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP 1314 1175 */ 1315 1176 struct vpu_ipc_msg_payload_hws_resume_cmdq { 1316 - /* Host SSID */ 1177 + /** Host SSID */ 1317 1178 u32 host_ssid; 1318 - /* Zero Padding */ 1179 + /** Zero Padding */ 1319 1180 u32 reserved_0; 1320 - /* Command queue id */ 1181 + /** Command queue id */ 1321 1182 u64 cmdq_id; 1322 1183 }; 1323 1184 1324 - /* 1325 - * @brief HWS Resume engine request / response structure. 1326 - * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume. 1185 + /** 1186 + * HWS Resume engine request / response structure. 1187 + * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume. 1327 1188 * Host shall send this command to resume scheduling of any valid queue. 
1328 - * @see VPU_JSM_MSG_HWS_RESUME_ENGINE 1189 + * @see VPU_JSM_MSG_HWS_ENGINE_RESUME 1329 1190 * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE 1330 1191 */ 1331 1192 struct vpu_ipc_msg_payload_hws_resume_engine { 1332 - /* Engine to be resumed */ 1193 + /** Engine to be resumed */ 1333 1194 u32 engine_idx; 1334 - /* Reserved */ 1195 + /** Reserved */ 1335 1196 u32 reserved_0; 1336 1197 }; 1337 1198 ··· 1465 1326 /** 1466 1327 * Metric group description placed in the metric buffer after successful completion 1467 1328 * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more 1468 - * @vpu_jsm_metric_counter_descriptor records. 1329 + * @ref vpu_jsm_metric_counter_descriptor records. 1469 1330 * @see VPU_JSM_MSG_METRIC_STREAMER_INFO 1470 1331 */ 1471 1332 struct vpu_jsm_metric_group_descriptor {
+2 -1
drivers/gpu/drm/Makefile
··· 150 150 drm_plane_helper.o \ 151 151 drm_probe_helper.o \ 152 152 drm_self_refresh_helper.o \ 153 - drm_simple_kms_helper.o 153 + drm_simple_kms_helper.o \ 154 + drm_vblank_helper.o 154 155 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o 155 156 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o 156 157 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 198 198 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj); 199 199 200 200 amdgpu_hmm_unregister(aobj); 201 - ttm_bo_put(&aobj->tbo); 201 + ttm_bo_fini(&aobj->tbo); 202 202 } 203 203 204 204 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+2 -1
drivers/gpu/drm/ast/Makefile
··· 6 6 ast-y := \ 7 7 ast_2000.o \ 8 8 ast_2100.o \ 9 + ast_2200.o \ 9 10 ast_2300.o \ 11 + ast_2400.o \ 10 12 ast_2500.o \ 11 13 ast_2600.o \ 12 14 ast_cursor.o \ ··· 16 14 ast_dp501.o \ 17 15 ast_dp.o \ 18 16 ast_drv.o \ 19 - ast_main.o \ 20 17 ast_mm.o \ 21 18 ast_mode.o \ 22 19 ast_post.o \
+101
drivers/gpu/drm/ast/ast_2000.c
··· 27 27 */ 28 28 29 29 #include <linux/delay.h> 30 + #include <linux/pci.h> 31 + 32 + #include <drm/drm_drv.h> 30 33 31 34 #include "ast_drv.h" 32 35 #include "ast_post.h" ··· 149 146 } 150 147 151 148 return 0; 149 + } 150 + 151 + /* 152 + * Mode setting 153 + */ 154 + 155 + const struct ast_vbios_dclk_info ast_2000_dclk_table[] = { 156 + {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */ 157 + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ 158 + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ 159 + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ 160 + {0xee, 0x67, 0x01}, /* 04: VCLK40 */ 161 + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ 162 + {0xc6, 0x64, 0x01}, /* 06: VCLK50 */ 163 + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ 164 + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ 165 + {0x7b, 0x63, 0x00}, /* 09: VCLK75 */ 166 + {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */ 167 + {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */ 168 + {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */ 169 + {0x85, 0x24, 0x00}, /* 0d: VCLK135 */ 170 + {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */ 171 + {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */ 172 + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ 173 + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ 174 + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 175 + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 176 + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 177 + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ 178 + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ 179 + {0x77, 0x58, 0x80}, /* 17: VCLK119 */ 180 + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ 181 + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ 182 + {0x3b, 0x2c, 0x81}, /* 1a: VCLK118_25 */ 183 + }; 184 + 185 + /* 186 + * Device initialization 187 + */ 188 + 189 + void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post) 190 + { 191 + enum ast_tx_chip tx_chip = AST_TX_NONE; 192 + u8 vgacra3; 193 + 194 + /* 195 + * VGACRA3 Enhanced Color Mode Register, check if DVO is already 196 + * enabled, in that case, assume we have a SIL164 TMDS transmitter 197 + * 198 + * Don't make that assumption if we the chip wasn't enabled 
and 199 + * is at power-on reset, otherwise we'll incorrectly "detect" a 200 + * SIL164 when there is none. 201 + */ 202 + if (!need_post) { 203 + vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); 204 + if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED) 205 + tx_chip = AST_TX_SIL164; 206 + } 207 + 208 + __ast_device_set_tx_chip(ast, tx_chip); 209 + } 210 + 211 + struct drm_device *ast_2000_device_create(struct pci_dev *pdev, 212 + const struct drm_driver *drv, 213 + enum ast_chip chip, 214 + enum ast_config_mode config_mode, 215 + void __iomem *regs, 216 + void __iomem *ioregs, 217 + bool need_post) 218 + { 219 + struct drm_device *dev; 220 + struct ast_device *ast; 221 + int ret; 222 + 223 + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); 224 + if (IS_ERR(ast)) 225 + return ERR_CAST(ast); 226 + dev = &ast->base; 227 + 228 + ast_device_init(ast, chip, config_mode, regs, ioregs); 229 + 230 + ast_2000_detect_tx_chip(ast, need_post); 231 + 232 + if (need_post) { 233 + ret = ast_post_gpu(ast); 234 + if (ret) 235 + return ERR_PTR(ret); 236 + } 237 + 238 + ret = ast_mm_init(ast); 239 + if (ret) 240 + return ERR_PTR(ret); 241 + 242 + ret = ast_mode_config_init(ast); 243 + if (ret) 244 + return ERR_PTR(ret); 245 + 246 + return dev; 152 247 }
+85
drivers/gpu/drm/ast/ast_2100.c
··· 27 27 */ 28 28 29 29 #include <linux/delay.h> 30 + #include <linux/pci.h> 31 + 32 + #include <drm/drm_drv.h> 30 33 31 34 #include "ast_drv.h" 32 35 #include "ast_post.h" ··· 388 385 } 389 386 390 387 return 0; 388 + } 389 + 390 + /* 391 + * Widescreen detection 392 + */ 393 + 394 + /* Try to detect WSXGA+ on Gen2+ */ 395 + bool __ast_2100_detect_wsxga_p(struct ast_device *ast) 396 + { 397 + u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0); 398 + 399 + if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)) 400 + return true; 401 + if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN) 402 + return true; 403 + 404 + return false; 405 + } 406 + 407 + /* Try to detect WUXGA on Gen2+ */ 408 + bool __ast_2100_detect_wuxga(struct ast_device *ast) 409 + { 410 + u8 vgacrd1; 411 + 412 + if (ast->support_fullhd) { 413 + vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); 414 + if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA)) 415 + return true; 416 + } 417 + 418 + return false; 419 + } 420 + 421 + static void ast_2100_detect_widescreen(struct ast_device *ast) 422 + { 423 + if (__ast_2100_detect_wsxga_p(ast)) { 424 + ast->support_wsxga_p = true; 425 + if (ast->chip == AST2100) 426 + ast->support_fullhd = true; 427 + } 428 + if (__ast_2100_detect_wuxga(ast)) 429 + ast->support_wuxga = true; 430 + } 431 + 432 + struct drm_device *ast_2100_device_create(struct pci_dev *pdev, 433 + const struct drm_driver *drv, 434 + enum ast_chip chip, 435 + enum ast_config_mode config_mode, 436 + void __iomem *regs, 437 + void __iomem *ioregs, 438 + bool need_post) 439 + { 440 + struct drm_device *dev; 441 + struct ast_device *ast; 442 + int ret; 443 + 444 + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); 445 + if (IS_ERR(ast)) 446 + return ERR_CAST(ast); 447 + dev = &ast->base; 448 + 449 + ast_device_init(ast, chip, config_mode, regs, ioregs); 450 + 451 + ast_2000_detect_tx_chip(ast, need_post); 452 + 453 + if (need_post) { 454 + ret = ast_post_gpu(ast); 455 + if (ret) 456 + 
return ERR_PTR(ret); 457 + } 458 + 459 + ret = ast_mm_init(ast); 460 + if (ret) 461 + return ERR_PTR(ret); 462 + 463 + ast_2100_detect_widescreen(ast); 464 + 465 + ret = ast_mode_config_init(ast); 466 + if (ret) 467 + return ERR_PTR(ret); 468 + 469 + return dev; 391 470 }
+85
drivers/gpu/drm/ast/ast_2200.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2012 Red Hat Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + * 21 + * The above copyright notice and this permission notice (including the 22 + * next paragraph) shall be included in all copies or substantial portions 23 + * of the Software. 
24 + */ 25 + /* 26 + * Authors: Dave Airlie <airlied@redhat.com> 27 + */ 28 + 29 + #include <linux/pci.h> 30 + 31 + #include <drm/drm_drv.h> 32 + 33 + #include "ast_drv.h" 34 + 35 + static void ast_2200_detect_widescreen(struct ast_device *ast) 36 + { 37 + if (__ast_2100_detect_wsxga_p(ast)) { 38 + ast->support_wsxga_p = true; 39 + if (ast->chip == AST2200) 40 + ast->support_fullhd = true; 41 + } 42 + if (__ast_2100_detect_wuxga(ast)) 43 + ast->support_wuxga = true; 44 + } 45 + 46 + struct drm_device *ast_2200_device_create(struct pci_dev *pdev, 47 + const struct drm_driver *drv, 48 + enum ast_chip chip, 49 + enum ast_config_mode config_mode, 50 + void __iomem *regs, 51 + void __iomem *ioregs, 52 + bool need_post) 53 + { 54 + struct drm_device *dev; 55 + struct ast_device *ast; 56 + int ret; 57 + 58 + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); 59 + if (IS_ERR(ast)) 60 + return ERR_CAST(ast); 61 + dev = &ast->base; 62 + 63 + ast_device_init(ast, chip, config_mode, regs, ioregs); 64 + 65 + ast_2000_detect_tx_chip(ast, need_post); 66 + 67 + if (need_post) { 68 + ret = ast_post_gpu(ast); 69 + if (ret) 70 + return ERR_PTR(ret); 71 + } 72 + 73 + ret = ast_mm_init(ast); 74 + if (ret) 75 + return ERR_PTR(ret); 76 + 77 + ast_2200_detect_widescreen(ast); 78 + 79 + ret = ast_mode_config_init(ast); 80 + if (ret) 81 + return ERR_PTR(ret); 82 + 83 + return dev; 84 + } 85 +
+128
drivers/gpu/drm/ast/ast_2300.c
··· 27 27 */ 28 28 29 29 #include <linux/delay.h> 30 + #include <linux/pci.h> 31 + #include <linux/sizes.h> 32 + 33 + #include <drm/drm_drv.h> 34 + #include <drm/drm_managed.h> 35 + #include <drm/drm_print.h> 30 36 31 37 #include "ast_drv.h" 32 38 #include "ast_post.h" ··· 1331 1325 } 1332 1326 1333 1327 return 0; 1328 + } 1329 + 1330 + /* 1331 + * Device initialization 1332 + */ 1333 + 1334 + void ast_2300_detect_tx_chip(struct ast_device *ast) 1335 + { 1336 + enum ast_tx_chip tx_chip = AST_TX_NONE; 1337 + struct drm_device *dev = &ast->base; 1338 + u8 vgacrd1; 1339 + 1340 + /* 1341 + * On AST GEN4+, look at the configuration set by the SoC in 1342 + * the SOC scratch register #1 bits 11:8 (interestingly marked 1343 + * as "reserved" in the spec) 1344 + */ 1345 + vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 1346 + AST_IO_VGACRD1_TX_TYPE_MASK); 1347 + switch (vgacrd1) { 1348 + /* 1349 + * GEN4 to GEN6 1350 + */ 1351 + case AST_IO_VGACRD1_TX_SIL164_VBIOS: 1352 + tx_chip = AST_TX_SIL164; 1353 + break; 1354 + case AST_IO_VGACRD1_TX_DP501_VBIOS: 1355 + ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL); 1356 + if (ast->dp501_fw_addr) { 1357 + /* backup firmware */ 1358 + if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) { 1359 + drmm_kfree(dev, ast->dp501_fw_addr); 1360 + ast->dp501_fw_addr = NULL; 1361 + } 1362 + } 1363 + fallthrough; 1364 + case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: 1365 + tx_chip = AST_TX_DP501; 1366 + break; 1367 + /* 1368 + * GEN7+ 1369 + */ 1370 + case AST_IO_VGACRD1_TX_ASTDP: 1371 + tx_chip = AST_TX_ASTDP; 1372 + break; 1373 + /* 1374 + * Several of the listed TX chips are not explicitly supported 1375 + * by the ast driver. If these exist in real-world devices, they 1376 + * are most likely reported as VGA or SIL164 outputs. We warn here 1377 + * to get bug reports for these devices. If none come in for some 1378 + * time, we can begin to fail device probing on these values. 
1379 + */ 1380 + case AST_IO_VGACRD1_TX_ITE66121_VBIOS: 1381 + drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); 1382 + break; 1383 + case AST_IO_VGACRD1_TX_CH7003_VBIOS: 1384 + drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); 1385 + break; 1386 + case AST_IO_VGACRD1_TX_ANX9807_VBIOS: 1387 + drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); 1388 + break; 1389 + } 1390 + 1391 + __ast_device_set_tx_chip(ast, tx_chip); 1392 + } 1393 + 1394 + static void ast_2300_detect_widescreen(struct ast_device *ast) 1395 + { 1396 + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1300) { 1397 + ast->support_wsxga_p = true; 1398 + ast->support_fullhd = true; 1399 + } 1400 + if (__ast_2100_detect_wuxga(ast)) 1401 + ast->support_wuxga = true; 1402 + } 1403 + 1404 + struct drm_device *ast_2300_device_create(struct pci_dev *pdev, 1405 + const struct drm_driver *drv, 1406 + enum ast_chip chip, 1407 + enum ast_config_mode config_mode, 1408 + void __iomem *regs, 1409 + void __iomem *ioregs, 1410 + bool need_post) 1411 + { 1412 + struct drm_device *dev; 1413 + struct ast_device *ast; 1414 + int ret; 1415 + 1416 + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); 1417 + if (IS_ERR(ast)) 1418 + return ERR_CAST(ast); 1419 + dev = &ast->base; 1420 + 1421 + ast_device_init(ast, chip, config_mode, regs, ioregs); 1422 + 1423 + ast_2300_detect_tx_chip(ast); 1424 + 1425 + if (need_post) { 1426 + ret = ast_post_gpu(ast); 1427 + if (ret) 1428 + return ERR_PTR(ret); 1429 + } 1430 + 1431 + ret = ast_mm_init(ast); 1432 + if (ret) 1433 + return ERR_PTR(ret); 1434 + 1435 + /* map reserved buffer */ 1436 + ast->dp501_fw_buf = NULL; 1437 + if (ast->vram_size < pci_resource_len(pdev, 0)) { 1438 + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); 1439 + if (!ast->dp501_fw_buf) 1440 + drm_info(dev, "failed to map reserved buffer!\n"); 1441 + } 1442 + 1443 + 
ast_2300_detect_widescreen(ast); 1444 + 1445 + ret = ast_mode_config_init(ast); 1446 + if (ret) 1447 + return ERR_PTR(ret); 1448 + 1449 + return dev; 1334 1450 }
+93
drivers/gpu/drm/ast/ast_2400.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2012 Red Hat Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + * 21 + * The above copyright notice and this permission notice (including the 22 + * next paragraph) shall be included in all copies or substantial portions 23 + * of the Software. 
24 + * 25 + */ 26 + /* 27 + * Authors: Dave Airlie <airlied@redhat.com> 28 + */ 29 + 30 + #include <linux/pci.h> 31 + 32 + #include <drm/drm_drv.h> 33 + #include <drm/drm_print.h> 34 + 35 + #include "ast_drv.h" 36 + 37 + static void ast_2400_detect_widescreen(struct ast_device *ast) 38 + { 39 + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1400) { 40 + ast->support_wsxga_p = true; 41 + ast->support_fullhd = true; 42 + } 43 + if (__ast_2100_detect_wuxga(ast)) 44 + ast->support_wuxga = true; 45 + } 46 + 47 + struct drm_device *ast_2400_device_create(struct pci_dev *pdev, 48 + const struct drm_driver *drv, 49 + enum ast_chip chip, 50 + enum ast_config_mode config_mode, 51 + void __iomem *regs, 52 + void __iomem *ioregs, 53 + bool need_post) 54 + { 55 + struct drm_device *dev; 56 + struct ast_device *ast; 57 + int ret; 58 + 59 + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); 60 + if (IS_ERR(ast)) 61 + return ERR_CAST(ast); 62 + dev = &ast->base; 63 + 64 + ast_device_init(ast, chip, config_mode, regs, ioregs); 65 + 66 + ast_2300_detect_tx_chip(ast); 67 + 68 + if (need_post) { 69 + ret = ast_post_gpu(ast); 70 + if (ret) 71 + return ERR_PTR(ret); 72 + } 73 + 74 + ret = ast_mm_init(ast); 75 + if (ret) 76 + return ERR_PTR(ret); 77 + 78 + /* map reserved buffer */ 79 + ast->dp501_fw_buf = NULL; 80 + if (ast->vram_size < pci_resource_len(pdev, 0)) { 81 + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); 82 + if (!ast->dp501_fw_buf) 83 + drm_info(dev, "failed to map reserved buffer!\n"); 84 + } 85 + 86 + ast_2400_detect_widescreen(ast); 87 + 88 + ret = ast_mode_config_init(ast); 89 + if (ret) 90 + return ERR_PTR(ret); 91 + 92 + return dev; 93 + }
+98
drivers/gpu/drm/ast/ast_2500.c
··· 27 27 */ 28 28 29 29 #include <linux/delay.h> 30 + #include <linux/pci.h> 30 31 32 + #include <drm/drm_drv.h> 31 33 #include <drm/drm_print.h> 32 34 33 35 #include "ast_drv.h" ··· 568 566 } 569 567 570 568 return 0; 569 + } 570 + 571 + /* 572 + * Mode setting 573 + */ 574 + 575 + const struct ast_vbios_dclk_info ast_2500_dclk_table[] = { 576 + {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */ 577 + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ 578 + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ 579 + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ 580 + {0xee, 0x67, 0x01}, /* 04: VCLK40 */ 581 + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ 582 + {0xc6, 0x64, 0x01}, /* 06: VCLK50 */ 583 + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ 584 + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ 585 + {0x7b, 0x63, 0x00}, /* 09: VCLK75 */ 586 + {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */ 587 + {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */ 588 + {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */ 589 + {0x85, 0x24, 0x00}, /* 0d: VCLK135 */ 590 + {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */ 591 + {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */ 592 + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ 593 + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ 594 + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 595 + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 596 + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 597 + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ 598 + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ 599 + {0x58, 0x01, 0x42}, /* 17: VCLK119 */ 600 + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ 601 + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ 602 + {0x44, 0x20, 0x43}, /* 1a: VCLK118_25 */ 603 + }; 604 + 605 + /* 606 + * Device initialization 607 + */ 608 + 609 + static void ast_2500_detect_widescreen(struct ast_device *ast) 610 + { 611 + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) { 612 + ast->support_wsxga_p = true; 613 + ast->support_fullhd = true; 614 + } 615 + if (__ast_2100_detect_wuxga(ast)) 616 + ast->support_wuxga = true; 617 + } 618 + 619 + struct drm_device *ast_2500_device_create(struct pci_dev 
*pdev, 620 + const struct drm_driver *drv, 621 + enum ast_chip chip, 622 + enum ast_config_mode config_mode, 623 + void __iomem *regs, 624 + void __iomem *ioregs, 625 + bool need_post) 626 + { 627 + struct drm_device *dev; 628 + struct ast_device *ast; 629 + int ret; 630 + 631 + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); 632 + if (IS_ERR(ast)) 633 + return ERR_CAST(ast); 634 + dev = &ast->base; 635 + 636 + ast_device_init(ast, chip, config_mode, regs, ioregs); 637 + 638 + ast_2300_detect_tx_chip(ast); 639 + 640 + if (need_post) { 641 + ret = ast_post_gpu(ast); 642 + if (ret) 643 + return ERR_PTR(ret); 644 + } 645 + 646 + ret = ast_mm_init(ast); 647 + if (ret) 648 + return ERR_PTR(ret); 649 + 650 + /* map reserved buffer */ 651 + ast->dp501_fw_buf = NULL; 652 + if (ast->vram_size < pci_resource_len(pdev, 0)) { 653 + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); 654 + if (!ast->dp501_fw_buf) 655 + drm_info(dev, "failed to map reserved buffer!\n"); 656 + } 657 + 658 + ast_2500_detect_widescreen(ast); 659 + 660 + ret = ast_mode_config_init(ast); 661 + if (ret) 662 + return ERR_PTR(ret); 663 + 664 + return dev; 571 665 }
+63
drivers/gpu/drm/ast/ast_2600.c
··· 26 26 * Authors: Dave Airlie <airlied@redhat.com> 27 27 */ 28 28 29 + #include <linux/pci.h> 30 + 31 + #include <drm/drm_drv.h> 32 + 29 33 #include "ast_drv.h" 30 34 #include "ast_post.h" 31 35 ··· 45 41 return ast_dp_launch(ast); 46 42 47 43 return 0; 44 + } 45 + 46 + /* 47 + * Device initialization 48 + */ 49 + 50 + static void ast_2600_detect_widescreen(struct ast_device *ast) 51 + { 52 + ast->support_wsxga_p = true; 53 + ast->support_fullhd = true; 54 + if (__ast_2100_detect_wuxga(ast)) 55 + ast->support_wuxga = true; 56 + } 57 + 58 + struct drm_device *ast_2600_device_create(struct pci_dev *pdev, 59 + const struct drm_driver *drv, 60 + enum ast_chip chip, 61 + enum ast_config_mode config_mode, 62 + void __iomem *regs, 63 + void __iomem *ioregs, 64 + bool need_post) 65 + { 66 + struct drm_device *dev; 67 + struct ast_device *ast; 68 + int ret; 69 + 70 + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); 71 + if (IS_ERR(ast)) 72 + return ERR_CAST(ast); 73 + dev = &ast->base; 74 + 75 + ast_device_init(ast, chip, config_mode, regs, ioregs); 76 + 77 + ast_2300_detect_tx_chip(ast); 78 + 79 + switch (ast->tx_chip) { 80 + case AST_TX_ASTDP: 81 + ret = ast_post_gpu(ast); 82 + break; 83 + default: 84 + ret = 0; 85 + if (need_post) 86 + ret = ast_post_gpu(ast); 87 + break; 88 + } 89 + if (ret) 90 + return ERR_PTR(ret); 91 + 92 + ret = ast_mm_init(ast); 93 + if (ret) 94 + return ERR_PTR(ret); 95 + 96 + ast_2600_detect_widescreen(ast); 97 + 98 + ret = ast_mode_config_init(ast); 99 + if (ret) 100 + return ERR_PTR(ret); 101 + 102 + return dev; 48 103 }
+64 -3
drivers/gpu/drm/ast/ast_drv.c
··· 37 37 #include <drm/drm_fbdev_shmem.h> 38 38 #include <drm/drm_gem_shmem_helper.h> 39 39 #include <drm/drm_module.h> 40 + #include <drm/drm_print.h> 40 41 #include <drm/drm_probe_helper.h> 41 42 42 43 #include "ast_drv.h" ··· 46 45 47 46 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); 48 47 module_param_named(modeset, ast_modeset, int, 0400); 48 + 49 + void ast_device_init(struct ast_device *ast, 50 + enum ast_chip chip, 51 + enum ast_config_mode config_mode, 52 + void __iomem *regs, 53 + void __iomem *ioregs) 54 + { 55 + ast->chip = chip; 56 + ast->config_mode = config_mode; 57 + ast->regs = regs; 58 + ast->ioregs = ioregs; 59 + } 60 + 61 + void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip) 62 + { 63 + static const char * const info_str[] = { 64 + "analog VGA", 65 + "Sil164 TMDS transmitter", 66 + "DP501 DisplayPort transmitter", 67 + "ASPEED DisplayPort transmitter", 68 + }; 69 + 70 + drm_info(&ast->base, "Using %s\n", info_str[tx_chip]); 71 + 72 + ast->tx_chip = tx_chip; 73 + } 49 74 50 75 /* 51 76 * DRM driver ··· 293 266 *chip_out = chip; 294 267 *config_mode_out = config_mode; 295 268 296 - return 0; 269 + return __AST_CHIP_GEN(chip); 297 270 } 298 271 299 272 static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ··· 304 277 void __iomem *ioregs; 305 278 enum ast_config_mode config_mode; 306 279 enum ast_chip chip; 280 + unsigned int chip_gen; 307 281 struct drm_device *drm; 308 282 bool need_post = false; 309 283 ··· 377 349 return ret; 378 350 379 351 ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); 380 - if (ret) 352 + if (ret < 0) 381 353 return ret; 354 + chip_gen = ret; 382 355 383 - drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post); 356 + switch (chip_gen) { 357 + case 1: 358 + drm = ast_2000_device_create(pdev, &ast_driver, chip, config_mode, 359 + regs, ioregs, need_post); 360 + break; 361 + case 2: 362 + drm = 
ast_2100_device_create(pdev, &ast_driver, chip, config_mode, 363 + regs, ioregs, need_post); 364 + break; 365 + case 3: 366 + drm = ast_2200_device_create(pdev, &ast_driver, chip, config_mode, 367 + regs, ioregs, need_post); 368 + break; 369 + case 4: 370 + drm = ast_2300_device_create(pdev, &ast_driver, chip, config_mode, 371 + regs, ioregs, need_post); 372 + break; 373 + case 5: 374 + drm = ast_2400_device_create(pdev, &ast_driver, chip, config_mode, 375 + regs, ioregs, need_post); 376 + break; 377 + case 6: 378 + drm = ast_2500_device_create(pdev, &ast_driver, chip, config_mode, 379 + regs, ioregs, need_post); 380 + break; 381 + case 7: 382 + drm = ast_2600_device_create(pdev, &ast_driver, chip, config_mode, 383 + regs, ioregs, need_post); 384 + break; 385 + default: 386 + dev_err(&pdev->dev, "Gen%d not supported\n", chip_gen); 387 + return -ENODEV; 388 + } 384 389 if (IS_ERR(drm)) 385 390 return PTR_ERR(drm); 386 391 pci_set_drvdata(pdev, drm);
+67 -8
drivers/gpu/drm/ast/ast_drv.h
··· 217 217 return container_of(dev, struct ast_device, base); 218 218 } 219 219 220 - struct drm_device *ast_device_create(struct pci_dev *pdev, 221 - const struct drm_driver *drv, 222 - enum ast_chip chip, 223 - enum ast_config_mode config_mode, 224 - void __iomem *regs, 225 - void __iomem *ioregs, 226 - bool need_post); 227 - 228 220 static inline unsigned long __ast_gen(struct ast_device *ast) 229 221 { 230 222 return __AST_CHIP_GEN(ast->chip); ··· 407 415 408 416 int ast_mm_init(struct ast_device *ast); 409 417 418 + /* ast_drv.c */ 419 + void ast_device_init(struct ast_device *ast, 420 + enum ast_chip chip, 421 + enum ast_config_mode config_mode, 422 + void __iomem *regs, 423 + void __iomem *ioregs); 424 + void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip); 425 + 410 426 /* ast_2000.c */ 411 427 int ast_2000_post(struct ast_device *ast); 428 + extern const struct ast_vbios_dclk_info ast_2000_dclk_table[]; 429 + void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post); 430 + struct drm_device *ast_2000_device_create(struct pci_dev *pdev, 431 + const struct drm_driver *drv, 432 + enum ast_chip chip, 433 + enum ast_config_mode config_mode, 434 + void __iomem *regs, 435 + void __iomem *ioregs, 436 + bool need_post); 412 437 413 438 /* ast_2100.c */ 414 439 int ast_2100_post(struct ast_device *ast); 440 + bool __ast_2100_detect_wsxga_p(struct ast_device *ast); 441 + bool __ast_2100_detect_wuxga(struct ast_device *ast); 442 + struct drm_device *ast_2100_device_create(struct pci_dev *pdev, 443 + const struct drm_driver *drv, 444 + enum ast_chip chip, 445 + enum ast_config_mode config_mode, 446 + void __iomem *regs, 447 + void __iomem *ioregs, 448 + bool need_post); 449 + 450 + /* ast_2200.c */ 451 + struct drm_device *ast_2200_device_create(struct pci_dev *pdev, 452 + const struct drm_driver *drv, 453 + enum ast_chip chip, 454 + enum ast_config_mode config_mode, 455 + void __iomem *regs, 456 + void __iomem *ioregs, 457 + bool 
need_post); 415 458 416 459 /* ast_2300.c */ 417 460 int ast_2300_post(struct ast_device *ast); 461 + void ast_2300_detect_tx_chip(struct ast_device *ast); 462 + struct drm_device *ast_2300_device_create(struct pci_dev *pdev, 463 + const struct drm_driver *drv, 464 + enum ast_chip chip, 465 + enum ast_config_mode config_mode, 466 + void __iomem *regs, 467 + void __iomem *ioregs, 468 + bool need_post); 469 + 470 + /* ast_2400.c */ 471 + struct drm_device *ast_2400_device_create(struct pci_dev *pdev, 472 + const struct drm_driver *drv, 473 + enum ast_chip chip, 474 + enum ast_config_mode config_mode, 475 + void __iomem *regs, 476 + void __iomem *ioregs, 477 + bool need_post); 418 478 419 479 /* ast_2500.c */ 420 480 void ast_2500_patch_ahb(void __iomem *regs); 421 481 int ast_2500_post(struct ast_device *ast); 482 + extern const struct ast_vbios_dclk_info ast_2500_dclk_table[]; 483 + struct drm_device *ast_2500_device_create(struct pci_dev *pdev, 484 + const struct drm_driver *drv, 485 + enum ast_chip chip, 486 + enum ast_config_mode config_mode, 487 + void __iomem *regs, 488 + void __iomem *ioregs, 489 + bool need_post); 422 490 423 491 /* ast_2600.c */ 424 492 int ast_2600_post(struct ast_device *ast); 493 + struct drm_device *ast_2600_device_create(struct pci_dev *pdev, 494 + const struct drm_driver *drv, 495 + enum ast_chip chip, 496 + enum ast_config_mode config_mode, 497 + void __iomem *regs, 498 + void __iomem *ioregs, 499 + bool need_post); 425 500 426 501 /* ast post */ 427 502 int ast_post_gpu(struct ast_device *ast);
-268
drivers/gpu/drm/ast/ast_main.c
··· 1 - /* 2 - * Copyright 2012 Red Hat Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the 6 - * "Software"), to deal in the Software without restriction, including 7 - * without limitation the rights to use, copy, modify, merge, publish, 8 - * distribute, sub license, and/or sell copies of the Software, and to 9 - * permit persons to whom the Software is furnished to do so, subject to 10 - * the following conditions: 11 - * 12 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 13 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 15 - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 16 - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 17 - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 18 - * USE OR OTHER DEALINGS IN THE SOFTWARE. 19 - * 20 - * The above copyright notice and this permission notice (including the 21 - * next paragraph) shall be included in all copies or substantial portions 22 - * of the Software. 
23 - * 24 - */ 25 - /* 26 - * Authors: Dave Airlie <airlied@redhat.com> 27 - */ 28 - 29 - #include <linux/of.h> 30 - #include <linux/pci.h> 31 - 32 - #include <drm/drm_atomic_helper.h> 33 - #include <drm/drm_drv.h> 34 - #include <drm/drm_gem.h> 35 - #include <drm/drm_managed.h> 36 - 37 - #include "ast_drv.h" 38 - 39 - /* Try to detect WSXGA+ on Gen2+ */ 40 - static bool __ast_2100_detect_wsxga_p(struct ast_device *ast) 41 - { 42 - u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0); 43 - 44 - if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)) 45 - return true; 46 - if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN) 47 - return true; 48 - 49 - return false; 50 - } 51 - 52 - /* Try to detect WUXGA on Gen2+ */ 53 - static bool __ast_2100_detect_wuxga(struct ast_device *ast) 54 - { 55 - u8 vgacrd1; 56 - 57 - if (ast->support_fullhd) { 58 - vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); 59 - if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA)) 60 - return true; 61 - } 62 - 63 - return false; 64 - } 65 - 66 - static void ast_detect_widescreen(struct ast_device *ast) 67 - { 68 - ast->support_wsxga_p = false; 69 - ast->support_fullhd = false; 70 - ast->support_wuxga = false; 71 - 72 - if (AST_GEN(ast) >= 7) { 73 - ast->support_wsxga_p = true; 74 - ast->support_fullhd = true; 75 - if (__ast_2100_detect_wuxga(ast)) 76 - ast->support_wuxga = true; 77 - } else if (AST_GEN(ast) >= 6) { 78 - if (__ast_2100_detect_wsxga_p(ast)) 79 - ast->support_wsxga_p = true; 80 - else if (ast->chip == AST2510) 81 - ast->support_wsxga_p = true; 82 - if (ast->support_wsxga_p) 83 - ast->support_fullhd = true; 84 - if (__ast_2100_detect_wuxga(ast)) 85 - ast->support_wuxga = true; 86 - } else if (AST_GEN(ast) >= 5) { 87 - if (__ast_2100_detect_wsxga_p(ast)) 88 - ast->support_wsxga_p = true; 89 - else if (ast->chip == AST1400) 90 - ast->support_wsxga_p = true; 91 - if (ast->support_wsxga_p) 92 - ast->support_fullhd = true; 93 - if (__ast_2100_detect_wuxga(ast)) 94 - ast->support_wuxga = true; 95 - 
} else if (AST_GEN(ast) >= 4) { 96 - if (__ast_2100_detect_wsxga_p(ast)) 97 - ast->support_wsxga_p = true; 98 - else if (ast->chip == AST1300) 99 - ast->support_wsxga_p = true; 100 - if (ast->support_wsxga_p) 101 - ast->support_fullhd = true; 102 - if (__ast_2100_detect_wuxga(ast)) 103 - ast->support_wuxga = true; 104 - } else if (AST_GEN(ast) >= 3) { 105 - if (__ast_2100_detect_wsxga_p(ast)) 106 - ast->support_wsxga_p = true; 107 - if (ast->support_wsxga_p) { 108 - if (ast->chip == AST2200) 109 - ast->support_fullhd = true; 110 - } 111 - if (__ast_2100_detect_wuxga(ast)) 112 - ast->support_wuxga = true; 113 - } else if (AST_GEN(ast) >= 2) { 114 - if (__ast_2100_detect_wsxga_p(ast)) 115 - ast->support_wsxga_p = true; 116 - if (ast->support_wsxga_p) { 117 - if (ast->chip == AST2100) 118 - ast->support_fullhd = true; 119 - } 120 - if (__ast_2100_detect_wuxga(ast)) 121 - ast->support_wuxga = true; 122 - } 123 - } 124 - 125 - static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) 126 - { 127 - static const char * const info_str[] = { 128 - "analog VGA", 129 - "Sil164 TMDS transmitter", 130 - "DP501 DisplayPort transmitter", 131 - "ASPEED DisplayPort transmitter", 132 - }; 133 - 134 - struct drm_device *dev = &ast->base; 135 - u8 vgacra3, vgacrd1; 136 - 137 - /* Check 3rd Tx option (digital output afaik) */ 138 - ast->tx_chip = AST_TX_NONE; 139 - 140 - if (AST_GEN(ast) <= 3) { 141 - /* 142 - * VGACRA3 Enhanced Color Mode Register, check if DVO is already 143 - * enabled, in that case, assume we have a SIL164 TMDS transmitter 144 - * 145 - * Don't make that assumption if we the chip wasn't enabled and 146 - * is at power-on reset, otherwise we'll incorrectly "detect" a 147 - * SIL164 when there is none. 
148 - */ 149 - if (!need_post) { 150 - vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); 151 - if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED) 152 - ast->tx_chip = AST_TX_SIL164; 153 - } 154 - } else { 155 - /* 156 - * On AST GEN4+, look at the configuration set by the SoC in 157 - * the SOC scratch register #1 bits 11:8 (interestingly marked 158 - * as "reserved" in the spec) 159 - */ 160 - vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 161 - AST_IO_VGACRD1_TX_TYPE_MASK); 162 - switch (vgacrd1) { 163 - /* 164 - * GEN4 to GEN6 165 - */ 166 - case AST_IO_VGACRD1_TX_SIL164_VBIOS: 167 - ast->tx_chip = AST_TX_SIL164; 168 - break; 169 - case AST_IO_VGACRD1_TX_DP501_VBIOS: 170 - ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); 171 - if (ast->dp501_fw_addr) { 172 - /* backup firmware */ 173 - if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) { 174 - drmm_kfree(dev, ast->dp501_fw_addr); 175 - ast->dp501_fw_addr = NULL; 176 - } 177 - } 178 - fallthrough; 179 - case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: 180 - ast->tx_chip = AST_TX_DP501; 181 - break; 182 - /* 183 - * GEN7+ 184 - */ 185 - case AST_IO_VGACRD1_TX_ASTDP: 186 - ast->tx_chip = AST_TX_ASTDP; 187 - break; 188 - /* 189 - * Several of the listed TX chips are not explicitly supported 190 - * by the ast driver. If these exist in real-world devices, they 191 - * are most likely reported as VGA or SIL164 outputs. We warn here 192 - * to get bug reports for these devices. If none come in for some 193 - * time, we can begin to fail device probing on these values. 
194 - */ 195 - case AST_IO_VGACRD1_TX_ITE66121_VBIOS: 196 - drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", 197 - vgacrd1, AST_GEN(ast)); 198 - break; 199 - case AST_IO_VGACRD1_TX_CH7003_VBIOS: 200 - drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", 201 - vgacrd1, AST_GEN(ast)); 202 - break; 203 - case AST_IO_VGACRD1_TX_ANX9807_VBIOS: 204 - drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", 205 - vgacrd1, AST_GEN(ast)); 206 - break; 207 - } 208 - } 209 - 210 - drm_info(dev, "Using %s\n", info_str[ast->tx_chip]); 211 - } 212 - 213 - struct drm_device *ast_device_create(struct pci_dev *pdev, 214 - const struct drm_driver *drv, 215 - enum ast_chip chip, 216 - enum ast_config_mode config_mode, 217 - void __iomem *regs, 218 - void __iomem *ioregs, 219 - bool need_post) 220 - { 221 - struct drm_device *dev; 222 - struct ast_device *ast; 223 - int ret; 224 - 225 - ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); 226 - if (IS_ERR(ast)) 227 - return ERR_CAST(ast); 228 - dev = &ast->base; 229 - 230 - ast->chip = chip; 231 - ast->config_mode = config_mode; 232 - ast->regs = regs; 233 - ast->ioregs = ioregs; 234 - 235 - ast_detect_tx_chip(ast, need_post); 236 - switch (ast->tx_chip) { 237 - case AST_TX_ASTDP: 238 - ret = ast_post_gpu(ast); 239 - break; 240 - default: 241 - ret = 0; 242 - if (need_post) 243 - ret = ast_post_gpu(ast); 244 - break; 245 - } 246 - if (ret) 247 - return ERR_PTR(ret); 248 - 249 - ret = ast_mm_init(ast); 250 - if (ret) 251 - return ERR_PTR(ret); 252 - 253 - /* map reserved buffer */ 254 - ast->dp501_fw_buf = NULL; 255 - if (ast->vram_size < pci_resource_len(pdev, 0)) { 256 - ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); 257 - if (!ast->dp501_fw_buf) 258 - drm_info(dev, "failed to map reserved buffer!\n"); 259 - } 260 - 261 - ast_detect_widescreen(ast); 262 - 263 - ret = ast_mode_config_init(ast); 264 - if (ret) 265 - return ERR_PTR(ret); 266 - 267 - return dev; 268 - }
+2 -2
drivers/gpu/drm/ast/ast_mode.c
··· 373 373 const struct ast_vbios_dclk_info *clk_info; 374 374 375 375 if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) 376 - clk_info = &dclk_table_ast2500[vmode->dclk_index]; 376 + clk_info = &ast_2500_dclk_table[vmode->dclk_index]; 377 377 else 378 - clk_info = &dclk_table[vmode->dclk_index]; 378 + clk_info = &ast_2000_dclk_table[vmode->dclk_index]; 379 379 380 380 ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1); 381 381 ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2);
-60
drivers/gpu/drm/ast/ast_tables.h
··· 33 33 #define HiCModeIndex 3 34 34 #define TrueCModeIndex 4 35 35 36 - static const struct ast_vbios_dclk_info dclk_table[] = { 37 - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ 38 - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ 39 - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ 40 - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ 41 - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ 42 - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ 43 - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ 44 - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ 45 - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ 46 - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ 47 - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ 48 - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ 49 - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ 50 - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ 51 - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ 52 - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ 53 - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ 54 - {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ 55 - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 56 - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 57 - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 58 - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ 59 - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ 60 - {0x77, 0x58, 0x80}, /* 17: VCLK119 */ 61 - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ 62 - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ 63 - {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ 64 - }; 65 - 66 - static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { 67 - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ 68 - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ 69 - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ 70 - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ 71 - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ 72 - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ 73 - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ 74 - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ 75 - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ 76 - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ 77 - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ 78 - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ 79 - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ 80 - {0x85, 0x24, 0x00}, /* 0D: 
VCLK135 */ 81 - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ 82 - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ 83 - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ 84 - {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ 85 - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 86 - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 87 - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 88 - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ 89 - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ 90 - {0x58, 0x01, 0x42}, /* 17: VCLK119 */ 91 - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ 92 - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ 93 - {0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */ 94 - }; 95 - 96 36 static const struct ast_vbios_stdtable vbios_stdtable[] = { 97 37 /* MD_2_3_400 */ 98 38 {
+11
drivers/gpu/drm/bridge/imx/Kconfig
··· 18 18 depends on OF 19 19 depends on COMMON_CLK 20 20 select DRM_DW_HDMI 21 + imply DRM_IMX8MP_HDMI_PAI 21 22 imply DRM_IMX8MP_HDMI_PVI 22 23 imply PHY_FSL_SAMSUNG_HDMI_PHY 23 24 help 24 25 Choose this to enable support for the internal HDMI encoder found 25 26 on the i.MX8MP SoC. 27 + 28 + config DRM_IMX8MP_HDMI_PAI 29 + tristate "Freescale i.MX8MP HDMI PAI bridge support" 30 + depends on OF 31 + select DRM_DW_HDMI 32 + select REGMAP 33 + select REGMAP_MMIO 34 + help 35 + Choose this to enable support for the internal HDMI TX Parallel 36 + Audio Interface found on the Freescale i.MX8MP SoC. 26 37 27 38 config DRM_IMX8MP_HDMI_PVI 28 39 tristate "Freescale i.MX8MP HDMI PVI bridge support"
+1
drivers/gpu/drm/bridge/imx/Makefile
··· 1 1 obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o 2 2 obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o 3 3 obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o 4 + obj-$(CONFIG_DRM_IMX8MP_HDMI_PAI) += imx8mp-hdmi-pai.o 4 5 obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o 5 6 obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o 6 7 obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
+158
drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright 2025 NXP 4 + */ 5 + 6 + #include <linux/bitfield.h> 7 + #include <linux/component.h> 8 + #include <linux/module.h> 9 + #include <linux/of_platform.h> 10 + #include <linux/platform_device.h> 11 + #include <linux/regmap.h> 12 + #include <drm/bridge/dw_hdmi.h> 13 + #include <sound/asoundef.h> 14 + 15 + #define HTX_PAI_CTRL 0x00 16 + #define ENABLE BIT(0) 17 + 18 + #define HTX_PAI_CTRL_EXT 0x04 19 + #define WTMK_HIGH_MASK GENMASK(31, 24) 20 + #define WTMK_LOW_MASK GENMASK(23, 16) 21 + #define NUM_CH_MASK GENMASK(10, 8) 22 + #define WTMK_HIGH(n) FIELD_PREP(WTMK_HIGH_MASK, (n)) 23 + #define WTMK_LOW(n) FIELD_PREP(WTMK_LOW_MASK, (n)) 24 + #define NUM_CH(n) FIELD_PREP(NUM_CH_MASK, (n) - 1) 25 + 26 + #define HTX_PAI_FIELD_CTRL 0x08 27 + #define PRE_SEL GENMASK(28, 24) 28 + #define D_SEL GENMASK(23, 20) 29 + #define V_SEL GENMASK(19, 15) 30 + #define U_SEL GENMASK(14, 10) 31 + #define C_SEL GENMASK(9, 5) 32 + #define P_SEL GENMASK(4, 0) 33 + 34 + struct imx8mp_hdmi_pai { 35 + struct regmap *regmap; 36 + }; 37 + 38 + static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel, 39 + int width, int rate, int non_pcm, 40 + int iec958) 41 + { 42 + const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); 43 + struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; 44 + int val; 45 + 46 + /* PAI set control extended */ 47 + val = WTMK_HIGH(3) | WTMK_LOW(3); 48 + val |= NUM_CH(channel); 49 + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL_EXT, val); 50 + 51 + /* IEC60958 format */ 52 + if (iec958) { 53 + val = FIELD_PREP_CONST(P_SEL, 54 + __bf_shf(IEC958_SUBFRAME_PARITY)); 55 + val |= FIELD_PREP_CONST(C_SEL, 56 + __bf_shf(IEC958_SUBFRAME_CHANNEL_STATUS)); 57 + val |= FIELD_PREP_CONST(U_SEL, 58 + __bf_shf(IEC958_SUBFRAME_USER_DATA)); 59 + val |= FIELD_PREP_CONST(V_SEL, 60 + __bf_shf(IEC958_SUBFRAME_VALIDITY)); 61 + val |= FIELD_PREP_CONST(D_SEL, 62 + __bf_shf(IEC958_SUBFRAME_SAMPLE_24_MASK)); 63 + 
val |= FIELD_PREP_CONST(PRE_SEL, 64 + __bf_shf(IEC958_SUBFRAME_PREAMBLE_MASK)); 65 + } else { 66 + /* 67 + * The allowed PCM widths are 24bit and 32bit, as they are supported 68 + * by aud2htx module. 69 + * for 24bit, D_SEL = 0, select all the bits. 70 + * for 32bit, D_SEL = 8, select 24bit in MSB. 71 + */ 72 + val = FIELD_PREP(D_SEL, width - 24); 73 + } 74 + 75 + regmap_write(hdmi_pai->regmap, HTX_PAI_FIELD_CTRL, val); 76 + 77 + /* PAI start running */ 78 + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, ENABLE); 79 + } 80 + 81 + static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi) 82 + { 83 + const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); 84 + struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; 85 + 86 + /* Stop PAI */ 87 + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0); 88 + } 89 + 90 + static const struct regmap_config imx8mp_hdmi_pai_regmap_config = { 91 + .reg_bits = 32, 92 + .reg_stride = 4, 93 + .val_bits = 32, 94 + .max_register = HTX_PAI_FIELD_CTRL, 95 + }; 96 + 97 + static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void *data) 98 + { 99 + struct platform_device *pdev = to_platform_device(dev); 100 + struct dw_hdmi_plat_data *plat_data = data; 101 + struct imx8mp_hdmi_pai *hdmi_pai; 102 + struct resource *res; 103 + void __iomem *base; 104 + 105 + hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL); 106 + if (!hdmi_pai) 107 + return -ENOMEM; 108 + 109 + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 110 + if (IS_ERR(base)) 111 + return PTR_ERR(base); 112 + 113 + hdmi_pai->regmap = devm_regmap_init_mmio_clk(dev, "apb", base, 114 + &imx8mp_hdmi_pai_regmap_config); 115 + if (IS_ERR(hdmi_pai->regmap)) { 116 + dev_err(dev, "regmap init failed\n"); 117 + return PTR_ERR(hdmi_pai->regmap); 118 + } 119 + 120 + plat_data->enable_audio = imx8mp_hdmi_pai_enable; 121 + plat_data->disable_audio = imx8mp_hdmi_pai_disable; 122 + plat_data->priv_audio = hdmi_pai; 123 + 124 + return 0; 125 + } 
126 + 127 + static const struct component_ops imx8mp_hdmi_pai_ops = { 128 + .bind = imx8mp_hdmi_pai_bind, 129 + }; 130 + 131 + static int imx8mp_hdmi_pai_probe(struct platform_device *pdev) 132 + { 133 + return component_add(&pdev->dev, &imx8mp_hdmi_pai_ops); 134 + } 135 + 136 + static void imx8mp_hdmi_pai_remove(struct platform_device *pdev) 137 + { 138 + component_del(&pdev->dev, &imx8mp_hdmi_pai_ops); 139 + } 140 + 141 + static const struct of_device_id imx8mp_hdmi_pai_of_table[] = { 142 + { .compatible = "fsl,imx8mp-hdmi-pai" }, 143 + { /* Sentinel */ } 144 + }; 145 + MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pai_of_table); 146 + 147 + static struct platform_driver imx8mp_hdmi_pai_platform_driver = { 148 + .probe = imx8mp_hdmi_pai_probe, 149 + .remove = imx8mp_hdmi_pai_remove, 150 + .driver = { 151 + .name = "imx8mp-hdmi-pai", 152 + .of_match_table = imx8mp_hdmi_pai_of_table, 153 + }, 154 + }; 155 + module_platform_driver(imx8mp_hdmi_pai_platform_driver); 156 + 157 + MODULE_DESCRIPTION("i.MX8MP HDMI PAI driver"); 158 + MODULE_LICENSE("GPL");
+60 -5
drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
··· 5 5 */ 6 6 7 7 #include <linux/clk.h> 8 + #include <linux/component.h> 8 9 #include <linux/mod_devicetable.h> 9 10 #include <linux/module.h> 10 11 #include <linux/platform_device.h> 11 12 #include <drm/bridge/dw_hdmi.h> 12 13 #include <drm/drm_modes.h> 14 + #include <drm/drm_of.h> 13 15 14 16 struct imx8mp_hdmi { 15 17 struct dw_hdmi_plat_data plat_data; ··· 81 79 .update_hpd = dw_hdmi_phy_update_hpd, 82 80 }; 83 81 82 + static int imx8mp_dw_hdmi_bind(struct device *dev) 83 + { 84 + struct platform_device *pdev = to_platform_device(dev); 85 + struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); 86 + int ret; 87 + 88 + ret = component_bind_all(dev, &hdmi->plat_data); 89 + if (ret) 90 + return dev_err_probe(dev, ret, "component_bind_all failed!\n"); 91 + 92 + hdmi->dw_hdmi = dw_hdmi_probe(pdev, &hdmi->plat_data); 93 + if (IS_ERR(hdmi->dw_hdmi)) { 94 + component_unbind_all(dev, &hdmi->plat_data); 95 + return PTR_ERR(hdmi->dw_hdmi); 96 + } 97 + 98 + return 0; 99 + } 100 + 101 + static void imx8mp_dw_hdmi_unbind(struct device *dev) 102 + { 103 + struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); 104 + 105 + dw_hdmi_remove(hdmi->dw_hdmi); 106 + 107 + component_unbind_all(dev, &hdmi->plat_data); 108 + } 109 + 110 + static const struct component_master_ops imx8mp_dw_hdmi_ops = { 111 + .bind = imx8mp_dw_hdmi_bind, 112 + .unbind = imx8mp_dw_hdmi_unbind, 113 + }; 114 + 84 115 static int imx8mp_dw_hdmi_probe(struct platform_device *pdev) 85 116 { 86 117 struct device *dev = &pdev->dev; 87 118 struct dw_hdmi_plat_data *plat_data; 119 + struct component_match *match = NULL; 120 + struct device_node *remote; 88 121 struct imx8mp_hdmi *hdmi; 89 122 90 123 hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); ··· 139 102 plat_data->priv_data = hdmi; 140 103 plat_data->phy_force_vendor = true; 141 104 142 - hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); 143 - if (IS_ERR(hdmi->dw_hdmi)) 144 - return PTR_ERR(hdmi->dw_hdmi); 145 - 146 105 platform_set_drvdata(pdev, hdmi); 106 + 107 + /* 
port@2 is for hdmi_pai device */ 108 + remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0); 109 + if (!remote) { 110 + hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); 111 + if (IS_ERR(hdmi->dw_hdmi)) 112 + return PTR_ERR(hdmi->dw_hdmi); 113 + } else { 114 + drm_of_component_match_add(dev, &match, component_compare_of, remote); 115 + 116 + of_node_put(remote); 117 + 118 + return component_master_add_with_match(dev, &imx8mp_dw_hdmi_ops, match); 119 + } 147 120 148 121 return 0; 149 122 } ··· 161 114 static void imx8mp_dw_hdmi_remove(struct platform_device *pdev) 162 115 { 163 116 struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev); 117 + struct device_node *remote; 164 118 165 - dw_hdmi_remove(hdmi->dw_hdmi); 119 + remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0); 120 + if (remote) { 121 + of_node_put(remote); 122 + 123 + component_master_del(&pdev->dev, &imx8mp_dw_hdmi_ops); 124 + } else { 125 + dw_hdmi_remove(hdmi->dw_hdmi); 126 + } 166 127 } 167 128 168 129 static int imx8mp_dw_hdmi_pm_suspend(struct device *dev)
+1 -6
drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
··· 683 683 pm_runtime_disable(&pdev->dev); 684 684 } 685 685 686 - static int imx8qxp_ldb_runtime_suspend(struct device *dev) 687 - { 688 - return 0; 689 - } 690 - 691 686 static int imx8qxp_ldb_runtime_resume(struct device *dev) 692 687 { 693 688 struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev); ··· 695 700 } 696 701 697 702 static const struct dev_pm_ops imx8qxp_ldb_pm_ops = { 698 - RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL) 703 + RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL) 699 704 }; 700 705 701 706 static const struct of_device_id imx8qxp_ldb_dt_ids[] = {
+5
drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
··· 90 90 params->iec.status[0] & IEC958_AES0_NONAUDIO); 91 91 dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width); 92 92 93 + if (daifmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE) 94 + dw_hdmi_set_sample_iec958(dw->data.hdmi, 1); 95 + else 96 + dw_hdmi_set_sample_iec958(dw->data.hdmi, 0); 97 + 93 98 return 0; 94 99 } 95 100
+17 -1
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
··· 177 177 178 178 spinlock_t audio_lock; 179 179 struct mutex audio_mutex; 180 + unsigned int sample_iec958; 180 181 unsigned int sample_non_pcm; 181 182 unsigned int sample_width; 182 183 unsigned int sample_rate; ··· 198 197 struct device *codec_dev; 199 198 enum drm_connector_status last_connector_result; 200 199 }; 200 + 201 + const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi) 202 + { 203 + return hdmi->plat_data; 204 + } 205 + EXPORT_SYMBOL_GPL(dw_hdmi_to_plat_data); 201 206 202 207 #define HDMI_IH_PHY_STAT0_RX_SENSE \ 203 208 (HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \ ··· 719 712 } 720 713 EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm); 721 714 715 + void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958) 716 + { 717 + mutex_lock(&hdmi->audio_mutex); 718 + hdmi->sample_iec958 = iec958; 719 + mutex_unlock(&hdmi->audio_mutex); 720 + } 721 + EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_iec958); 722 + 722 723 void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate) 723 724 { 724 725 mutex_lock(&hdmi->audio_mutex); ··· 858 843 hdmi->channels, 859 844 hdmi->sample_width, 860 845 hdmi->sample_rate, 861 - hdmi->sample_non_pcm); 846 + hdmi->sample_non_pcm, 847 + hdmi->sample_iec958); 862 848 } 863 849 864 850 static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
+112
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 106 106 #define SN_PWM_EN_INV_REG 0xA5 107 107 #define SN_PWM_INV_MASK BIT(0) 108 108 #define SN_PWM_EN_MASK BIT(1) 109 + 110 + #define SN_IRQ_EN_REG 0xE0 111 + #define IRQ_EN BIT(0) 112 + 113 + #define SN_IRQ_EVENTS_EN_REG 0xE6 114 + #define HPD_INSERTION_EN BIT(1) 115 + #define HPD_REMOVAL_EN BIT(2) 116 + 109 117 #define SN_AUX_CMD_STATUS_REG 0xF4 110 118 #define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3) 111 119 #define AUX_IRQ_STATUS_AUX_SHORT BIT(5) 112 120 #define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6) 121 + #define SN_IRQ_STATUS_REG 0xF5 122 + #define HPD_REMOVAL_STATUS BIT(2) 123 + #define HPD_INSERTION_STATUS BIT(1) 113 124 114 125 #define MIN_DSI_CLK_FREQ_MHZ 40 115 126 ··· 163 152 * @ln_assign: Value to program to the LN_ASSIGN register. 164 153 * @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG. 165 154 * @comms_enabled: If true then communication over the aux channel is enabled. 155 + * @hpd_enabled: If true then HPD events are enabled. 166 156 * @comms_mutex: Protects modification of comms_enabled. 157 + * @hpd_mutex: Protects modification of hpd_enabled. 167 158 * 168 159 * @gchip: If we expose our GPIOs, this is used. 169 160 * @gchip_output: A cache of whether we've set GPIOs to output. 
This ··· 203 190 u8 ln_assign; 204 191 u8 ln_polrs; 205 192 bool comms_enabled; 193 + bool hpd_enabled; 206 194 struct mutex comms_mutex; 195 + struct mutex hpd_mutex; 207 196 208 197 #if defined(CONFIG_OF_GPIO) 209 198 struct gpio_chip gchip; ··· 235 220 .cache_type = REGCACHE_NONE, 236 221 .max_register = 0xFF, 237 222 }; 223 + 224 + static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg, 225 + u8 *val) 226 + { 227 + int ret; 228 + unsigned int reg_val; 229 + 230 + ret = regmap_read(pdata->regmap, reg, &reg_val); 231 + if (ret) { 232 + dev_err(pdata->dev, "fail to read raw reg %#x: %d\n", 233 + reg, ret); 234 + return ret; 235 + } 236 + *val = (u8)reg_val; 237 + 238 + return 0; 239 + } 238 240 239 241 static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata, 240 242 unsigned int reg, u16 *val) ··· 411 379 static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) 412 380 { 413 381 struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev); 382 + const struct i2c_client *client = to_i2c_client(pdata->dev); 414 383 int ret; 415 384 416 385 ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies); ··· 445 412 */ 446 413 if (pdata->refclk) 447 414 ti_sn65dsi86_enable_comms(pdata, NULL); 415 + 416 + if (client->irq) { 417 + ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN, 418 + IRQ_EN); 419 + if (ret) 420 + dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret); 421 + } 448 422 449 423 return ret; 450 424 } ··· 1251 1211 static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) 1252 1212 { 1253 1213 struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); 1214 + const struct i2c_client *client = to_i2c_client(pdata->dev); 1215 + int ret; 1254 1216 1255 1217 /* 1256 1218 * Device needs to be powered on before reading the HPD state ··· 1261 1219 */ 1262 1220 1263 1221 pm_runtime_get_sync(pdata->dev); 1222 + 1223 + mutex_lock(&pdata->hpd_mutex); 1224 + pdata->hpd_enabled = true; 1225 + 
mutex_unlock(&pdata->hpd_mutex); 1226 + 1227 + if (client->irq) { 1228 + ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG, 1229 + HPD_REMOVAL_EN | HPD_INSERTION_EN); 1230 + if (ret) 1231 + dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret); 1232 + } 1264 1233 } 1265 1234 1266 1235 static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge) 1267 1236 { 1268 1237 struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); 1238 + const struct i2c_client *client = to_i2c_client(pdata->dev); 1239 + int ret; 1240 + 1241 + if (client->irq) { 1242 + ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG, 1243 + HPD_REMOVAL_EN | HPD_INSERTION_EN); 1244 + if (ret) 1245 + dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret); 1246 + } 1247 + 1248 + mutex_lock(&pdata->hpd_mutex); 1249 + pdata->hpd_enabled = false; 1250 + mutex_unlock(&pdata->hpd_mutex); 1269 1251 1270 1252 pm_runtime_put_autosuspend(pdata->dev); 1271 1253 } ··· 1373 1307 } 1374 1308 1375 1309 return 0; 1310 + } 1311 + 1312 + static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private) 1313 + { 1314 + struct ti_sn65dsi86 *pdata = private; 1315 + struct drm_device *dev = pdata->bridge.dev; 1316 + u8 status; 1317 + int ret; 1318 + bool hpd_event; 1319 + 1320 + ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status); 1321 + if (ret) { 1322 + dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret); 1323 + return IRQ_NONE; 1324 + } 1325 + 1326 + hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS); 1327 + 1328 + dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status); 1329 + if (!status) 1330 + return IRQ_NONE; 1331 + 1332 + ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status); 1333 + if (ret) { 1334 + dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", ret); 1335 + return IRQ_NONE; 1336 + } 1337 + 1338 + /* Only send the HPD event if we are bound with a device. 
*/ 1339 + mutex_lock(&pdata->hpd_mutex); 1340 + if (pdata->hpd_enabled && hpd_event) 1341 + drm_kms_helper_hotplug_event(dev); 1342 + mutex_unlock(&pdata->hpd_mutex); 1343 + 1344 + return IRQ_HANDLED; 1376 1345 } 1377 1346 1378 1347 static int ti_sn_bridge_probe(struct auxiliary_device *adev, ··· 2032 1931 dev_set_drvdata(dev, pdata); 2033 1932 pdata->dev = dev; 2034 1933 1934 + mutex_init(&pdata->hpd_mutex); 2035 1935 mutex_init(&pdata->comms_mutex); 2036 1936 2037 1937 pdata->regmap = devm_regmap_init_i2c(client, ··· 2072 1970 /* The ID string is stored backwards */ 2073 1971 if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf))) 2074 1972 return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n"); 1973 + 1974 + if (client->irq) { 1975 + ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL, 1976 + ti_sn_bridge_interrupt, 1977 + IRQF_ONESHOT, 1978 + dev_name(pdata->dev), pdata); 1979 + 1980 + if (ret) 1981 + return dev_err_probe(dev, ret, "failed to request interrupt\n"); 1982 + } 2075 1983 2076 1984 /* 2077 1985 * Break ourselves up into a collection of aux devices. The only real
+5 -6
drivers/gpu/drm/display/drm_bridge_connector.c
··· 137 137 { 138 138 struct drm_bridge_connector *bridge_connector = 139 139 to_drm_bridge_connector(connector); 140 - struct drm_bridge *bridge; 141 140 142 141 /* Notify all bridges in the pipeline of hotplug events. */ 143 - drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) { 142 + drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) { 144 143 if (bridge->funcs->hpd_notify) 145 144 bridge->funcs->hpd_notify(bridge, status); 146 145 } ··· 638 639 struct drm_bridge_connector *bridge_connector; 639 640 struct drm_connector *connector; 640 641 struct i2c_adapter *ddc = NULL; 641 - struct drm_bridge *bridge, *panel_bridge = NULL; 642 + struct drm_bridge *panel_bridge = NULL; 642 643 unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); 643 644 unsigned int max_bpc = 8; 644 645 bool support_hdcp = false; ··· 666 667 * detection are available, we don't support hotplug detection at all. 667 668 */ 668 669 connector_type = DRM_MODE_CONNECTOR_Unknown; 669 - drm_for_each_bridge_in_chain(encoder, bridge) { 670 + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { 670 671 if (!bridge->interlace_allowed) 671 672 connector->interlace_allowed = false; 672 673 if (!bridge->ycbcr_420_allowed) ··· 817 818 818 819 if (bridge_connector->bridge_hdmi_cec && 819 820 bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { 820 - bridge = bridge_connector->bridge_hdmi_cec; 821 + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; 821 822 822 823 ret = drmm_connector_hdmi_cec_notifier_register(connector, 823 824 NULL, ··· 828 829 829 830 if (bridge_connector->bridge_hdmi_cec && 830 831 bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { 831 - bridge = bridge_connector->bridge_hdmi_cec; 832 + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; 832 833 833 834 ret = drmm_connector_hdmi_cec_register(connector, 834 835 &drm_bridge_connector_hdmi_cec_funcs,
+57 -19
drivers/gpu/drm/display/drm_dp_helper.c
··· 29 29 #include <linux/init.h> 30 30 #include <linux/iopoll.h> 31 31 #include <linux/kernel.h> 32 + #include <linux/minmax.h> 32 33 #include <linux/module.h> 33 34 #include <linux/sched.h> 34 35 #include <linux/seq_file.h> ··· 123 122 return true; 124 123 } 125 124 EXPORT_SYMBOL(drm_dp_clock_recovery_ok); 125 + 126 + bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]) 127 + { 128 + u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); 129 + 130 + return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS; 131 + } 132 + EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress); 126 133 127 134 u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], 128 135 int lane) ··· 4137 4128 { 4138 4129 int fxp, fxp_min, fxp_max, fxp_actual, f = 1; 4139 4130 int ret; 4140 - u8 pn, pn_min, pn_max; 4131 + u8 pn, pn_min, pn_max, bit_count; 4141 4132 4142 4133 if (!bl->aux_set) 4143 4134 return 0; 4144 4135 4145 - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); 4136 + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &bit_count); 4146 4137 if (ret < 0) { 4147 4138 drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", 4148 4139 aux->name, ret); 4149 4140 return -ENODEV; 4150 4141 } 4151 4142 4152 - pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK; 4143 + bit_count &= DP_EDP_PWMGEN_BIT_COUNT_MASK; 4144 + 4145 + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); 4146 + if (ret < 0) { 4147 + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", 4148 + aux->name, ret); 4149 + return -ENODEV; 4150 + } 4151 + pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; 4152 + 4153 + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); 4154 + if (ret < 0) { 4155 + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", 4156 + aux->name, ret); 4157 + return -ENODEV; 4158 + } 4159 + pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; 
4160 + 4161 + if (unlikely(pn_min > pn_max)) { 4162 + drm_dbg_kms(aux->drm_dev, "%s: Invalid pwmgen bit count cap min/max returned: %d %d\n", 4163 + aux->name, pn_min, pn_max); 4164 + return -EINVAL; 4165 + } 4166 + 4167 + /* 4168 + * Per VESA eDP Spec v1.4b, section 3.3.10.2: 4169 + * If DP_EDP_PWMGEN_BIT_COUNT is less than DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, 4170 + * the sink must use the MIN value as the effective PWM bit count. 4171 + * Clamp the reported value to the [MIN, MAX] capability range to ensure 4172 + * correct brightness scaling on compliant eDP panels. 4173 + * Only enable this logic if the [MIN, MAX] range is valid in regard to Spec. 4174 + */ 4175 + pn = bit_count; 4176 + if (bit_count < pn_min) 4177 + pn = clamp(bit_count, pn_min, pn_max); 4178 + 4153 4179 bl->max = (1 << pn) - 1; 4154 - if (!driver_pwm_freq_hz) 4180 + if (!driver_pwm_freq_hz) { 4181 + if (pn != bit_count) 4182 + goto bit_count_write_back; 4183 + 4155 4184 return 0; 4185 + } 4156 4186 4157 4187 /* 4158 4188 * Set PWM Frequency divider to match desired frequency provided by the driver. ··· 4215 4167 * - FxP is within 25% of desired value. 4216 4168 * Note: 25% is arbitrary value and may need some tweak. 
4217 4169 */ 4218 - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); 4219 - if (ret < 0) { 4220 - drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", 4221 - aux->name, ret); 4222 - return 0; 4223 - } 4224 - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); 4225 - if (ret < 0) { 4226 - drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", 4227 - aux->name, ret); 4228 - return 0; 4229 - } 4230 - pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; 4231 - pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; 4232 - 4233 4170 /* Ensure frequency is within 25% of desired value */ 4234 4171 fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); 4235 4172 fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); ··· 4232 4199 break; 4233 4200 } 4234 4201 4202 + bit_count_write_back: 4235 4203 ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); 4236 4204 if (ret < 0) { 4237 4205 drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", 4238 4206 aux->name, ret); 4239 4207 return 0; 4240 4208 } 4209 + 4210 + if (!driver_pwm_freq_hz) 4211 + return 0; 4212 + 4241 4213 bl->pwmgen_bit_count = pn; 4242 4214 bl->max = (1 << pn) - 1; 4243 4215
+1 -2
drivers/gpu/drm/drm_atomic.c
··· 1308 1308 struct drm_encoder *encoder) 1309 1309 { 1310 1310 struct drm_bridge_state *bridge_state; 1311 - struct drm_bridge *bridge; 1312 1311 1313 1312 if (!encoder) 1314 1313 return 0; ··· 1316 1317 "Adding all bridges for [encoder:%d:%s] to %p\n", 1317 1318 encoder->base.id, encoder->name, state); 1318 1319 1319 - drm_for_each_bridge_in_chain(encoder, bridge) { 1320 + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { 1320 1321 /* Skip bridges that don't implement the atomic state hooks. */ 1321 1322 if (!bridge->funcs->atomic_duplicate_state) 1322 1323 continue;
+40 -12
drivers/gpu/drm/drm_bridge.c
··· 197 197 * driver. 198 198 */ 199 199 200 + /* Protect bridge_list and bridge_lingering_list */ 200 201 static DEFINE_MUTEX(bridge_lock); 201 202 static LIST_HEAD(bridge_list); 203 + static LIST_HEAD(bridge_lingering_list); 202 204 203 205 static void __drm_bridge_free(struct kref *kref) 204 206 { 205 207 struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount); 206 208 209 + mutex_lock(&bridge_lock); 210 + list_del(&bridge->list); 211 + mutex_unlock(&bridge_lock); 212 + 207 213 if (bridge->funcs->destroy) 208 214 bridge->funcs->destroy(bridge); 215 + 209 216 kfree(bridge->container); 210 217 } 211 218 ··· 280 273 return ERR_PTR(-ENOMEM); 281 274 282 275 bridge = container + offset; 276 + INIT_LIST_HEAD(&bridge->list); 283 277 bridge->container = container; 284 278 bridge->funcs = funcs; 285 279 kref_init(&bridge->refcount); ··· 294 286 EXPORT_SYMBOL(__devm_drm_bridge_alloc); 295 287 296 288 /** 297 - * drm_bridge_add - add the given bridge to the global bridge list 289 + * drm_bridge_add - register a bridge 298 290 * 299 291 * @bridge: bridge control structure 292 + * 293 + * Add the given bridge to the global list of bridges, where they can be 294 + * found by users via of_drm_find_bridge(). 300 295 * 301 296 * The bridge to be added must have been allocated by 302 297 * devm_drm_bridge_alloc(). ··· 310 299 DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n"); 311 300 312 301 drm_bridge_get(bridge); 302 + 303 + /* 304 + * If the bridge was previously added and then removed, it is now 305 + * in bridge_lingering_list. Remove it or bridge_lingering_list will be 306 + * corrupted when adding this bridge to bridge_list below. 
307 + */ 308 + if (!list_empty(&bridge->list)) 309 + list_del_init(&bridge->list); 313 310 314 311 mutex_init(&bridge->hpd_mutex); 315 312 ··· 355 336 EXPORT_SYMBOL(devm_drm_bridge_add); 356 337 357 338 /** 358 - * drm_bridge_remove - remove the given bridge from the global bridge list 339 + * drm_bridge_remove - unregister a bridge 359 340 * 360 341 * @bridge: bridge control structure 342 + * 343 + * Remove the given bridge from the global list of registered bridges, so 344 + * it won't be found by users via of_drm_find_bridge(), and add it to the 345 + * lingering bridge list, to keep track of it until its allocated memory is 346 + * eventually freed. 361 347 */ 362 348 void drm_bridge_remove(struct drm_bridge *bridge) 363 349 { 364 350 mutex_lock(&bridge_lock); 365 - list_del_init(&bridge->list); 351 + list_move_tail(&bridge->list, &bridge_lingering_list); 366 352 mutex_unlock(&bridge_lock); 367 353 368 354 mutex_destroy(&bridge->hpd_mutex); ··· 1145 1121 struct drm_atomic_state *state) 1146 1122 { 1147 1123 struct drm_bridge_state *bridge_state, *next_bridge_state; 1148 - struct drm_bridge *next_bridge; 1149 1124 u32 output_flags = 0; 1150 1125 1151 1126 bridge_state = drm_atomic_get_new_bridge_state(state, bridge); ··· 1153 1130 if (!bridge_state) 1154 1131 return; 1155 1132 1156 - next_bridge = drm_bridge_get_next_bridge(bridge); 1133 + struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge); 1157 1134 1158 1135 /* 1159 1136 * Let's try to apply the most common case here, that is, propagate ··· 1455 1432 1456 1433 static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, 1457 1434 struct drm_bridge *bridge, 1458 - unsigned int idx) 1435 + unsigned int idx, 1436 + bool lingering) 1459 1437 { 1460 1438 drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); 1461 1439 1462 - drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount)); 1440 + drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount), 1441 + 
lingering ? " [lingering]" : ""); 1463 1442 1464 1443 drm_printf(p, "\ttype: [%d] %s\n", 1465 1444 bridge->type, 1466 1445 drm_get_connector_type_name(bridge->type)); 1467 1446 1468 - if (bridge->of_node) 1447 + /* The OF node could be freed after drm_bridge_remove() */ 1448 + if (bridge->of_node && !lingering) 1469 1449 drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node); 1470 1450 1471 1451 drm_printf(p, "\tops: [0x%x]", bridge->ops); ··· 1494 1468 mutex_lock(&bridge_lock); 1495 1469 1496 1470 list_for_each_entry(bridge, &bridge_list, list) 1497 - drm_bridge_debugfs_show_bridge(&p, bridge, idx++); 1471 + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); 1472 + 1473 + list_for_each_entry(bridge, &bridge_lingering_list, list) 1474 + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true); 1498 1475 1499 1476 mutex_unlock(&bridge_lock); 1500 1477 ··· 1509 1480 { 1510 1481 struct drm_encoder *encoder = m->private; 1511 1482 struct drm_printer p = drm_seq_file_printer(m); 1512 - struct drm_bridge *bridge; 1513 1483 unsigned int idx = 0; 1514 1484 1515 - drm_for_each_bridge_in_chain(encoder, bridge) 1516 - drm_bridge_debugfs_show_bridge(&p, bridge, idx++); 1485 + drm_for_each_bridge_in_chain_scoped(encoder, bridge) 1486 + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); 1517 1487 1518 1488 return 0; 1519 1489 }
+44
drivers/gpu/drm/drm_client_modeset.c
··· 1293 1293 } 1294 1294 EXPORT_SYMBOL(drm_client_modeset_dpms); 1295 1295 1296 + /** 1297 + * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur 1298 + * @client: DRM client 1299 + * @crtc_index: The ndex of the CRTC to wait on 1300 + * 1301 + * Block the caller until the given CRTC has seen a VBLANK. Do nothing 1302 + * if the CRTC is disabled. If there's another DRM master present, fail 1303 + * with -EBUSY. 1304 + * 1305 + * Returns: 1306 + * 0 on success, or negative error code otherwise. 1307 + */ 1308 + int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index) 1309 + { 1310 + struct drm_device *dev = client->dev; 1311 + struct drm_crtc *crtc; 1312 + int ret; 1313 + 1314 + /* 1315 + * Rate-limit update frequency to vblank. If there's a DRM master 1316 + * present, it could interfere while we're waiting for the vblank 1317 + * event. Don't wait in this case. 1318 + */ 1319 + if (!drm_master_internal_acquire(dev)) 1320 + return -EBUSY; 1321 + 1322 + crtc = client->modesets[crtc_index].crtc; 1323 + 1324 + /* 1325 + * Only wait for a vblank event if the CRTC is enabled, otherwise 1326 + * just don't do anything, not even report an error. 1327 + */ 1328 + ret = drm_crtc_vblank_get(crtc); 1329 + if (!ret) { 1330 + drm_crtc_wait_one_vblank(crtc); 1331 + drm_crtc_vblank_put(crtc); 1332 + } 1333 + 1334 + drm_master_internal_release(dev); 1335 + 1336 + return 0; 1337 + } 1338 + EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank); 1339 + 1296 1340 #ifdef CONFIG_DRM_KUNIT_TEST 1297 1341 #include "tests/drm_client_modeset_test.c" 1298 1342 #endif
+159 -11
drivers/gpu/drm/drm_dumb_buffers.c
··· 25 25 26 26 #include <drm/drm_device.h> 27 27 #include <drm/drm_drv.h> 28 + #include <drm/drm_dumb_buffers.h> 29 + #include <drm/drm_fourcc.h> 28 30 #include <drm/drm_gem.h> 29 31 #include <drm/drm_mode.h> 30 32 ··· 58 56 * attempted on some ARM embedded platforms. Such drivers really must have 59 57 * a hardware-specific ioctl to allocate suitable buffer objects. 60 58 */ 59 + 60 + static int drm_mode_align_dumb(struct drm_mode_create_dumb *args, 61 + unsigned long hw_pitch_align, 62 + unsigned long hw_size_align) 63 + { 64 + u32 pitch = args->pitch; 65 + u32 size; 66 + 67 + if (!pitch) 68 + return -EINVAL; 69 + 70 + if (hw_pitch_align) 71 + pitch = roundup(pitch, hw_pitch_align); 72 + 73 + if (!hw_size_align) 74 + hw_size_align = PAGE_SIZE; 75 + else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE)) 76 + return -EINVAL; /* TODO: handle this if necessary */ 77 + 78 + if (check_mul_overflow(args->height, pitch, &size)) 79 + return -EINVAL; 80 + size = ALIGN(size, hw_size_align); 81 + if (!size) 82 + return -EINVAL; 83 + 84 + args->pitch = pitch; 85 + args->size = size; 86 + 87 + return 0; 88 + } 89 + 90 + /** 91 + * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers 92 + * @dev: DRM device 93 + * @args: Parameters for the dumb buffer 94 + * @hw_pitch_align: Hardware scanline alignment in bytes 95 + * @hw_size_align: Hardware buffer-size alignment in bytes 96 + * 97 + * The helper drm_mode_size_dumb() calculates the size of the buffer 98 + * allocation and the scanline size for a dumb buffer. Callers have to 99 + * set the buffers width, height and color mode in the argument @arg. 100 + * The helper validates the correctness of the input and tests for 101 + * possible overflows. If successful, it returns the dumb buffer's 102 + * required scanline pitch and size in &args. 103 + * 104 + * The parameter @hw_pitch_align allows the driver to specifies an 105 + * alignment for the scanline pitch, if the hardware requires any. 
The 106 + * calculated pitch will be a multiple of the alignment. The parameter 107 + * @hw_size_align allows to specify an alignment for buffer sizes. The 108 + * provided alignment should represent requirements of the graphics 109 + * hardware. drm_mode_size_dumb() handles GEM-related constraints 110 + * automatically across all drivers and hardware. For example, the 111 + * returned buffer size is always a multiple of PAGE_SIZE, which is 112 + * required by mmap(). 113 + * 114 + * Returns: 115 + * Zero on success, or a negative error code otherwise. 116 + */ 117 + int drm_mode_size_dumb(struct drm_device *dev, 118 + struct drm_mode_create_dumb *args, 119 + unsigned long hw_pitch_align, 120 + unsigned long hw_size_align) 121 + { 122 + u64 pitch = 0; 123 + u32 fourcc; 124 + 125 + /* 126 + * The scanline pitch depends on the buffer width and the color 127 + * format. The latter is specified as a color-mode constant for 128 + * which we first have to find the corresponding color format. 129 + * 130 + * Different color formats can have the same color-mode constant. 131 + * For example XRGB8888 and BGRX8888 both have a color mode of 32. 132 + * It is possible to use different formats for dumb-buffer allocation 133 + * and rendering as long as all involved formats share the same 134 + * color-mode constant. 135 + */ 136 + fourcc = drm_driver_color_mode_format(dev, args->bpp); 137 + if (fourcc != DRM_FORMAT_INVALID) { 138 + const struct drm_format_info *info = drm_format_info(fourcc); 139 + 140 + if (!info) 141 + return -EINVAL; 142 + pitch = drm_format_info_min_pitch(info, 0, args->width); 143 + } else if (args->bpp) { 144 + /* 145 + * Some userspace throws in arbitrary values for bpp and 146 + * relies on the kernel to figure it out. In this case we 147 + * fall back to the old method of using bpp directly. The 148 + * over-commitment of memory from the rounding is acceptable 149 + * for compatibility with legacy userspace. 
We have a number 150 + * of deprecated legacy values that are explicitly supported. 151 + */ 152 + switch (args->bpp) { 153 + default: 154 + drm_warn_once(dev, 155 + "Unknown color mode %u; guessing buffer size.\n", 156 + args->bpp); 157 + fallthrough; 158 + /* 159 + * These constants represent various YUV formats supported by 160 + * drm_gem_afbc_get_bpp(). 161 + */ 162 + case 12: // DRM_FORMAT_YUV420_8BIT 163 + case 15: // DRM_FORMAT_YUV420_10BIT 164 + case 30: // DRM_FORMAT_VUY101010 165 + fallthrough; 166 + /* 167 + * Used by Mesa and Gstreamer to allocate NV formats and others 168 + * as RGB buffers. Technically, XRGB16161616F formats are RGB, 169 + * but the dumb buffers are not supposed to be used for anything 170 + * beyond 32 bits per pixels. 171 + */ 172 + case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010 173 + case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F 174 + pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8); 175 + break; 176 + } 177 + } 178 + 179 + if (!pitch || pitch > U32_MAX) 180 + return -EINVAL; 181 + 182 + args->pitch = pitch; 183 + 184 + return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align); 185 + } 186 + EXPORT_SYMBOL(drm_mode_size_dumb); 61 187 62 188 int drm_mode_create_dumb(struct drm_device *dev, 63 189 struct drm_mode_create_dumb *args, ··· 229 99 int drm_mode_create_dumb_ioctl(struct drm_device *dev, 230 100 void *data, struct drm_file *file_priv) 231 101 { 232 - return drm_mode_create_dumb(dev, data, file_priv); 102 + struct drm_mode_create_dumb *args = data; 103 + int err; 104 + 105 + err = drm_mode_create_dumb(dev, args, file_priv); 106 + if (err) { 107 + args->handle = 0; 108 + args->pitch = 0; 109 + args->size = 0; 110 + } 111 + return err; 112 + } 113 + 114 + static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args, 115 + struct drm_file *file_priv) 116 + { 117 + if (!dev->driver->dumb_create) 118 + return -ENOSYS; 119 + 120 + if (dev->driver->dumb_map_offset) 121 + return 
dev->driver->dumb_map_offset(file_priv, dev, args->handle, 122 + &args->offset); 123 + else 124 + return drm_gem_dumb_map_offset(file_priv, dev, args->handle, 125 + &args->offset); 233 126 } 234 127 235 128 /** ··· 273 120 void *data, struct drm_file *file_priv) 274 121 { 275 122 struct drm_mode_map_dumb *args = data; 123 + int err; 276 124 277 - if (!dev->driver->dumb_create) 278 - return -ENOSYS; 279 - 280 - if (dev->driver->dumb_map_offset) 281 - return dev->driver->dumb_map_offset(file_priv, dev, 282 - args->handle, 283 - &args->offset); 284 - else 285 - return drm_gem_dumb_map_offset(file_priv, dev, args->handle, 286 - &args->offset); 125 + err = drm_mode_mmap_dumb(dev, args, file_priv); 126 + if (err) 127 + args->offset = 0; 128 + return err; 287 129 } 288 130 289 131 int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+6 -24
drivers/gpu/drm/drm_fb_helper.c
··· 368 368 unsigned long flags; 369 369 int ret; 370 370 371 + mutex_lock(&helper->lock); 372 + drm_client_modeset_wait_for_vblank(&helper->client, 0); 373 + mutex_unlock(&helper->lock); 374 + 371 375 if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty)) 372 376 return; 373 377 ··· 1072 1068 unsigned long arg) 1073 1069 { 1074 1070 struct drm_fb_helper *fb_helper = info->par; 1075 - struct drm_device *dev = fb_helper->dev; 1076 - struct drm_crtc *crtc; 1077 1071 int ret = 0; 1078 1072 1079 - mutex_lock(&fb_helper->lock); 1080 - if (!drm_master_internal_acquire(dev)) { 1081 - ret = -EBUSY; 1082 - goto unlock; 1083 - } 1073 + guard(mutex)(&fb_helper->lock); 1084 1074 1085 1075 switch (cmd) { 1086 1076 case FBIO_WAITFORVSYNC: ··· 1094 1096 * make. If we're not smart enough here, one should 1095 1097 * just consider switch the userspace to KMS. 1096 1098 */ 1097 - crtc = fb_helper->client.modesets[0].crtc; 1098 - 1099 - /* 1100 - * Only wait for a vblank event if the CRTC is 1101 - * enabled, otherwise just don't do anythintg, 1102 - * not even report an error. 1103 - */ 1104 - ret = drm_crtc_vblank_get(crtc); 1105 - if (!ret) { 1106 - drm_crtc_wait_one_vblank(crtc); 1107 - drm_crtc_vblank_put(crtc); 1108 - } 1109 - 1110 - ret = 0; 1099 + ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0); 1111 1100 break; 1112 1101 default: 1113 1102 ret = -ENOTTY; 1114 1103 } 1115 1104 1116 - drm_master_internal_release(dev); 1117 - unlock: 1118 - mutex_unlock(&fb_helper->lock); 1119 1105 return ret; 1120 1106 } 1121 1107 EXPORT_SYMBOL(drm_fb_helper_ioctl);
-91
drivers/gpu/drm/drm_format_helper.c
··· 1165 1165 } 1166 1166 EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444); 1167 1167 1168 - /** 1169 - * drm_fb_blit - Copy parts of a framebuffer to display memory 1170 - * @dst: Array of display-memory addresses to copy to 1171 - * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines 1172 - * within @dst; can be NULL if scanlines are stored next to each other. 1173 - * @dst_format: FOURCC code of the display's color format 1174 - * @src: The framebuffer memory to copy from 1175 - * @fb: The framebuffer to copy from 1176 - * @clip: Clip rectangle area to copy 1177 - * @state: Transform and conversion state 1178 - * 1179 - * This function copies parts of a framebuffer to display memory. If the 1180 - * formats of the display and the framebuffer mismatch, the blit function 1181 - * will attempt to convert between them during the process. The parameters @dst, 1182 - * @dst_pitch and @src refer to arrays. Each array must have at least as many 1183 - * entries as there are planes in @dst_format's format. Each entry stores the 1184 - * value for the format's respective color plane at the same index. 1185 - * 1186 - * This function does not apply clipping on @dst (i.e. the destination is at the 1187 - * top-left corner). 1188 - * 1189 - * Returns: 1190 - * 0 on success, or 1191 - * -EINVAL if the color-format conversion failed, or 1192 - * a negative error code otherwise. 
1193 - */ 1194 - int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, 1195 - const struct iosys_map *src, const struct drm_framebuffer *fb, 1196 - const struct drm_rect *clip, struct drm_format_conv_state *state) 1197 - { 1198 - uint32_t fb_format = fb->format->format; 1199 - 1200 - if (fb_format == dst_format) { 1201 - drm_fb_memcpy(dst, dst_pitch, src, fb, clip); 1202 - return 0; 1203 - } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) { 1204 - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); 1205 - return 0; 1206 - } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) { 1207 - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); 1208 - return 0; 1209 - } else if (fb_format == DRM_FORMAT_XRGB8888) { 1210 - if (dst_format == DRM_FORMAT_RGB565) { 1211 - drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state); 1212 - return 0; 1213 - } else if (dst_format == DRM_FORMAT_XRGB1555) { 1214 - drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state); 1215 - return 0; 1216 - } else if (dst_format == DRM_FORMAT_ARGB1555) { 1217 - drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state); 1218 - return 0; 1219 - } else if (dst_format == DRM_FORMAT_RGBA5551) { 1220 - drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state); 1221 - return 0; 1222 - } else if (dst_format == DRM_FORMAT_RGB888) { 1223 - drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); 1224 - return 0; 1225 - } else if (dst_format == DRM_FORMAT_BGR888) { 1226 - drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state); 1227 - return 0; 1228 - } else if (dst_format == DRM_FORMAT_ARGB8888) { 1229 - drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); 1230 - return 0; 1231 - } else if (dst_format == DRM_FORMAT_XBGR8888) { 1232 - drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state); 1233 - return 0; 1234 - } else if (dst_format == DRM_FORMAT_ABGR8888) { 1235 - 
drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state); 1236 - return 0; 1237 - } else if (dst_format == DRM_FORMAT_XRGB2101010) { 1238 - drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state); 1239 - return 0; 1240 - } else if (dst_format == DRM_FORMAT_ARGB2101010) { 1241 - drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state); 1242 - return 0; 1243 - } else if (dst_format == DRM_FORMAT_BGRX8888) { 1244 - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); 1245 - return 0; 1246 - } else if (dst_format == DRM_FORMAT_RGB332) { 1247 - drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state); 1248 - return 0; 1249 - } 1250 - } 1251 - 1252 - drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n", 1253 - &fb_format, &dst_format); 1254 - 1255 - return -EINVAL; 1256 - } 1257 - EXPORT_SYMBOL(drm_fb_blit); 1258 - 1259 1168 static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels) 1260 1169 { 1261 1170 u8 *dbuf8 = dbuf;
+8 -8
drivers/gpu/drm/drm_gem.c
··· 101 101 102 102 vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), 103 103 GFP_KERNEL); 104 - if (!vma_offset_manager) { 105 - DRM_ERROR("out of memory\n"); 104 + if (!vma_offset_manager) 106 105 return -ENOMEM; 107 - } 108 106 109 107 dev->vma_offset_manager = vma_offset_manager; 110 108 drm_vma_offset_manager_init(vma_offset_manager, ··· 783 785 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, 784 786 int count, struct drm_gem_object ***objs_out) 785 787 { 786 - int ret; 787 - u32 *handles; 788 + struct drm_device *dev = filp->minor->dev; 788 789 struct drm_gem_object **objs; 790 + u32 *handles; 791 + int ret; 789 792 790 793 if (!count) 791 794 return 0; ··· 806 807 807 808 if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { 808 809 ret = -EFAULT; 809 - DRM_DEBUG("Failed to copy in GEM handles\n"); 810 + drm_dbg_core(dev, "Failed to copy in GEM handles\n"); 810 811 goto out; 811 812 } 812 813 ··· 854 855 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, 855 856 bool wait_all, unsigned long timeout) 856 857 { 857 - long ret; 858 + struct drm_device *dev = filep->minor->dev; 858 859 struct drm_gem_object *obj; 860 + long ret; 859 861 860 862 obj = drm_gem_object_lookup(filep, handle); 861 863 if (!obj) { 862 - DRM_DEBUG("Failed to look up GEM BO %d\n", handle); 864 + drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle); 863 865 return -EINVAL; 864 866 } 865 867
+6 -3
drivers/gpu/drm/drm_gem_dma_helper.c
··· 20 20 #include <drm/drm.h> 21 21 #include <drm/drm_device.h> 22 22 #include <drm/drm_drv.h> 23 + #include <drm/drm_dumb_buffers.h> 23 24 #include <drm/drm_gem_dma_helper.h> 24 25 #include <drm/drm_vma_manager.h> 25 26 ··· 305 304 struct drm_mode_create_dumb *args) 306 305 { 307 306 struct drm_gem_dma_object *dma_obj; 307 + int ret; 308 308 309 - args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 310 - args->size = args->pitch * args->height; 309 + ret = drm_mode_size_dumb(drm, args, SZ_8, 0); 310 + if (ret) 311 + return ret; 311 312 312 313 dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size, 313 314 &args->handle); ··· 585 582 586 583 ret = dma_buf_vmap_unlocked(attach->dmabuf, &map); 587 584 if (ret) { 588 - DRM_ERROR("Failed to vmap PRIME buffer\n"); 585 + drm_err(dev, "Failed to vmap PRIME buffer\n"); 589 586 return ERR_PTR(ret); 590 587 } 591 588
+84 -52
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 18 18 #include <drm/drm.h> 19 19 #include <drm/drm_device.h> 20 20 #include <drm/drm_drv.h> 21 + #include <drm/drm_dumb_buffers.h> 21 22 #include <drm/drm_gem_shmem_helper.h> 22 23 #include <drm/drm_prime.h> 23 24 #include <drm/drm_print.h> ··· 49 48 .vm_ops = &drm_gem_shmem_vm_ops, 50 49 }; 51 50 51 + static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, 52 + size_t size, bool private, struct vfsmount *gemfs) 53 + { 54 + struct drm_gem_object *obj = &shmem->base; 55 + int ret = 0; 56 + 57 + if (!obj->funcs) 58 + obj->funcs = &drm_gem_shmem_funcs; 59 + 60 + if (private) { 61 + drm_gem_private_object_init(dev, obj, size); 62 + shmem->map_wc = false; /* dma-buf mappings use always writecombine */ 63 + } else { 64 + ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); 65 + } 66 + if (ret) { 67 + drm_gem_private_object_fini(obj); 68 + return ret; 69 + } 70 + 71 + ret = drm_gem_create_mmap_offset(obj); 72 + if (ret) 73 + goto err_release; 74 + 75 + INIT_LIST_HEAD(&shmem->madv_list); 76 + 77 + if (!private) { 78 + /* 79 + * Our buffers are kept pinned, so allocating them 80 + * from the MOVABLE zone is a really bad idea, and 81 + * conflicts with CMA. See comments above new_inode() 82 + * why this is required _and_ expected if you're 83 + * going to pin these pages. 84 + */ 85 + mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | 86 + __GFP_RETRY_MAYFAIL | __GFP_NOWARN); 87 + } 88 + 89 + return 0; 90 + err_release: 91 + drm_gem_object_release(obj); 92 + return ret; 93 + } 94 + 95 + /** 96 + * drm_gem_shmem_init - Initialize an allocated object. 97 + * @dev: DRM device 98 + * @obj: The allocated shmem GEM object. 99 + * 100 + * Returns: 101 + * 0 on success, or a negative error code on failure. 
102 + */ 103 + int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size) 104 + { 105 + return __drm_gem_shmem_init(dev, shmem, size, false, NULL); 106 + } 107 + EXPORT_SYMBOL_GPL(drm_gem_shmem_init); 108 + 52 109 static struct drm_gem_shmem_object * 53 110 __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, 54 111 struct vfsmount *gemfs) ··· 129 70 obj = &shmem->base; 130 71 } 131 72 132 - if (!obj->funcs) 133 - obj->funcs = &drm_gem_shmem_funcs; 134 - 135 - if (private) { 136 - drm_gem_private_object_init(dev, obj, size); 137 - shmem->map_wc = false; /* dma-buf mappings use always writecombine */ 138 - } else { 139 - ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); 140 - } 73 + ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs); 141 74 if (ret) { 142 - drm_gem_private_object_fini(obj); 143 - goto err_free; 144 - } 145 - 146 - ret = drm_gem_create_mmap_offset(obj); 147 - if (ret) 148 - goto err_release; 149 - 150 - INIT_LIST_HEAD(&shmem->madv_list); 151 - 152 - if (!private) { 153 - /* 154 - * Our buffers are kept pinned, so allocating them 155 - * from the MOVABLE zone is a really bad idea, and 156 - * conflicts with CMA. See comments above new_inode() 157 - * why this is required _and_ expected if you're 158 - * going to pin these pages. 
159 - */ 160 - mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | 161 - __GFP_RETRY_MAYFAIL | __GFP_NOWARN); 75 + kfree(obj); 76 + return ERR_PTR(ret); 162 77 } 163 78 164 79 return shmem; 165 - 166 - err_release: 167 - drm_gem_object_release(obj); 168 - err_free: 169 - kfree(obj); 170 - 171 - return ERR_PTR(ret); 172 80 } 173 81 /** 174 82 * drm_gem_shmem_create - Allocate an object with the given size ··· 176 150 EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt); 177 151 178 152 /** 179 - * drm_gem_shmem_free - Free resources associated with a shmem GEM object 180 - * @shmem: shmem GEM object to free 153 + * drm_gem_shmem_release - Release resources associated with a shmem GEM object. 154 + * @shmem: shmem GEM object 181 155 * 182 - * This function cleans up the GEM object state and frees the memory used to 183 - * store the object itself. 156 + * This function cleans up the GEM object state, but does not free the memory used to store the 157 + * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings. 184 158 */ 185 - void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) 159 + void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem) 186 160 { 187 161 struct drm_gem_object *obj = &shmem->base; 188 162 ··· 209 183 } 210 184 211 185 drm_gem_object_release(obj); 186 + } 187 + EXPORT_SYMBOL_GPL(drm_gem_shmem_release); 188 + 189 + /** 190 + * drm_gem_shmem_free - Free resources associated with a shmem GEM object 191 + * @shmem: shmem GEM object to free 192 + * 193 + * This function cleans up the GEM object state and frees the memory used to 194 + * store the object itself. 
195 + */ 196 + void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) 197 + { 198 + drm_gem_shmem_release(shmem); 212 199 kfree(shmem); 213 200 } 214 201 EXPORT_SYMBOL_GPL(drm_gem_shmem_free); ··· 557 518 int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, 558 519 struct drm_mode_create_dumb *args) 559 520 { 560 - u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 521 + int ret; 561 522 562 - if (!args->pitch || !args->size) { 563 - args->pitch = min_pitch; 564 - args->size = PAGE_ALIGN(args->pitch * args->height); 565 - } else { 566 - /* ensure sane minimum values */ 567 - if (args->pitch < min_pitch) 568 - args->pitch = min_pitch; 569 - if (args->size < args->pitch * args->height) 570 - args->size = PAGE_ALIGN(args->pitch * args->height); 571 - } 523 + ret = drm_mode_size_dumb(dev, args, SZ_8, 0); 524 + if (ret) 525 + return ret; 572 526 573 527 return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); 574 528 }
+3 -3
drivers/gpu/drm/drm_gem_vram_helper.c
··· 107 107 108 108 static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) 109 109 { 110 - /* We got here via ttm_bo_put(), which means that the 110 + /* We got here via ttm_bo_fini(), which means that the 111 111 * TTM buffer object in 'bo' has already been cleaned 112 112 * up; only release the GEM object. 113 113 */ ··· 234 234 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object 235 235 * @gbo: the GEM VRAM object 236 236 * 237 - * See ttm_bo_put() for more information. 237 + * See ttm_bo_fini() for more information. 238 238 */ 239 239 void drm_gem_vram_put(struct drm_gem_vram_object *gbo) 240 240 { 241 - ttm_bo_put(&gbo->bo); 241 + ttm_bo_fini(&gbo->bo); 242 242 } 243 243 EXPORT_SYMBOL(drm_gem_vram_put); 244 244
+170 -2
drivers/gpu/drm/drm_vblank.c
··· 136 136 * vblanks after a timer has expired, which can be configured through the 137 137 * ``vblankoffdelay`` module parameter. 138 138 * 139 - * Drivers for hardware without support for vertical-blanking interrupts 140 - * must not call drm_vblank_init(). For such drivers, atomic helpers will 139 + * Drivers for hardware without support for vertical-blanking interrupts can 140 + * use DRM vblank timers to send vblank events at the rate of the current 141 + * display mode's refresh. While not synchronized to the hardware's 142 + * vertical-blanking regions, the timer helps DRM clients and compositors to 143 + * adapt their update cycle to the display output. Drivers should set up 144 + * vblanking as usual, but call drm_crtc_vblank_start_timer() and 145 + * drm_crtc_vblank_cancel_timer() as part of their atomic mode setting. 146 + * See also DRM vblank helpers for more information. 147 + * 148 + * Drivers without support for vertical-blanking interrupts nor timers must 149 + * not call drm_vblank_init(). For these drivers, atomic helpers will 141 150 * automatically generate fake vblank events as part of the display update. 142 151 * This functionality also can be controlled by the driver by enabling and 143 152 * disabling struct drm_crtc_state.no_vblank. 
··· 516 507 517 508 drm_WARN_ON(dev, READ_ONCE(vblank->enabled) && 518 509 drm_core_check_feature(dev, DRIVER_MODESET)); 510 + 511 + if (vblank->vblank_timer.crtc) 512 + hrtimer_cancel(&vblank->vblank_timer.timer); 519 513 520 514 drm_vblank_destroy_worker(vblank); 521 515 timer_delete_sync(&vblank->disable_timer); ··· 2174 2162 return ret; 2175 2163 } 2176 2164 2165 + /* 2166 + * VBLANK timer 2167 + */ 2168 + 2169 + static enum hrtimer_restart drm_vblank_timer_function(struct hrtimer *timer) 2170 + { 2171 + struct drm_vblank_crtc_timer *vtimer = 2172 + container_of(timer, struct drm_vblank_crtc_timer, timer); 2173 + struct drm_crtc *crtc = vtimer->crtc; 2174 + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 2175 + struct drm_device *dev = crtc->dev; 2176 + unsigned long flags; 2177 + ktime_t interval; 2178 + u64 ret_overrun; 2179 + bool succ; 2180 + 2181 + spin_lock_irqsave(&vtimer->interval_lock, flags); 2182 + interval = vtimer->interval; 2183 + spin_unlock_irqrestore(&vtimer->interval_lock, flags); 2184 + 2185 + if (!interval) 2186 + return HRTIMER_NORESTART; 2187 + 2188 + ret_overrun = hrtimer_forward_now(&vtimer->timer, interval); 2189 + if (ret_overrun != 1) 2190 + drm_dbg_vbl(dev, "vblank timer overrun\n"); 2191 + 2192 + if (crtc_funcs->handle_vblank_timeout) 2193 + succ = crtc_funcs->handle_vblank_timeout(crtc); 2194 + else 2195 + succ = drm_crtc_handle_vblank(crtc); 2196 + if (!succ) 2197 + return HRTIMER_NORESTART; 2198 + 2199 + return HRTIMER_RESTART; 2200 + } 2201 + 2202 + /** 2203 + * drm_crtc_vblank_start_timer - Starts the vblank timer on the given CRTC 2204 + * @crtc: the CRTC 2205 + * 2206 + * Drivers should call this function from their CRTC's enable_vblank 2207 + * function to start a vblank timer. The timer will fire after the duration 2208 + * of a full frame. drm_crtc_vblank_cancel_timer() disables a running timer. 2209 + * 2210 + * Returns: 2211 + * 0 on success, or a negative errno code otherwise. 
2212 + */ 2213 + int drm_crtc_vblank_start_timer(struct drm_crtc *crtc) 2214 + { 2215 + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); 2216 + struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer; 2217 + unsigned long flags; 2218 + 2219 + if (!vtimer->crtc) { 2220 + /* 2221 + * Set up the data structures on the first invocation. 2222 + */ 2223 + vtimer->crtc = crtc; 2224 + spin_lock_init(&vtimer->interval_lock); 2225 + hrtimer_setup(&vtimer->timer, drm_vblank_timer_function, 2226 + CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2227 + } else { 2228 + /* 2229 + * Timer should not be active. If it is, wait for the 2230 + * previous cancel operations to finish. 2231 + */ 2232 + while (hrtimer_active(&vtimer->timer)) 2233 + hrtimer_try_to_cancel(&vtimer->timer); 2234 + } 2235 + 2236 + drm_calc_timestamping_constants(crtc, &crtc->mode); 2237 + 2238 + spin_lock_irqsave(&vtimer->interval_lock, flags); 2239 + vtimer->interval = ns_to_ktime(vblank->framedur_ns); 2240 + spin_unlock_irqrestore(&vtimer->interval_lock, flags); 2241 + 2242 + hrtimer_start(&vtimer->timer, vtimer->interval, HRTIMER_MODE_REL); 2243 + 2244 + return 0; 2245 + } 2246 + EXPORT_SYMBOL(drm_crtc_vblank_start_timer); 2247 + 2248 + /** 2249 + * drm_crtc_vblank_start_timer - Cancels the given CRTC's vblank timer 2250 + * @crtc: the CRTC 2251 + * 2252 + * Drivers should call this function from their CRTC's disable_vblank 2253 + * function to stop a vblank timer. 2254 + */ 2255 + void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc) 2256 + { 2257 + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); 2258 + struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer; 2259 + unsigned long flags; 2260 + 2261 + /* 2262 + * Calling hrtimer_cancel() can result in a deadlock with DRM's 2263 + * vblank_time_lime_lock and hrtimers' softirq_expiry_lock. So 2264 + * clear interval and indicate cancellation. The timer function 2265 + * will cancel itself on the next invocation. 
2266 + */ 2267 + 2268 + spin_lock_irqsave(&vtimer->interval_lock, flags); 2269 + vtimer->interval = 0; 2270 + spin_unlock_irqrestore(&vtimer->interval_lock, flags); 2271 + 2272 + hrtimer_try_to_cancel(&vtimer->timer); 2273 + } 2274 + EXPORT_SYMBOL(drm_crtc_vblank_cancel_timer); 2275 + 2276 + /** 2277 + * drm_crtc_vblank_get_vblank_timeout - Returns the vblank timeout 2278 + * @crtc: The CRTC 2279 + * @vblank_time: Returns the next vblank timestamp 2280 + * 2281 + * The helper drm_crtc_vblank_get_vblank_timeout() returns the next vblank 2282 + * timestamp of the CRTC's vblank timer according to the timer's expiry 2283 + * time. 2284 + */ 2285 + void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time) 2286 + { 2287 + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); 2288 + struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer; 2289 + u64 cur_count; 2290 + ktime_t cur_time; 2291 + 2292 + if (!READ_ONCE(vblank->enabled)) { 2293 + *vblank_time = ktime_get(); 2294 + return; 2295 + } 2296 + 2297 + /* 2298 + * A concurrent vblank timeout could update the expires field before 2299 + * we compare it with the vblank time. Hence we'd compare the old 2300 + * expiry time to the new vblank time; deducing the timer had already 2301 + * expired. Reread until we get consistent values from both fields. 2302 + */ 2303 + do { 2304 + cur_count = drm_crtc_vblank_count_and_time(crtc, &cur_time); 2305 + *vblank_time = READ_ONCE(vtimer->timer.node.expires); 2306 + } while (cur_count != drm_crtc_vblank_count_and_time(crtc, &cur_time)); 2307 + 2308 + if (drm_WARN_ON(crtc->dev, !ktime_compare(*vblank_time, cur_time))) 2309 + return; /* Already expired */ 2310 + 2311 + /* 2312 + * To prevent races we roll the hrtimer forward before we do any 2313 + * interrupt processing - this is how real hw works (the interrupt 2314 + * is only generated after all the vblank registers are updated) 2315 + * and what the vblank core expects. 
Therefore we need to always 2316 + * correct the timestamp by one frame. 2317 + */ 2318 + *vblank_time = ktime_sub(*vblank_time, vtimer->interval); 2319 + } 2320 + EXPORT_SYMBOL(drm_crtc_vblank_get_vblank_timeout);
+176
drivers/gpu/drm/drm_vblank_helper.c
··· 1 + // SPDX-License-Identifier: MIT 2 + 3 + #include <drm/drm_atomic.h> 4 + #include <drm/drm_crtc.h> 5 + #include <drm/drm_managed.h> 6 + #include <drm/drm_modeset_helper_vtables.h> 7 + #include <drm/drm_print.h> 8 + #include <drm/drm_vblank.h> 9 + #include <drm/drm_vblank_helper.h> 10 + 11 + /** 12 + * DOC: overview 13 + * 14 + * The vblank helper library provides functions for supporting vertical 15 + * blanking in DRM drivers. 16 + * 17 + * For vblank timers, several callback implementations are available. 18 + * Drivers enable support for vblank timers by setting the vblank callbacks 19 + * in struct &drm_crtc_funcs to the helpers provided by this library. The 20 + * initializer macro DRM_CRTC_VBLANK_TIMER_FUNCS does this conveniently. 21 + * The driver further has to send the VBLANK event from its atomic_flush 22 + * callback and control vblank from the CRTC's atomic_enable and atomic_disable 23 + * callbacks. The callbacks are located in struct &drm_crtc_helper_funcs. 24 + * The vblank helper library provides implementations of these callbacks 25 + * for drivers without further requirements. The initializer macro 26 + * DRM_CRTC_HELPER_VBLANK_FUNCS sets them conveniently. 27 + * 28 + * Once the driver enables vblank support with drm_vblank_init(), each 29 + * CRTC's vblank timer fires according to the programmed display mode. By 30 + * default, the vblank timer invokes drm_crtc_handle_vblank(). Drivers with 31 + * more specific requirements can set their own handler function in 32 + * struct &drm_crtc_helper_funcs.handle_vblank_timeout. 33 + */ 34 + 35 + /* 36 + * VBLANK helpers 37 + */ 38 + 39 + /** 40 + * drm_crtc_vblank_atomic_flush - 41 + * Implements struct &drm_crtc_helper_funcs.atomic_flush 42 + * @crtc: The CRTC 43 + * @state: The atomic state to apply 44 + * 45 + * The helper drm_crtc_vblank_atomic_flush() implements atomic_flush of 46 + * struct drm_crtc_helper_funcs for CRTCs that only need to send out a 47 + * VBLANK event. 
48 + * 49 + * See also struct &drm_crtc_helper_funcs.atomic_flush. 50 + */ 51 + void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc, 52 + struct drm_atomic_state *state) 53 + { 54 + struct drm_device *dev = crtc->dev; 55 + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 56 + struct drm_pending_vblank_event *event; 57 + 58 + spin_lock_irq(&dev->event_lock); 59 + 60 + event = crtc_state->event; 61 + crtc_state->event = NULL; 62 + 63 + if (event) { 64 + if (drm_crtc_vblank_get(crtc) == 0) 65 + drm_crtc_arm_vblank_event(crtc, event); 66 + else 67 + drm_crtc_send_vblank_event(crtc, event); 68 + } 69 + 70 + spin_unlock_irq(&dev->event_lock); 71 + } 72 + EXPORT_SYMBOL(drm_crtc_vblank_atomic_flush); 73 + 74 + /** 75 + * drm_crtc_vblank_atomic_enable - Implements struct &drm_crtc_helper_funcs.atomic_enable 76 + * @crtc: The CRTC 77 + * @state: The atomic state 78 + * 79 + * The helper drm_crtc_vblank_atomic_enable() implements atomic_enable 80 + * of struct drm_crtc_helper_funcs for CRTCs that only need to enable VBLANKs. 81 + * 82 + * See also struct &drm_crtc_helper_funcs.atomic_enable. 83 + */ 84 + void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc, 85 + struct drm_atomic_state *state) 86 + { 87 + drm_crtc_vblank_on(crtc); 88 + } 89 + EXPORT_SYMBOL(drm_crtc_vblank_atomic_enable); 90 + 91 + /** 92 + * drm_crtc_vblank_atomic_disable - Implements struct &drm_crtc_helper_funcs.atomic_disable 93 + * @crtc: The CRTC 94 + * @state: The atomic state 95 + * 96 + * The helper drm_crtc_vblank_atomic_disable() implements atomic_disable 97 + * of struct drm_crtc_helper_funcs for CRTCs that only need to disable VBLANKs. 98 + * 99 + * See also struct &drm_crtc_helper_funcs.atomic_disable. 
100 + */ 101 + void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc, 102 + struct drm_atomic_state *state) 103 + { 104 + drm_crtc_vblank_off(crtc); 105 + } 106 + EXPORT_SYMBOL(drm_crtc_vblank_atomic_disable); 107 + 108 + /* 109 + * VBLANK timer 110 + */ 111 + 112 + /** 113 + * drm_crtc_vblank_helper_enable_vblank_timer - Implements struct &drm_crtc_funcs.enable_vblank 114 + * @crtc: The CRTC 115 + * 116 + * The helper drm_crtc_vblank_helper_enable_vblank_timer() implements 117 + * enable_vblank of struct drm_crtc_funcs for CRTCs that require 118 + * a VBLANK timer. It sets up the timer on the first invocation. The 119 + * started timer expires after the current frame duration. See struct 120 + * &drm_vblank_crtc.framedur_ns. 121 + * 122 + * See also struct &drm_crtc_funcs.enable_vblank. 123 + * 124 + * Returns: 125 + * 0 on success, or a negative errno code otherwise. 126 + */ 127 + int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc) 128 + { 129 + return drm_crtc_vblank_start_timer(crtc); 130 + } 131 + EXPORT_SYMBOL(drm_crtc_vblank_helper_enable_vblank_timer); 132 + 133 + /** 134 + * drm_crtc_vblank_helper_disable_vblank_timer - Implements struct &drm_crtc_funcs.disable_vblank 135 + * @crtc: The CRTC 136 + * 137 + * The helper drm_crtc_vblank_helper_disable_vblank_timer() implements 138 + * disable_vblank of struct drm_crtc_funcs for CRTCs that require a 139 + * VBLANK timer. 140 + * 141 + * See also struct &drm_crtc_funcs.disable_vblank. 
142 + */ 143 + void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc) 144 + { 145 + drm_crtc_vblank_cancel_timer(crtc); 146 + } 147 + EXPORT_SYMBOL(drm_crtc_vblank_helper_disable_vblank_timer); 148 + 149 + /** 150 + * drm_crtc_vblank_helper_get_vblank_timestamp_from_timer - 151 + * Implements struct &drm_crtc_funcs.get_vblank_timestamp 152 + * @crtc: The CRTC 153 + * @max_error: Maximum acceptable error 154 + * @vblank_time: Returns the next vblank timestamp 155 + * @in_vblank_irq: True if called from drm_crtc_handle_vblank() 156 + * 157 + * The helper drm_crtc_vblank_helper_get_vblank_timestamp_from_timer() implements 158 + * get_vblank_timestamp of struct drm_crtc_funcs for CRTCs that require a 159 + * VBLANK timer. It returns the timestamp according to the timer's expiry 160 + * time. 161 + * 162 + * See also struct &drm_crtc_funcs.get_vblank_timestamp. 163 + * 164 + * Returns: 165 + * True on success, or false otherwise. 166 + */ 167 + bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc, 168 + int *max_error, 169 + ktime_t *vblank_time, 170 + bool in_vblank_irq) 171 + { 172 + drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time); 173 + 174 + return true; 175 + } 176 + EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_from_timer);
+5 -3
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 10 10 #include <linux/shmem_fs.h> 11 11 #include <linux/module.h> 12 12 13 + #include <drm/drm_dumb_buffers.h> 13 14 #include <drm/drm_prime.h> 14 15 #include <drm/drm_vma_manager.h> 15 16 #include <drm/exynos_drm.h> ··· 330 329 unsigned int flags; 331 330 int ret; 332 331 332 + ret = drm_mode_size_dumb(dev, args, 0, 0); 333 + if (ret) 334 + return ret; 335 + 333 336 /* 334 337 * allocate memory to be used for framebuffer. 335 338 * - this callback would be called by user application 336 339 * with DRM_IOCTL_MODE_CREATE_DUMB command. 337 340 */ 338 - 339 - args->pitch = args->width * ((args->bpp + 7) / 8); 340 - args->size = args->pitch * args->height; 341 341 342 342 if (is_drm_iommu_supported(dev)) 343 343 flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
-43
drivers/gpu/drm/gma500/fbdev.c
··· 50 50 * struct fb_ops 51 51 */ 52 52 53 - #define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16) 54 - 55 - static int psb_fbdev_fb_setcolreg(unsigned int regno, 56 - unsigned int red, unsigned int green, 57 - unsigned int blue, unsigned int transp, 58 - struct fb_info *info) 59 - { 60 - struct drm_fb_helper *fb_helper = info->par; 61 - struct drm_framebuffer *fb = fb_helper->fb; 62 - uint32_t v; 63 - 64 - if (!fb) 65 - return -ENOMEM; 66 - 67 - if (regno > 255) 68 - return 1; 69 - 70 - red = CMAP_TOHW(red, info->var.red.length); 71 - blue = CMAP_TOHW(blue, info->var.blue.length); 72 - green = CMAP_TOHW(green, info->var.green.length); 73 - transp = CMAP_TOHW(transp, info->var.transp.length); 74 - 75 - v = (red << info->var.red.offset) | 76 - (green << info->var.green.offset) | 77 - (blue << info->var.blue.offset) | 78 - (transp << info->var.transp.offset); 79 - 80 - if (regno < 16) { 81 - switch (fb->format->cpp[0] * 8) { 82 - case 16: 83 - ((uint32_t *) info->pseudo_palette)[regno] = v; 84 - break; 85 - case 24: 86 - case 32: 87 - ((uint32_t *) info->pseudo_palette)[regno] = v; 88 - break; 89 - } 90 - } 91 - 92 - return 0; 93 - } 94 - 95 53 static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) 96 54 { 97 55 if (vma->vm_pgoff != 0) ··· 93 135 .owner = THIS_MODULE, 94 136 __FB_DEFAULT_IOMEM_OPS_RDWR, 95 137 DRM_FB_HELPER_DEFAULT_OPS, 96 - .fb_setcolreg = psb_fbdev_fb_setcolreg, 97 138 __FB_DEFAULT_IOMEM_OPS_DRAW, 98 139 .fb_mmap = psb_fbdev_fb_mmap, 99 140 .fb_destroy = psb_fbdev_fb_destroy,
+4 -4
drivers/gpu/drm/gud/gud_connector.c
··· 561 561 continue; /* not a DRM property */ 562 562 563 563 property = gud_connector_property_lookup(connector, prop); 564 - if (WARN_ON(IS_ERR(property))) 564 + if (drm_WARN_ON(drm, IS_ERR(property))) 565 565 continue; 566 566 567 567 state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state); 568 - if (WARN_ON(IS_ERR(state_val))) 568 + if (drm_WARN_ON(drm, IS_ERR(state_val))) 569 569 continue; 570 570 571 571 *state_val = val; ··· 593 593 unsigned int *state_val; 594 594 595 595 state_val = gud_connector_tv_state_val(prop, &connector_state->tv); 596 - if (WARN_ON_ONCE(IS_ERR(state_val))) 596 + if (drm_WARN_ON_ONCE(connector_state->connector->dev, IS_ERR(state_val))) 597 597 return PTR_ERR(state_val); 598 598 599 599 val = *state_val; ··· 667 667 return ret; 668 668 } 669 669 670 - if (WARN_ON(connector->index != index)) 670 + if (drm_WARN_ON(drm, connector->index != index)) 671 671 return -EINVAL; 672 672 673 673 if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS)
+5 -5
drivers/gpu/drm/gud/gud_pipe.c
··· 61 61 size_t len; 62 62 void *buf; 63 63 64 - WARN_ON_ONCE(format->char_per_block[0] != 1); 64 + drm_WARN_ON_ONCE(fb->dev, format->char_per_block[0] != 1); 65 65 66 66 /* Start on a byte boundary */ 67 67 rect->x1 = ALIGN_DOWN(rect->x1, block_width); ··· 138 138 pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7); 139 139 break; 140 140 default: 141 - WARN_ON_ONCE(1); 141 + drm_WARN_ON_ONCE(fb->dev, 1); 142 142 return len; 143 143 } 144 144 ··· 527 527 drm_connector_list_iter_end(&conn_iter); 528 528 } 529 529 530 - if (WARN_ON_ONCE(!connector_state)) 530 + if (drm_WARN_ON_ONCE(plane->dev, !connector_state)) 531 531 return -ENOENT; 532 532 533 533 len = struct_size(req, properties, ··· 539 539 gud_from_display_mode(&req->mode, mode); 540 540 541 541 req->format = gud_from_fourcc(format->format); 542 - if (WARN_ON_ONCE(!req->format)) { 542 + if (drm_WARN_ON_ONCE(plane->dev, !req->format)) { 543 543 ret = -EINVAL; 544 544 goto out; 545 545 } ··· 561 561 val = new_plane_state->rotation; 562 562 break; 563 563 default: 564 - WARN_ON_ONCE(1); 564 + drm_WARN_ON_ONCE(plane->dev, 1); 565 565 ret = -EINVAL; 566 566 goto out; 567 567 }
+11
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
··· 19 19 #include <drm/drm_probe_helper.h> 20 20 #include <drm/drm_panic.h> 21 21 #include <drm/drm_plane.h> 22 + #include <drm/drm_vblank.h> 23 + #include <drm/drm_vblank_helper.h> 22 24 23 25 #include "hyperv_drm.h" 24 26 ··· 113 111 crtc_state->mode.hdisplay, 114 112 crtc_state->mode.vdisplay, 115 113 plane_state->fb->pitches[0]); 114 + 115 + drm_crtc_vblank_on(crtc); 116 116 } 117 117 118 118 static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = { 119 119 .atomic_check = drm_crtc_helper_atomic_check, 120 + .atomic_flush = drm_crtc_vblank_atomic_flush, 120 121 .atomic_enable = hyperv_crtc_helper_atomic_enable, 122 + .atomic_disable = drm_crtc_vblank_atomic_disable, 121 123 }; 122 124 123 125 static const struct drm_crtc_funcs hyperv_crtc_funcs = { ··· 131 125 .page_flip = drm_atomic_helper_page_flip, 132 126 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 133 127 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 128 + DRM_CRTC_VBLANK_TIMER_FUNCS, 134 129 }; 135 130 136 131 static int hyperv_plane_atomic_check(struct drm_plane *plane, ··· 327 320 drm_err(dev, "Failed to initialized pipe.\n"); 328 321 return ret; 329 322 } 323 + 324 + ret = drm_vblank_init(dev, 1); 325 + if (ret) 326 + return ret; 330 327 331 328 drm_mode_config_reset(dev); 332 329
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
··· 1029 1029 { 1030 1030 GEM_BUG_ON(!obj->ttm.created); 1031 1031 1032 - ttm_bo_put(i915_gem_to_ttm(obj)); 1032 + ttm_bo_fini(i915_gem_to_ttm(obj)); 1033 1033 } 1034 1034 1035 1035 static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) ··· 1325 1325 * If this function fails, it will call the destructor, but 1326 1326 * our caller still owns the object. So no freeing in the 1327 1327 * destructor until obj->ttm.created is true. 1328 - * Similarly, in delayed_destroy, we can't call ttm_bo_put() 1328 + * Similarly, in delayed_destroy, we can't call ttm_bo_fini() 1329 1329 * until successful initialization. 1330 1330 */ 1331 1331 ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
+23 -6
drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
··· 17 17 #include <drm/drm_atomic.h> 18 18 #include <drm/drm_atomic_helper.h> 19 19 #include <drm/drm_drv.h> 20 + #include <drm/drm_dumb_buffers.h> 20 21 #include <drm/drm_fbdev_dma.h> 22 + #include <drm/drm_fourcc.h> 21 23 #include <drm/drm_gem_dma_helper.h> 22 24 #include <drm/drm_gem_framebuffer_helper.h> 23 25 #include <drm/drm_managed.h> ··· 143 141 struct drm_device *drm, 144 142 struct drm_mode_create_dumb *args) 145 143 { 146 - u32 width = args->width; 144 + u32 fourcc; 145 + const struct drm_format_info *info; 146 + u64 pitch_align; 147 147 int ret; 148 148 149 - args->width = ALIGN(width, 8); 150 - 151 - ret = drm_gem_dma_dumb_create(file_priv, drm, args); 149 + /* 150 + * Hardware requires the framebuffer width to be aligned to 151 + * multiples of 8. The mode-setting code handles this, but 152 + * the buffer pitch has to be aligned as well. Set the pitch 153 + * alignment accordingly, so that the each scanline fits into 154 + * the allocated buffer. 155 + */ 156 + fourcc = drm_driver_color_mode_format(drm, args->bpp); 157 + if (fourcc == DRM_FORMAT_INVALID) 158 + return -EINVAL; 159 + info = drm_format_info(fourcc); 160 + if (!info) 161 + return -EINVAL; 162 + pitch_align = drm_format_info_min_pitch(info, 0, SZ_8); 163 + if (!pitch_align || pitch_align > U32_MAX) 164 + return -EINVAL; 165 + ret = drm_mode_size_dumb(drm, args, pitch_align, 0); 152 166 if (ret) 153 167 return ret; 154 168 155 - args->width = width; 156 - return ret; 169 + return drm_gem_dma_dumb_create(file_priv, drm, args); 157 170 } 158 171 159 172 static const struct drm_driver imx_drm_driver = {
+10 -7
drivers/gpu/drm/imx/ipuv3/imx-tve.c
··· 368 368 return 0; 369 369 } 370 370 371 - static long clk_tve_di_round_rate(struct clk_hw *hw, unsigned long rate, 372 - unsigned long *prate) 371 + static int clk_tve_di_determine_rate(struct clk_hw *hw, 372 + struct clk_rate_request *req) 373 373 { 374 374 unsigned long div; 375 375 376 - div = *prate / rate; 376 + div = req->best_parent_rate / req->rate; 377 377 if (div >= 4) 378 - return *prate / 4; 378 + req->rate = req->best_parent_rate / 4; 379 379 else if (div >= 2) 380 - return *prate / 2; 381 - return *prate; 380 + req->rate = req->best_parent_rate / 2; 381 + else 382 + req->rate = req->best_parent_rate; 383 + 384 + return 0; 382 385 } 383 386 384 387 static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate, ··· 412 409 } 413 410 414 411 static const struct clk_ops clk_tve_di_ops = { 415 - .round_rate = clk_tve_di_round_rate, 412 + .determine_rate = clk_tve_di_determine_rate, 416 413 .set_rate = clk_tve_di_set_rate, 417 414 .recalc_rate = clk_tve_di_recalc_rate, 418 415 };
+2 -2
drivers/gpu/drm/imx/ipuv3/parallel-display.c
··· 134 134 struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); 135 135 struct drm_display_info *di = &conn_state->connector->display_info; 136 136 struct drm_bridge_state *next_bridge_state = NULL; 137 - struct drm_bridge *next_bridge; 138 137 u32 bus_flags, bus_fmt; 139 138 140 - next_bridge = drm_bridge_get_next_bridge(bridge); 139 + struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge); 140 + 141 141 if (next_bridge) 142 142 next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, 143 143 next_bridge);
+9 -22
drivers/gpu/drm/loongson/lsdc_gem.c
··· 6 6 #include <linux/dma-buf.h> 7 7 8 8 #include <drm/drm_debugfs.h> 9 + #include <drm/drm_dumb_buffers.h> 9 10 #include <drm/drm_file.h> 10 11 #include <drm/drm_gem.h> 11 12 #include <drm/drm_prime.h> ··· 58 57 struct ttm_buffer_object *tbo = to_ttm_bo(obj); 59 58 60 59 if (tbo) 61 - ttm_bo_put(tbo); 60 + ttm_bo_fini(tbo); 62 61 } 63 62 64 63 static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map) ··· 205 204 const struct lsdc_desc *descp = ldev->descp; 206 205 u32 domain = LSDC_GEM_DOMAIN_VRAM; 207 206 struct drm_gem_object *gobj; 208 - size_t size; 209 - u32 pitch; 210 - u32 handle; 211 207 int ret; 212 208 213 - if (!args->width || !args->height) 214 - return -EINVAL; 215 - 216 - if (args->bpp != 32 && args->bpp != 16) 217 - return -EINVAL; 218 - 219 - pitch = args->width * args->bpp / 8; 220 - pitch = ALIGN(pitch, descp->pitch_align); 221 - size = pitch * args->height; 222 - size = ALIGN(size, PAGE_SIZE); 209 + ret = drm_mode_size_dumb(ddev, args, descp->pitch_align, 0); 210 + if (ret) 211 + return ret; 223 212 224 213 /* Maximum single bo size allowed is the half vram size available */ 225 - if (size > ldev->vram_size / 2) { 226 - drm_err(ddev, "Requesting(%zuMiB) failed\n", size >> 20); 214 + if (args->size > ldev->vram_size / 2) { 215 + drm_err(ddev, "Requesting(%zuMiB) failed\n", (size_t)(args->size >> PAGE_SHIFT)); 227 216 return -ENOMEM; 228 217 } 229 218 230 - gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL); 219 + gobj = lsdc_gem_object_create(ddev, domain, args->size, false, NULL, NULL); 231 220 if (IS_ERR(gobj)) { 232 221 drm_err(ddev, "Failed to create gem object\n"); 233 222 return PTR_ERR(gobj); 234 223 } 235 224 236 - ret = drm_gem_handle_create(file, gobj, &handle); 225 + ret = drm_gem_handle_create(file, gobj, &args->handle); 237 226 238 227 /* drop reference from allocate, handle holds it now */ 239 228 drm_gem_object_put(gobj); 240 229 if (ret) 241 230 return ret; 242 - 243 - args->pitch = 
pitch; 244 - args->size = size; 245 - args->handle = handle; 246 231 247 232 return 0; 248 233 }
+8 -5
drivers/gpu/drm/mcde/mcde_clk_div.c
··· 71 71 return best_div; 72 72 } 73 73 74 - static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, 75 - unsigned long *prate) 74 + static int mcde_clk_div_determine_rate(struct clk_hw *hw, 75 + struct clk_rate_request *req) 76 76 { 77 - int div = mcde_clk_div_choose_div(hw, rate, prate, true); 77 + int div = mcde_clk_div_choose_div(hw, req->rate, 78 + &req->best_parent_rate, true); 78 79 79 - return DIV_ROUND_UP_ULL(*prate, div); 80 + req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div); 81 + 82 + return 0; 80 83 } 81 84 82 85 static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw, ··· 135 132 static const struct clk_ops mcde_clk_div_ops = { 136 133 .enable = mcde_clk_div_enable, 137 134 .recalc_rate = mcde_clk_div_recalc_rate, 138 - .round_rate = mcde_clk_div_round_rate, 135 + .determine_rate = mcde_clk_div_determine_rate, 139 136 .set_rate = mcde_clk_div_set_rate, 140 137 }; 141 138
+25 -2
drivers/gpu/drm/msm/msm_gem.c
··· 10 10 #include <linux/shmem_fs.h> 11 11 #include <linux/dma-buf.h> 12 12 13 + #include <drm/drm_dumb_buffers.h> 13 14 #include <drm/drm_prime.h> 14 15 #include <drm/drm_file.h> 16 + #include <drm/drm_fourcc.h> 15 17 16 18 #include <trace/events/gpu_mem.h> 17 19 ··· 700 698 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 701 699 struct drm_mode_create_dumb *args) 702 700 { 703 - args->pitch = align_pitch(args->width, args->bpp); 704 - args->size = PAGE_ALIGN(args->pitch * args->height); 701 + u32 fourcc; 702 + const struct drm_format_info *info; 703 + u64 pitch_align; 704 + int ret; 705 + 706 + /* 707 + * Adreno needs pitch aligned to 32 pixels. Compute the number 708 + * of bytes for a block of 32 pixels at the given color format. 709 + * Use the result as pitch alignment. 710 + */ 711 + fourcc = drm_driver_color_mode_format(dev, args->bpp); 712 + if (fourcc == DRM_FORMAT_INVALID) 713 + return -EINVAL; 714 + info = drm_format_info(fourcc); 715 + if (!info) 716 + return -EINVAL; 717 + pitch_align = drm_format_info_min_pitch(info, 0, SZ_32); 718 + if (!pitch_align || pitch_align > U32_MAX) 719 + return -EINVAL; 720 + ret = drm_mode_size_dumb(dev, args, pitch_align, 0); 721 + if (ret) 722 + return ret; 723 + 705 724 return msm_gem_new_handle(dev, file, args->size, 706 725 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb"); 707 726 }
+1
drivers/gpu/drm/nouveau/Kconfig
··· 28 28 select THERMAL if ACPI && X86 29 29 select ACPI_VIDEO if ACPI && X86 30 30 select SND_HDA_COMPONENT if SND_HDA_CORE 31 + select PM_DEVFREQ if ARCH_TEGRA 31 32 help 32 33 Choose this option for open-source NVIDIA support. 33 34
+2
drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
··· 9 9 struct nvkm_device device; 10 10 struct platform_device *pdev; 11 11 12 + void __iomem *regs; 13 + 12 14 struct reset_control *rst; 13 15 struct clk *clk; 14 16 struct clk *clk_ref;
+1
drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
··· 134 134 int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); 135 135 int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); 136 136 int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); 137 + int gp10b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); 137 138 #endif
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.h
··· 57 57 static inline void 58 58 nouveau_bo_fini(struct nouveau_bo *bo) 59 59 { 60 - ttm_bo_put(&bo->bo); 60 + ttm_bo_fini(&bo->bo); 61 61 } 62 62 63 63 extern struct ttm_device_funcs nouveau_bo_driver;
+4 -3
drivers/gpu/drm/nouveau/nouveau_display.c
··· 30 30 #include <drm/drm_atomic_helper.h> 31 31 #include <drm/drm_client_event.h> 32 32 #include <drm/drm_crtc_helper.h> 33 + #include <drm/drm_dumb_buffers.h> 33 34 #include <drm/drm_fourcc.h> 34 35 #include <drm/drm_gem_framebuffer_helper.h> 35 36 #include <drm/drm_probe_helper.h> ··· 808 807 uint32_t domain; 809 808 int ret; 810 809 811 - args->pitch = roundup(args->width * (args->bpp / 8), 256); 812 - args->size = args->pitch * args->height; 813 - args->size = roundup(args->size, PAGE_SIZE); 810 + ret = drm_mode_size_dumb(dev, args, SZ_256, 0); 811 + if (ret) 812 + return ret; 814 813 815 814 /* Use VRAM if there is any ; otherwise fallback to system memory */ 816 815 if (nouveau_drm(dev)->client.device.info.ram_size != 0)
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 87 87 return; 88 88 } 89 89 90 - ttm_bo_put(&nvbo->bo); 90 + ttm_bo_fini(&nvbo->bo); 91 91 92 92 pm_runtime_mark_last_busy(dev); 93 93 pm_runtime_put_autosuspend(dev);
+20
drivers/gpu/drm/nouveau/nouveau_platform.c
··· 21 21 */ 22 22 #include "nouveau_platform.h" 23 23 24 + #include <nvkm/subdev/clk/gk20a_devfreq.h> 25 + 24 26 static int nouveau_platform_probe(struct platform_device *pdev) 25 27 { 26 28 const struct nvkm_device_tegra_func *func; ··· 41 39 42 40 nouveau_drm_device_remove(drm); 43 41 } 42 + 43 + #ifdef CONFIG_PM_SLEEP 44 + static int nouveau_platform_suspend(struct device *dev) 45 + { 46 + return gk20a_devfreq_suspend(dev); 47 + } 48 + 49 + static int nouveau_platform_resume(struct device *dev) 50 + { 51 + return gk20a_devfreq_resume(dev); 52 + } 53 + 54 + static SIMPLE_DEV_PM_OPS(nouveau_pm_ops, nouveau_platform_suspend, 55 + nouveau_platform_resume); 56 + #endif 44 57 45 58 #if IS_ENABLED(CONFIG_OF) 46 59 static const struct nvkm_device_tegra_func gk20a_platform_data = { ··· 98 81 .driver = { 99 82 .name = "nouveau", 100 83 .of_match_table = of_match_ptr(nouveau_platform_match), 84 + #ifdef CONFIG_PM_SLEEP 85 + .pm = &nouveau_pm_ops, 86 + #endif 101 87 }, 102 88 .probe = nouveau_platform_probe, 103 89 .remove = nouveau_platform_remove,
+1
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
··· 2280 2280 .acr = { 0x00000001, gp10b_acr_new }, 2281 2281 .bar = { 0x00000001, gm20b_bar_new }, 2282 2282 .bus = { 0x00000001, gf100_bus_new }, 2283 + .clk = { 0x00000001, gp10b_clk_new }, 2283 2284 .fault = { 0x00000001, gp10b_fault_new }, 2284 2285 .fb = { 0x00000001, gp10b_fb_new }, 2285 2286 .fuse = { 0x00000001, gm107_fuse_new },
+4
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
··· 259 259 tdev->func = func; 260 260 tdev->pdev = pdev; 261 261 262 + tdev->regs = devm_platform_ioremap_resource(pdev, 0); 263 + if (IS_ERR(tdev->regs)) 264 + return PTR_ERR(tdev->regs); 265 + 262 266 if (func->require_vdd) { 263 267 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); 264 268 if (IS_ERR(tdev->vdd)) {
+2
drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
··· 10 10 nvkm-y += nvkm/subdev/clk/gk104.o 11 11 nvkm-y += nvkm/subdev/clk/gk20a.o 12 12 nvkm-y += nvkm/subdev/clk/gm20b.o 13 + nvkm-y += nvkm/subdev/clk/gp10b.o 14 + nvkm-$(CONFIG_PM_DEVFREQ) += nvkm/subdev/clk/gk20a_devfreq.o 13 15 14 16 nvkm-y += nvkm/subdev/clk/pllnv04.o 15 17 nvkm-y += nvkm/subdev/clk/pllgt215.o
+5
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
··· 23 23 * 24 24 */ 25 25 #include "priv.h" 26 + #include "gk20a_devfreq.h" 26 27 #include "gk20a.h" 27 28 28 29 #include <core/tegra.h> ··· 589 588 nvkm_error(subdev, "cannot initialize clock\n"); 590 589 return ret; 591 590 } 591 + 592 + ret = gk20a_devfreq_init(base, &clk->devfreq); 593 + if (ret) 594 + return ret; 592 595 593 596 return 0; 594 597 }
+1
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
··· 118 118 const struct gk20a_clk_pllg_params *params; 119 119 struct gk20a_pll pll; 120 120 u32 parent_rate; 121 + struct gk20a_devfreq *devfreq; 121 122 122 123 u32 (*div_to_pl)(u32); 123 124 u32 (*pl_to_div)(u32);
+320
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c
··· 1 + // SPDX-License-Identifier: MIT 2 + #include <linux/clk.h> 3 + #include <linux/math64.h> 4 + #include <linux/platform_device.h> 5 + #include <linux/pm_opp.h> 6 + 7 + #include <drm/drm_managed.h> 8 + 9 + #include <subdev/clk.h> 10 + 11 + #include "nouveau_drv.h" 12 + #include "nouveau_chan.h" 13 + #include "priv.h" 14 + #include "gk20a_devfreq.h" 15 + #include "gk20a.h" 16 + #include "gp10b.h" 17 + 18 + #define PMU_BUSY_CYCLES_NORM_MAX 1000U 19 + 20 + #define PWR_PMU_IDLE_COUNTER_TOTAL 0U 21 + #define PWR_PMU_IDLE_COUNTER_BUSY 4U 22 + 23 + #define PWR_PMU_IDLE_COUNT_REG_OFFSET 0x0010A508U 24 + #define PWR_PMU_IDLE_COUNT_REG_SIZE 16U 25 + #define PWR_PMU_IDLE_COUNT_MASK 0x7FFFFFFFU 26 + #define PWR_PMU_IDLE_COUNT_RESET_VALUE (0x1U << 31U) 27 + 28 + #define PWR_PMU_IDLE_INTR_REG_OFFSET 0x0010A9E8U 29 + #define PWR_PMU_IDLE_INTR_ENABLE_VALUE 0U 30 + 31 + #define PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET 0x0010A9ECU 32 + #define PWR_PMU_IDLE_INTR_STATUS_MASK 0x00000001U 33 + #define PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE 0x1U 34 + 35 + #define PWR_PMU_IDLE_THRESHOLD_REG_OFFSET 0x0010A8A0U 36 + #define PWR_PMU_IDLE_THRESHOLD_REG_SIZE 4U 37 + #define PWR_PMU_IDLE_THRESHOLD_MAX_VALUE 0x7FFFFFFFU 38 + 39 + #define PWR_PMU_IDLE_CTRL_REG_OFFSET 0x0010A50CU 40 + #define PWR_PMU_IDLE_CTRL_REG_SIZE 16U 41 + #define PWR_PMU_IDLE_CTRL_VALUE_MASK 0x3U 42 + #define PWR_PMU_IDLE_CTRL_VALUE_BUSY 0x2U 43 + #define PWR_PMU_IDLE_CTRL_VALUE_ALWAYS 0x3U 44 + #define PWR_PMU_IDLE_CTRL_FILTER_MASK (0x1U << 2) 45 + #define PWR_PMU_IDLE_CTRL_FILTER_DISABLED 0x0U 46 + 47 + #define PWR_PMU_IDLE_MASK_REG_OFFSET 0x0010A504U 48 + #define PWR_PMU_IDLE_MASK_REG_SIZE 16U 49 + #define PWM_PMU_IDLE_MASK_GR_ENABLED 0x1U 50 + #define PWM_PMU_IDLE_MASK_CE_2_ENABLED 0x200000U 51 + 52 + /** 53 + * struct gk20a_devfreq - Device frequency management 54 + */ 55 + struct gk20a_devfreq { 56 + /** @devfreq: devfreq device. */ 57 + struct devfreq *devfreq; 58 + 59 + /** @regs: Device registers. 
*/ 60 + void __iomem *regs; 61 + 62 + /** @gov_data: Governor data. */ 63 + struct devfreq_simple_ondemand_data gov_data; 64 + 65 + /** @busy_time: Busy time. */ 66 + ktime_t busy_time; 67 + 68 + /** @total_time: Total time. */ 69 + ktime_t total_time; 70 + 71 + /** @time_last_update: Last update time. */ 72 + ktime_t time_last_update; 73 + }; 74 + 75 + static struct gk20a_devfreq *dev_to_gk20a_devfreq(struct device *dev) 76 + { 77 + struct nouveau_drm *drm = dev_get_drvdata(dev); 78 + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); 79 + struct nvkm_clk *base = nvkm_clk(subdev); 80 + 81 + switch (drm->nvkm->chipset) { 82 + case 0x13b: return gp10b_clk(base)->devfreq; break; 83 + default: return gk20a_clk(base)->devfreq; break; 84 + } 85 + } 86 + 87 + static void gk20a_pmu_init_perfmon_counter(struct gk20a_devfreq *gdevfreq) 88 + { 89 + u32 data; 90 + 91 + // Set pmu idle intr status bit on total counter overflow 92 + writel(PWR_PMU_IDLE_INTR_ENABLE_VALUE, 93 + gdevfreq->regs + PWR_PMU_IDLE_INTR_REG_OFFSET); 94 + 95 + writel(PWR_PMU_IDLE_THRESHOLD_MAX_VALUE, 96 + gdevfreq->regs + PWR_PMU_IDLE_THRESHOLD_REG_OFFSET + 97 + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_THRESHOLD_REG_SIZE)); 98 + 99 + // Setup counter for total cycles 100 + data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + 101 + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE)); 102 + data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK); 103 + data |= PWR_PMU_IDLE_CTRL_VALUE_ALWAYS | PWR_PMU_IDLE_CTRL_FILTER_DISABLED; 104 + writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + 105 + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE)); 106 + 107 + // Setup counter for busy cycles 108 + writel(PWM_PMU_IDLE_MASK_GR_ENABLED | PWM_PMU_IDLE_MASK_CE_2_ENABLED, 109 + gdevfreq->regs + PWR_PMU_IDLE_MASK_REG_OFFSET + 110 + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_MASK_REG_SIZE)); 111 + 112 + data = readl(gdevfreq->regs + 
PWR_PMU_IDLE_CTRL_REG_OFFSET + 113 + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE)); 114 + data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK); 115 + data |= PWR_PMU_IDLE_CTRL_VALUE_BUSY | PWR_PMU_IDLE_CTRL_FILTER_DISABLED; 116 + writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + 117 + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE)); 118 + } 119 + 120 + static u32 gk20a_pmu_read_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id) 121 + { 122 + u32 ret; 123 + 124 + ret = readl(gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET + 125 + (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE)); 126 + 127 + return ret & PWR_PMU_IDLE_COUNT_MASK; 128 + } 129 + 130 + static void gk20a_pmu_reset_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id) 131 + { 132 + writel(PWR_PMU_IDLE_COUNT_RESET_VALUE, gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET + 133 + (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE)); 134 + } 135 + 136 + static u32 gk20a_pmu_read_idle_intr_status(struct gk20a_devfreq *gdevfreq) 137 + { 138 + u32 ret; 139 + 140 + ret = readl(gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET); 141 + 142 + return ret & PWR_PMU_IDLE_INTR_STATUS_MASK; 143 + } 144 + 145 + static void gk20a_pmu_clear_idle_intr_status(struct gk20a_devfreq *gdevfreq) 146 + { 147 + writel(PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE, 148 + gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET); 149 + } 150 + 151 + static void gk20a_devfreq_update_utilization(struct gk20a_devfreq *gdevfreq) 152 + { 153 + ktime_t now, last; 154 + u64 busy_cycles, total_cycles; 155 + u32 norm, intr_status; 156 + 157 + now = ktime_get(); 158 + last = gdevfreq->time_last_update; 159 + gdevfreq->total_time = ktime_us_delta(now, last); 160 + 161 + busy_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); 162 + total_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); 163 + intr_status = gk20a_pmu_read_idle_intr_status(gdevfreq); 164 + 165 
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); 166 + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); 167 + 168 + if (intr_status != 0UL) { 169 + norm = PMU_BUSY_CYCLES_NORM_MAX; 170 + gk20a_pmu_clear_idle_intr_status(gdevfreq); 171 + } else if (total_cycles == 0ULL || busy_cycles > total_cycles) { 172 + norm = PMU_BUSY_CYCLES_NORM_MAX; 173 + } else { 174 + norm = (u32)div64_u64(busy_cycles * PMU_BUSY_CYCLES_NORM_MAX, 175 + total_cycles); 176 + } 177 + 178 + gdevfreq->busy_time = div_u64(gdevfreq->total_time * norm, PMU_BUSY_CYCLES_NORM_MAX); 179 + gdevfreq->time_last_update = now; 180 + } 181 + 182 + static int gk20a_devfreq_target(struct device *dev, unsigned long *freq, 183 + u32 flags) 184 + { 185 + struct nouveau_drm *drm = dev_get_drvdata(dev); 186 + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); 187 + struct nvkm_clk *base = nvkm_clk(subdev); 188 + struct nvkm_pstate *pstates = base->func->pstates; 189 + int nr_pstates = base->func->nr_pstates; 190 + int i, ret; 191 + 192 + for (i = 0; i < nr_pstates - 1; i++) 193 + if (pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV >= *freq) 194 + break; 195 + 196 + ret = nvkm_clk_ustate(base, pstates[i].pstate, 0); 197 + ret |= nvkm_clk_ustate(base, pstates[i].pstate, 1); 198 + if (ret) { 199 + nvkm_error(subdev, "cannot update clock\n"); 200 + return ret; 201 + } 202 + 203 + *freq = pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV; 204 + 205 + return 0; 206 + } 207 + 208 + static int gk20a_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) 209 + { 210 + struct nouveau_drm *drm = dev_get_drvdata(dev); 211 + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); 212 + struct nvkm_clk *base = nvkm_clk(subdev); 213 + 214 + *freq = nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV; 215 + 216 + return 0; 217 + } 218 + 219 + static void gk20a_devfreq_reset(struct gk20a_devfreq *gdevfreq) 
220 + { 221 + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); 222 + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); 223 + gk20a_pmu_clear_idle_intr_status(gdevfreq); 224 + 225 + gdevfreq->busy_time = 0; 226 + gdevfreq->total_time = 0; 227 + gdevfreq->time_last_update = ktime_get(); 228 + } 229 + 230 + static int gk20a_devfreq_get_dev_status(struct device *dev, 231 + struct devfreq_dev_status *status) 232 + { 233 + struct nouveau_drm *drm = dev_get_drvdata(dev); 234 + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); 235 + 236 + gk20a_devfreq_get_cur_freq(dev, &status->current_frequency); 237 + 238 + gk20a_devfreq_update_utilization(gdevfreq); 239 + 240 + status->busy_time = ktime_to_ns(gdevfreq->busy_time); 241 + status->total_time = ktime_to_ns(gdevfreq->total_time); 242 + 243 + gk20a_devfreq_reset(gdevfreq); 244 + 245 + NV_DEBUG(drm, "busy %lu total %lu %lu %% freq %lu MHz\n", 246 + status->busy_time, status->total_time, 247 + status->busy_time / (status->total_time / 100), 248 + status->current_frequency / 1000 / 1000); 249 + 250 + return 0; 251 + } 252 + 253 + static struct devfreq_dev_profile gk20a_devfreq_profile = { 254 + .timer = DEVFREQ_TIMER_DELAYED, 255 + .polling_ms = 50, 256 + .target = gk20a_devfreq_target, 257 + .get_cur_freq = gk20a_devfreq_get_cur_freq, 258 + .get_dev_status = gk20a_devfreq_get_dev_status, 259 + }; 260 + 261 + int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **gdevfreq) 262 + { 263 + struct nvkm_device *device = base->subdev.device; 264 + struct nouveau_drm *drm = dev_get_drvdata(device->dev); 265 + struct nvkm_device_tegra *tdev = device->func->tegra(device); 266 + struct nvkm_pstate *pstates = base->func->pstates; 267 + int nr_pstates = base->func->nr_pstates; 268 + struct gk20a_devfreq *new_gdevfreq; 269 + int i; 270 + 271 + new_gdevfreq = drmm_kzalloc(drm->dev, sizeof(struct gk20a_devfreq), GFP_KERNEL); 272 + if (!new_gdevfreq) 273 + return -ENOMEM; 274 + 275 + 
new_gdevfreq->regs = tdev->regs; 276 + 277 + for (i = 0; i < nr_pstates; i++) 278 + dev_pm_opp_add(base->subdev.device->dev, 279 + pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV, 0); 280 + 281 + gk20a_pmu_init_perfmon_counter(new_gdevfreq); 282 + gk20a_devfreq_reset(new_gdevfreq); 283 + 284 + gk20a_devfreq_profile.initial_freq = 285 + nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV; 286 + 287 + new_gdevfreq->gov_data.upthreshold = 45; 288 + new_gdevfreq->gov_data.downdifferential = 5; 289 + 290 + new_gdevfreq->devfreq = devm_devfreq_add_device(device->dev, 291 + &gk20a_devfreq_profile, 292 + DEVFREQ_GOV_SIMPLE_ONDEMAND, 293 + &new_gdevfreq->gov_data); 294 + if (IS_ERR(new_gdevfreq->devfreq)) 295 + return PTR_ERR(new_gdevfreq->devfreq); 296 + 297 + *gdevfreq = new_gdevfreq; 298 + 299 + return 0; 300 + } 301 + 302 + int gk20a_devfreq_resume(struct device *dev) 303 + { 304 + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); 305 + 306 + if (!gdevfreq || !gdevfreq->devfreq) 307 + return 0; 308 + 309 + return devfreq_resume_device(gdevfreq->devfreq); 310 + } 311 + 312 + int gk20a_devfreq_suspend(struct device *dev) 313 + { 314 + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); 315 + 316 + if (!gdevfreq || !gdevfreq->devfreq) 317 + return 0; 318 + 319 + return devfreq_suspend_device(gdevfreq->devfreq); 320 + }
+24
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __GK20A_DEVFREQ_H__ 3 + #define __GK20A_DEVFREQ_H__ 4 + 5 + #include <linux/devfreq.h> 6 + 7 + struct gk20a_devfreq; 8 + 9 + #if defined(CONFIG_PM_DEVFREQ) 10 + int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq); 11 + 12 + int gk20a_devfreq_resume(struct device *dev); 13 + int gk20a_devfreq_suspend(struct device *dev); 14 + #else 15 + static inline int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq) 16 + { 17 + return 0; 18 + } 19 + 20 + static inline int gk20a_devfreq_resume(struct device dev) { return 0; } 21 + static inline int gk20a_devfreq_suspend(struct device *dev) { return 0; } 22 + #endif /* CONFIG_PM_DEVFREQ */ 23 + 24 + #endif /* __GK20A_DEVFREQ_H__ */
+5
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
··· 27 27 #include <core/tegra.h> 28 28 29 29 #include "priv.h" 30 + #include "gk20a_devfreq.h" 30 31 #include "gk20a.h" 31 32 32 33 #define GPCPLL_CFG_SYNC_MODE BIT(2) ··· 869 868 nvkm_error(subdev, "cannot initialize clock\n"); 870 869 return ret; 871 870 } 871 + 872 + ret = gk20a_devfreq_init(base, &clk->devfreq); 873 + if (ret) 874 + return ret; 872 875 873 876 return 0; 874 877 }
+185
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c
··· 1 + // SPDX-License-Identifier: MIT 2 + #include <subdev/clk.h> 3 + #include <subdev/timer.h> 4 + #include <core/device.h> 5 + #include <core/tegra.h> 6 + 7 + #include "priv.h" 8 + #include "gk20a_devfreq.h" 9 + #include "gk20a.h" 10 + #include "gp10b.h" 11 + 12 + static int 13 + gp10b_clk_init(struct nvkm_clk *base) 14 + { 15 + struct gp10b_clk *clk = gp10b_clk(base); 16 + struct nvkm_subdev *subdev = &clk->base.subdev; 17 + int ret; 18 + 19 + /* Start with the highest frequency, matching the BPMP default */ 20 + base->func->calc(base, &base->func->pstates[base->func->nr_pstates - 1].base); 21 + ret = base->func->prog(base); 22 + if (ret) { 23 + nvkm_error(subdev, "cannot initialize clock\n"); 24 + return ret; 25 + } 26 + 27 + ret = gk20a_devfreq_init(base, &clk->devfreq); 28 + if (ret) 29 + return ret; 30 + 31 + return 0; 32 + } 33 + 34 + static int 35 + gp10b_clk_read(struct nvkm_clk *base, enum nv_clk_src src) 36 + { 37 + struct gp10b_clk *clk = gp10b_clk(base); 38 + struct nvkm_subdev *subdev = &clk->base.subdev; 39 + 40 + switch (src) { 41 + case nv_clk_src_gpc: 42 + return clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV; 43 + default: 44 + nvkm_error(subdev, "invalid clock source %d\n", src); 45 + return -EINVAL; 46 + } 47 + } 48 + 49 + static int 50 + gp10b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) 51 + { 52 + struct gp10b_clk *clk = gp10b_clk(base); 53 + u32 target_rate = cstate->domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV; 54 + 55 + clk->new_rate = clk_round_rate(clk->clk, target_rate) / GK20A_CLK_GPC_MDIV; 56 + 57 + return 0; 58 + } 59 + 60 + static int 61 + gp10b_clk_prog(struct nvkm_clk *base) 62 + { 63 + struct gp10b_clk *clk = gp10b_clk(base); 64 + int ret; 65 + 66 + ret = clk_set_rate(clk->clk, clk->new_rate * GK20A_CLK_GPC_MDIV); 67 + if (ret < 0) 68 + return ret; 69 + 70 + clk->rate = clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV; 71 + 72 + return 0; 73 + } 74 + 75 + static struct nvkm_pstate 76 + gp10b_pstates[] = { 77 + { 78 + 
.base = { 79 + .domain[nv_clk_src_gpc] = 114750, 80 + }, 81 + }, 82 + { 83 + .base = { 84 + .domain[nv_clk_src_gpc] = 216750, 85 + }, 86 + }, 87 + { 88 + .base = { 89 + .domain[nv_clk_src_gpc] = 318750, 90 + }, 91 + }, 92 + { 93 + .base = { 94 + .domain[nv_clk_src_gpc] = 420750, 95 + }, 96 + }, 97 + { 98 + .base = { 99 + .domain[nv_clk_src_gpc] = 522750, 100 + }, 101 + }, 102 + { 103 + .base = { 104 + .domain[nv_clk_src_gpc] = 624750, 105 + }, 106 + }, 107 + { 108 + .base = { 109 + .domain[nv_clk_src_gpc] = 726750, 110 + }, 111 + }, 112 + { 113 + .base = { 114 + .domain[nv_clk_src_gpc] = 828750, 115 + }, 116 + }, 117 + { 118 + .base = { 119 + .domain[nv_clk_src_gpc] = 930750, 120 + }, 121 + }, 122 + { 123 + .base = { 124 + .domain[nv_clk_src_gpc] = 1032750, 125 + }, 126 + }, 127 + { 128 + .base = { 129 + .domain[nv_clk_src_gpc] = 1134750, 130 + }, 131 + }, 132 + { 133 + .base = { 134 + .domain[nv_clk_src_gpc] = 1236750, 135 + }, 136 + }, 137 + { 138 + .base = { 139 + .domain[nv_clk_src_gpc] = 1300500, 140 + }, 141 + }, 142 + }; 143 + 144 + static const struct nvkm_clk_func 145 + gp10b_clk = { 146 + .init = gp10b_clk_init, 147 + .read = gp10b_clk_read, 148 + .calc = gp10b_clk_calc, 149 + .prog = gp10b_clk_prog, 150 + .tidy = gk20a_clk_tidy, 151 + .pstates = gp10b_pstates, 152 + .nr_pstates = ARRAY_SIZE(gp10b_pstates), 153 + .domains = { 154 + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, 155 + { nv_clk_src_max } 156 + } 157 + }; 158 + 159 + int 160 + gp10b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, 161 + struct nvkm_clk **pclk) 162 + { 163 + struct nvkm_device_tegra *tdev = device->func->tegra(device); 164 + const struct nvkm_clk_func *func = &gp10b_clk; 165 + struct gp10b_clk *clk; 166 + int ret, i; 167 + 168 + clk = kzalloc(sizeof(*clk), GFP_KERNEL); 169 + if (!clk) 170 + return -ENOMEM; 171 + *pclk = &clk->base; 172 + clk->clk = tdev->clk; 173 + 174 + /* Finish initializing the pstates */ 175 + for (i = 0; i < 
func->nr_pstates; i++) { 176 + INIT_LIST_HEAD(&func->pstates[i].list); 177 + func->pstates[i].pstate = i + 1; 178 + } 179 + 180 + ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base); 181 + if (ret) 182 + return ret; 183 + 184 + return 0; 185 + }
+18
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVKM_CLK_GP10B_H__ 3 + #define __NVKM_CLK_GP10B_H__ 4 + 5 + struct gp10b_clk { 6 + /* currently applied parameters */ 7 + struct nvkm_clk base; 8 + struct gk20a_devfreq *devfreq; 9 + struct clk *clk; 10 + u32 rate; 11 + 12 + /* new parameters to apply */ 13 + u32 new_rate; 14 + }; 15 + 16 + #define gp10b_clk(p) container_of((p), struct gp10b_clk, base) 17 + 18 + #endif
+1 -3
drivers/gpu/drm/omapdrm/omap_encoder.c
··· 77 77 struct omap_dss_device *output = omap_encoder->output; 78 78 struct drm_device *dev = encoder->dev; 79 79 struct drm_connector *connector; 80 - struct drm_bridge *bridge; 81 80 struct videomode vm = { 0 }; 82 81 u32 bus_flags; 83 82 ··· 96 97 * 97 98 * A better solution is to use DRM's bus-flags through the whole driver. 98 99 */ 99 - for (bridge = output->bridge; bridge; 100 - bridge = drm_bridge_get_next_bridge(bridge)) { 100 + drm_for_each_bridge_in_chain_from(output->bridge, bridge) { 101 101 if (!bridge->timings) 102 102 continue; 103 103
+7 -8
drivers/gpu/drm/omapdrm/omap_gem.c
··· 10 10 #include <linux/spinlock.h> 11 11 #include <linux/vmalloc.h> 12 12 13 + #include <drm/drm_dumb_buffers.h> 13 14 #include <drm/drm_prime.h> 14 15 #include <drm/drm_vma_manager.h> 15 16 ··· 581 580 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 582 581 struct drm_mode_create_dumb *args) 583 582 { 584 - union omap_gem_size gsize; 583 + union omap_gem_size gsize = { }; 584 + int ret; 585 585 586 - args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 587 - 588 - args->size = PAGE_ALIGN(args->pitch * args->height); 589 - 590 - gsize = (union omap_gem_size){ 591 - .bytes = args->size, 592 - }; 586 + ret = drm_mode_size_dumb(dev, args, SZ_8, 0); 587 + if (ret) 588 + return ret; 589 + gsize.bytes = args->size; 593 590 594 591 return omap_gem_new_handle(dev, file, gsize, 595 592 OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
+15
drivers/gpu/drm/panel/Kconfig
··· 888 888 Say Y here if you want to enable support for the Seiko 889 889 43WVF1G controller for 800x480 LCD panels 890 890 891 + config DRM_PANEL_SHARP_LQ079L1SX01 892 + tristate "Sharp LQ079L1SX01 panel" 893 + depends on OF 894 + depends on DRM_MIPI_DSI 895 + depends on BACKLIGHT_CLASS_DEVICE 896 + select VIDEOMODE_HELPERS 897 + help 898 + Say Y here if you want to enable support for Sharp LQ079L1SX01 899 + TFT-LCD modules. The panel has a 2560x1600 resolution and uses 900 + 24 bit RGB per pixel. It provides a dual MIPI DSI interface to 901 + the host. 902 + 903 + To compile this driver as a module, choose M here: the module 904 + will be called panel-sharp-lq079l1sx01. 905 + 891 906 config DRM_PANEL_SHARP_LQ101R1SX01 892 907 tristate "Sharp LQ101R1SX01 panel" 893 908 depends on OF
+1
drivers/gpu/drm/panel/Makefile
··· 91 91 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01) += panel-samsung-s6e8aa5x01-ams561ra01.o 92 92 obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o 93 93 obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o 94 + obj-$(CONFIG_DRM_PANEL_SHARP_LQ079L1SX01) += panel-sharp-lq079l1sx01.o 94 95 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o 95 96 obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o 96 97 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
+8
drivers/gpu/drm/panel/panel-edp.c
··· 1909 1909 EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"), 1910 1910 EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"), 1911 1911 EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"), 1912 + EDP_PANEL_ENTRY('A', 'U', 'O', 0xb7a9, &delay_200_500_e50, "B140HAK03.3"), 1912 1913 EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"), 1913 1914 EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"), 1914 1915 EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"), ··· 1975 1974 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"), 1976 1975 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"), 1977 1976 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"), 1977 + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf2, &delay_200_500_e200, "NV156FHM-N4S"), 1978 1978 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"), 1979 1979 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"), 1980 1980 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"), ··· 2009 2007 EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"), 2010 2008 EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), 2011 2009 EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), 2010 + EDP_PANEL_ENTRY('C', 'M', 'N', 0x148f, &delay_200_500_e80, "N140HCA-EAC"), 2012 2011 EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"), 2013 2012 EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), 2014 2013 EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), 2015 2014 EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), 2015 + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1565, &delay_200_500_e80, "N156HCA-EAB"), 2016 2016 
EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"), 2017 2017 EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"), 2018 2018 ··· 2026 2022 EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"), 2027 2023 EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"), 2028 2024 EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"), 2025 + EDP_PANEL_ENTRY('C', 'S', 'W', 0x144b, &delay_200_500_e80, "MNE001BS1-4"), 2029 2026 EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"), 2030 2027 EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"), 2031 2028 EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"), 2032 2029 EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"), 2030 + EDP_PANEL_ENTRY('C', 'S', 'W', 0x1519, &delay_200_500_e80_d50, "MNF601BS1-3"), 2033 2031 2034 2032 EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"), 2035 2033 ··· 2052 2046 EDP_PANEL_ENTRY('K', 'D', 'B', 0x1212, &delay_200_500_e50, "KD116N0930A16"), 2053 2047 EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"), 2054 2048 2049 + EDP_PANEL_ENTRY('K', 'D', 'C', 0x0110, &delay_200_500_e50, "KD116N3730A07"), 2050 + EDP_PANEL_ENTRY('K', 'D', 'C', 0x0397, &delay_200_500_e50, "KD116N3730A12"), 2055 2051 EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"), 2056 2052 EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"), 2057 2053 EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+656 -442
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
··· 100 100 ILI9881C_COMMAND_INSTR(0x13, 0x00), 101 101 ILI9881C_COMMAND_INSTR(0x14, 0x00), 102 102 ILI9881C_COMMAND_INSTR(0x15, 0x00), 103 - ILI9881C_COMMAND_INSTR(0x16, 0x0C), 103 + ILI9881C_COMMAND_INSTR(0x16, 0x0c), 104 104 ILI9881C_COMMAND_INSTR(0x17, 0x00), 105 105 ILI9881C_COMMAND_INSTR(0x18, 0x00), 106 106 ILI9881C_COMMAND_INSTR(0x19, 0x00), ··· 108 108 ILI9881C_COMMAND_INSTR(0x1b, 0x00), 109 109 ILI9881C_COMMAND_INSTR(0x1c, 0x00), 110 110 ILI9881C_COMMAND_INSTR(0x1d, 0x00), 111 - ILI9881C_COMMAND_INSTR(0x1e, 0xC0), 111 + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), 112 112 ILI9881C_COMMAND_INSTR(0x1f, 0x80), 113 113 ILI9881C_COMMAND_INSTR(0x20, 0x04), 114 114 ILI9881C_COMMAND_INSTR(0x21, 0x01), ··· 134 134 ILI9881C_COMMAND_INSTR(0x35, 0x00), 135 135 ILI9881C_COMMAND_INSTR(0x36, 0x00), 136 136 ILI9881C_COMMAND_INSTR(0x37, 0x00), 137 - ILI9881C_COMMAND_INSTR(0x38, 0x3C), 137 + ILI9881C_COMMAND_INSTR(0x38, 0x3c), 138 138 ILI9881C_COMMAND_INSTR(0x39, 0x00), 139 139 ILI9881C_COMMAND_INSTR(0x3a, 0x00), 140 140 ILI9881C_COMMAND_INSTR(0x3b, 0x00), ··· 173 173 ILI9881C_COMMAND_INSTR(0x67, 0x02), 174 174 ILI9881C_COMMAND_INSTR(0x68, 0x02), 175 175 ILI9881C_COMMAND_INSTR(0x69, 0x02), 176 - ILI9881C_COMMAND_INSTR(0x6a, 0x0C), 176 + ILI9881C_COMMAND_INSTR(0x6a, 0x0c), 177 177 ILI9881C_COMMAND_INSTR(0x6b, 0x02), 178 - ILI9881C_COMMAND_INSTR(0x6c, 0x0F), 179 - ILI9881C_COMMAND_INSTR(0x6d, 0x0E), 180 - ILI9881C_COMMAND_INSTR(0x6e, 0x0D), 178 + ILI9881C_COMMAND_INSTR(0x6c, 0x0f), 179 + ILI9881C_COMMAND_INSTR(0x6d, 0x0e), 180 + ILI9881C_COMMAND_INSTR(0x6e, 0x0d), 181 181 ILI9881C_COMMAND_INSTR(0x6f, 0x06), 182 182 ILI9881C_COMMAND_INSTR(0x70, 0x07), 183 183 ILI9881C_COMMAND_INSTR(0x71, 0x02), ··· 195 195 ILI9881C_COMMAND_INSTR(0x7d, 0x02), 196 196 ILI9881C_COMMAND_INSTR(0x7e, 0x02), 197 197 ILI9881C_COMMAND_INSTR(0x7f, 0x02), 198 - ILI9881C_COMMAND_INSTR(0x80, 0x0C), 198 + ILI9881C_COMMAND_INSTR(0x80, 0x0c), 199 199 ILI9881C_COMMAND_INSTR(0x81, 0x02), 200 - 
ILI9881C_COMMAND_INSTR(0x82, 0x0F), 201 - ILI9881C_COMMAND_INSTR(0x83, 0x0E), 202 - ILI9881C_COMMAND_INSTR(0x84, 0x0D), 200 + ILI9881C_COMMAND_INSTR(0x82, 0x0f), 201 + ILI9881C_COMMAND_INSTR(0x83, 0x0e), 202 + ILI9881C_COMMAND_INSTR(0x84, 0x0d), 203 203 ILI9881C_COMMAND_INSTR(0x85, 0x06), 204 204 ILI9881C_COMMAND_INSTR(0x86, 0x07), 205 205 ILI9881C_COMMAND_INSTR(0x87, 0x02), 206 206 ILI9881C_COMMAND_INSTR(0x88, 0x02), 207 207 ILI9881C_COMMAND_INSTR(0x89, 0x02), 208 - ILI9881C_COMMAND_INSTR(0x8A, 0x02), 208 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 209 209 ILI9881C_SWITCH_PAGE_INSTR(4), 210 - ILI9881C_COMMAND_INSTR(0x6C, 0x15), 211 - ILI9881C_COMMAND_INSTR(0x6E, 0x22), 212 - ILI9881C_COMMAND_INSTR(0x6F, 0x33), 213 - ILI9881C_COMMAND_INSTR(0x3A, 0xA4), 214 - ILI9881C_COMMAND_INSTR(0x8D, 0x0D), 215 - ILI9881C_COMMAND_INSTR(0x87, 0xBA), 210 + ILI9881C_COMMAND_INSTR(0x6c, 0x15), 211 + ILI9881C_COMMAND_INSTR(0x6e, 0x22), 212 + ILI9881C_COMMAND_INSTR(0x6f, 0x33), 213 + ILI9881C_COMMAND_INSTR(0x3a, 0xa4), 214 + ILI9881C_COMMAND_INSTR(0x8d, 0x0d), 215 + ILI9881C_COMMAND_INSTR(0x87, 0xba), 216 216 ILI9881C_COMMAND_INSTR(0x26, 0x76), 217 - ILI9881C_COMMAND_INSTR(0xB2, 0xD1), 217 + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), 218 218 ILI9881C_SWITCH_PAGE_INSTR(1), 219 - ILI9881C_COMMAND_INSTR(0x22, 0x0A), 220 - ILI9881C_COMMAND_INSTR(0x53, 0xDC), 221 - ILI9881C_COMMAND_INSTR(0x55, 0xA7), 219 + ILI9881C_COMMAND_INSTR(0x22, 0x0a), 220 + ILI9881C_COMMAND_INSTR(0x53, 0xdc), 221 + ILI9881C_COMMAND_INSTR(0x55, 0xa7), 222 222 ILI9881C_COMMAND_INSTR(0x50, 0x78), 223 223 ILI9881C_COMMAND_INSTR(0x51, 0x78), 224 224 ILI9881C_COMMAND_INSTR(0x31, 0x02), 225 225 ILI9881C_COMMAND_INSTR(0x60, 0x14), 226 - ILI9881C_COMMAND_INSTR(0xA0, 0x2A), 227 - ILI9881C_COMMAND_INSTR(0xA1, 0x39), 228 - ILI9881C_COMMAND_INSTR(0xA2, 0x46), 229 - ILI9881C_COMMAND_INSTR(0xA3, 0x0e), 230 - ILI9881C_COMMAND_INSTR(0xA4, 0x12), 231 - ILI9881C_COMMAND_INSTR(0xA5, 0x25), 232 - ILI9881C_COMMAND_INSTR(0xA6, 0x19), 233 - 
ILI9881C_COMMAND_INSTR(0xA7, 0x1d), 234 - ILI9881C_COMMAND_INSTR(0xA8, 0xa6), 235 - ILI9881C_COMMAND_INSTR(0xA9, 0x1C), 236 - ILI9881C_COMMAND_INSTR(0xAA, 0x29), 237 - ILI9881C_COMMAND_INSTR(0xAB, 0x85), 238 - ILI9881C_COMMAND_INSTR(0xAC, 0x1C), 239 - ILI9881C_COMMAND_INSTR(0xAD, 0x1B), 240 - ILI9881C_COMMAND_INSTR(0xAE, 0x51), 241 - ILI9881C_COMMAND_INSTR(0xAF, 0x22), 242 - ILI9881C_COMMAND_INSTR(0xB0, 0x2d), 243 - ILI9881C_COMMAND_INSTR(0xB1, 0x4f), 244 - ILI9881C_COMMAND_INSTR(0xB2, 0x59), 245 - ILI9881C_COMMAND_INSTR(0xB3, 0x3F), 246 - ILI9881C_COMMAND_INSTR(0xC0, 0x2A), 247 - ILI9881C_COMMAND_INSTR(0xC1, 0x3a), 248 - ILI9881C_COMMAND_INSTR(0xC2, 0x45), 249 - ILI9881C_COMMAND_INSTR(0xC3, 0x0e), 250 - ILI9881C_COMMAND_INSTR(0xC4, 0x11), 251 - ILI9881C_COMMAND_INSTR(0xC5, 0x24), 252 - ILI9881C_COMMAND_INSTR(0xC6, 0x1a), 253 - ILI9881C_COMMAND_INSTR(0xC7, 0x1c), 254 - ILI9881C_COMMAND_INSTR(0xC8, 0xaa), 255 - ILI9881C_COMMAND_INSTR(0xC9, 0x1C), 256 - ILI9881C_COMMAND_INSTR(0xCA, 0x29), 257 - ILI9881C_COMMAND_INSTR(0xCB, 0x96), 258 - ILI9881C_COMMAND_INSTR(0xCC, 0x1C), 259 - ILI9881C_COMMAND_INSTR(0xCD, 0x1B), 260 - ILI9881C_COMMAND_INSTR(0xCE, 0x51), 261 - ILI9881C_COMMAND_INSTR(0xCF, 0x22), 262 - ILI9881C_COMMAND_INSTR(0xD0, 0x2b), 263 - ILI9881C_COMMAND_INSTR(0xD1, 0x4b), 264 - ILI9881C_COMMAND_INSTR(0xD2, 0x59), 265 - ILI9881C_COMMAND_INSTR(0xD3, 0x3F), 226 + ILI9881C_COMMAND_INSTR(0xa0, 0x2a), 227 + ILI9881C_COMMAND_INSTR(0xa1, 0x39), 228 + ILI9881C_COMMAND_INSTR(0xa2, 0x46), 229 + ILI9881C_COMMAND_INSTR(0xa3, 0x0e), 230 + ILI9881C_COMMAND_INSTR(0xa4, 0x12), 231 + ILI9881C_COMMAND_INSTR(0xa5, 0x25), 232 + ILI9881C_COMMAND_INSTR(0xa6, 0x19), 233 + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), 234 + ILI9881C_COMMAND_INSTR(0xa8, 0xa6), 235 + ILI9881C_COMMAND_INSTR(0xa9, 0x1c), 236 + ILI9881C_COMMAND_INSTR(0xaa, 0x29), 237 + ILI9881C_COMMAND_INSTR(0xab, 0x85), 238 + ILI9881C_COMMAND_INSTR(0xac, 0x1c), 239 + ILI9881C_COMMAND_INSTR(0xad, 0x1b), 240 + 
ILI9881C_COMMAND_INSTR(0xae, 0x51), 241 + ILI9881C_COMMAND_INSTR(0xaf, 0x22), 242 + ILI9881C_COMMAND_INSTR(0xb0, 0x2d), 243 + ILI9881C_COMMAND_INSTR(0xb1, 0x4f), 244 + ILI9881C_COMMAND_INSTR(0xb2, 0x59), 245 + ILI9881C_COMMAND_INSTR(0xb3, 0x3f), 246 + ILI9881C_COMMAND_INSTR(0xc0, 0x2a), 247 + ILI9881C_COMMAND_INSTR(0xc1, 0x3a), 248 + ILI9881C_COMMAND_INSTR(0xc2, 0x45), 249 + ILI9881C_COMMAND_INSTR(0xc3, 0x0e), 250 + ILI9881C_COMMAND_INSTR(0xc4, 0x11), 251 + ILI9881C_COMMAND_INSTR(0xc5, 0x24), 252 + ILI9881C_COMMAND_INSTR(0xc6, 0x1a), 253 + ILI9881C_COMMAND_INSTR(0xc7, 0x1c), 254 + ILI9881C_COMMAND_INSTR(0xc8, 0xaa), 255 + ILI9881C_COMMAND_INSTR(0xc9, 0x1c), 256 + ILI9881C_COMMAND_INSTR(0xca, 0x29), 257 + ILI9881C_COMMAND_INSTR(0xcb, 0x96), 258 + ILI9881C_COMMAND_INSTR(0xcc, 0x1c), 259 + ILI9881C_COMMAND_INSTR(0xcd, 0x1b), 260 + ILI9881C_COMMAND_INSTR(0xce, 0x51), 261 + ILI9881C_COMMAND_INSTR(0xcf, 0x22), 262 + ILI9881C_COMMAND_INSTR(0xd0, 0x2b), 263 + ILI9881C_COMMAND_INSTR(0xd1, 0x4b), 264 + ILI9881C_COMMAND_INSTR(0xd2, 0x59), 265 + ILI9881C_COMMAND_INSTR(0xd3, 0x3f), 266 266 }; 267 267 268 268 static const struct ili9881c_instr k101_im2byl02_init[] = { ··· 276 276 ILI9881C_COMMAND_INSTR(0x07, 0x00), 277 277 ILI9881C_COMMAND_INSTR(0x08, 0x00), 278 278 ILI9881C_COMMAND_INSTR(0x09, 0x00), 279 - ILI9881C_COMMAND_INSTR(0x0A, 0x01), 280 - ILI9881C_COMMAND_INSTR(0x0B, 0x01), 281 - ILI9881C_COMMAND_INSTR(0x0C, 0x00), 282 - ILI9881C_COMMAND_INSTR(0x0D, 0x01), 283 - ILI9881C_COMMAND_INSTR(0x0E, 0x01), 284 - ILI9881C_COMMAND_INSTR(0x0F, 0x00), 279 + ILI9881C_COMMAND_INSTR(0x0a, 0x01), 280 + ILI9881C_COMMAND_INSTR(0x0b, 0x01), 281 + ILI9881C_COMMAND_INSTR(0x0c, 0x00), 282 + ILI9881C_COMMAND_INSTR(0x0d, 0x01), 283 + ILI9881C_COMMAND_INSTR(0x0e, 0x01), 284 + ILI9881C_COMMAND_INSTR(0x0f, 0x00), 285 285 ILI9881C_COMMAND_INSTR(0x10, 0x00), 286 286 ILI9881C_COMMAND_INSTR(0x11, 0x00), 287 287 ILI9881C_COMMAND_INSTR(0x12, 0x00), ··· 292 292 ILI9881C_COMMAND_INSTR(0x17, 0x00), 293 
293 ILI9881C_COMMAND_INSTR(0x18, 0x00), 294 294 ILI9881C_COMMAND_INSTR(0x19, 0x00), 295 - ILI9881C_COMMAND_INSTR(0x1A, 0x00), 296 - ILI9881C_COMMAND_INSTR(0x1B, 0x00), 297 - ILI9881C_COMMAND_INSTR(0x1C, 0x00), 298 - ILI9881C_COMMAND_INSTR(0x1D, 0x00), 299 - ILI9881C_COMMAND_INSTR(0x1E, 0x40), 300 - ILI9881C_COMMAND_INSTR(0x1F, 0xC0), 295 + ILI9881C_COMMAND_INSTR(0x1a, 0x00), 296 + ILI9881C_COMMAND_INSTR(0x1b, 0x00), 297 + ILI9881C_COMMAND_INSTR(0x1c, 0x00), 298 + ILI9881C_COMMAND_INSTR(0x1d, 0x00), 299 + ILI9881C_COMMAND_INSTR(0x1e, 0x40), 300 + ILI9881C_COMMAND_INSTR(0x1f, 0xc0), 301 301 ILI9881C_COMMAND_INSTR(0x20, 0x06), 302 302 ILI9881C_COMMAND_INSTR(0x21, 0x01), 303 303 ILI9881C_COMMAND_INSTR(0x22, 0x06), ··· 306 306 ILI9881C_COMMAND_INSTR(0x25, 0x88), 307 307 ILI9881C_COMMAND_INSTR(0x26, 0x00), 308 308 ILI9881C_COMMAND_INSTR(0x27, 0x00), 309 - ILI9881C_COMMAND_INSTR(0x28, 0x3B), 309 + ILI9881C_COMMAND_INSTR(0x28, 0x3b), 310 310 ILI9881C_COMMAND_INSTR(0x29, 0x03), 311 - ILI9881C_COMMAND_INSTR(0x2A, 0x00), 312 - ILI9881C_COMMAND_INSTR(0x2B, 0x00), 313 - ILI9881C_COMMAND_INSTR(0x2C, 0x00), 314 - ILI9881C_COMMAND_INSTR(0x2D, 0x00), 315 - ILI9881C_COMMAND_INSTR(0x2E, 0x00), 316 - ILI9881C_COMMAND_INSTR(0x2F, 0x00), 311 + ILI9881C_COMMAND_INSTR(0x2a, 0x00), 312 + ILI9881C_COMMAND_INSTR(0x2b, 0x00), 313 + ILI9881C_COMMAND_INSTR(0x2c, 0x00), 314 + ILI9881C_COMMAND_INSTR(0x2d, 0x00), 315 + ILI9881C_COMMAND_INSTR(0x2e, 0x00), 316 + ILI9881C_COMMAND_INSTR(0x2f, 0x00), 317 317 ILI9881C_COMMAND_INSTR(0x30, 0x00), 318 318 ILI9881C_COMMAND_INSTR(0x31, 0x00), 319 319 ILI9881C_COMMAND_INSTR(0x32, 0x00), ··· 324 324 ILI9881C_COMMAND_INSTR(0x37, 0x00), 325 325 ILI9881C_COMMAND_INSTR(0x38, 0x00), 326 326 ILI9881C_COMMAND_INSTR(0x39, 0x00), 327 - ILI9881C_COMMAND_INSTR(0x3A, 0x00), 328 - ILI9881C_COMMAND_INSTR(0x3B, 0x00), 329 - ILI9881C_COMMAND_INSTR(0x3C, 0x00), 330 - ILI9881C_COMMAND_INSTR(0x3D, 0x00), 331 - ILI9881C_COMMAND_INSTR(0x3E, 0x00), 332 - 
ILI9881C_COMMAND_INSTR(0x3F, 0x00), 327 + ILI9881C_COMMAND_INSTR(0x3a, 0x00), 328 + ILI9881C_COMMAND_INSTR(0x3b, 0x00), 329 + ILI9881C_COMMAND_INSTR(0x3c, 0x00), 330 + ILI9881C_COMMAND_INSTR(0x3d, 0x00), 331 + ILI9881C_COMMAND_INSTR(0x3e, 0x00), 332 + ILI9881C_COMMAND_INSTR(0x3f, 0x00), 333 333 ILI9881C_COMMAND_INSTR(0x40, 0x00), 334 334 ILI9881C_COMMAND_INSTR(0x41, 0x00), 335 335 ILI9881C_COMMAND_INSTR(0x42, 0x00), ··· 340 340 ILI9881C_COMMAND_INSTR(0x52, 0x45), 341 341 ILI9881C_COMMAND_INSTR(0x53, 0x67), 342 342 ILI9881C_COMMAND_INSTR(0x54, 0x89), 343 - ILI9881C_COMMAND_INSTR(0x55, 0xAB), 343 + ILI9881C_COMMAND_INSTR(0x55, 0xab), 344 344 ILI9881C_COMMAND_INSTR(0x56, 0x01), 345 345 ILI9881C_COMMAND_INSTR(0x57, 0x23), 346 346 ILI9881C_COMMAND_INSTR(0x58, 0x45), 347 347 ILI9881C_COMMAND_INSTR(0x59, 0x67), 348 - ILI9881C_COMMAND_INSTR(0x5A, 0x89), 349 - ILI9881C_COMMAND_INSTR(0x5B, 0xAB), 350 - ILI9881C_COMMAND_INSTR(0x5C, 0xCD), 351 - ILI9881C_COMMAND_INSTR(0x5D, 0xEF), 352 - ILI9881C_COMMAND_INSTR(0x5E, 0x00), 353 - ILI9881C_COMMAND_INSTR(0x5F, 0x01), 348 + ILI9881C_COMMAND_INSTR(0x5a, 0x89), 349 + ILI9881C_COMMAND_INSTR(0x5b, 0xab), 350 + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), 351 + ILI9881C_COMMAND_INSTR(0x5d, 0xef), 352 + ILI9881C_COMMAND_INSTR(0x5e, 0x00), 353 + ILI9881C_COMMAND_INSTR(0x5f, 0x01), 354 354 ILI9881C_COMMAND_INSTR(0x60, 0x01), 355 355 ILI9881C_COMMAND_INSTR(0x61, 0x06), 356 356 ILI9881C_COMMAND_INSTR(0x62, 0x06), ··· 361 361 ILI9881C_COMMAND_INSTR(0x67, 0x02), 362 362 ILI9881C_COMMAND_INSTR(0x68, 0x02), 363 363 ILI9881C_COMMAND_INSTR(0x69, 0x05), 364 - ILI9881C_COMMAND_INSTR(0x6A, 0x05), 365 - ILI9881C_COMMAND_INSTR(0x6B, 0x02), 366 - ILI9881C_COMMAND_INSTR(0x6C, 0x0D), 367 - ILI9881C_COMMAND_INSTR(0x6D, 0x0D), 368 - ILI9881C_COMMAND_INSTR(0x6E, 0x0C), 369 - ILI9881C_COMMAND_INSTR(0x6F, 0x0C), 370 - ILI9881C_COMMAND_INSTR(0x70, 0x0F), 371 - ILI9881C_COMMAND_INSTR(0x71, 0x0F), 372 - ILI9881C_COMMAND_INSTR(0x72, 0x0E), 373 - 
ILI9881C_COMMAND_INSTR(0x73, 0x0E), 364 + ILI9881C_COMMAND_INSTR(0x6a, 0x05), 365 + ILI9881C_COMMAND_INSTR(0x6b, 0x02), 366 + ILI9881C_COMMAND_INSTR(0x6c, 0x0d), 367 + ILI9881C_COMMAND_INSTR(0x6d, 0x0d), 368 + ILI9881C_COMMAND_INSTR(0x6e, 0x0c), 369 + ILI9881C_COMMAND_INSTR(0x6f, 0x0c), 370 + ILI9881C_COMMAND_INSTR(0x70, 0x0f), 371 + ILI9881C_COMMAND_INSTR(0x71, 0x0f), 372 + ILI9881C_COMMAND_INSTR(0x72, 0x0e), 373 + ILI9881C_COMMAND_INSTR(0x73, 0x0e), 374 374 ILI9881C_COMMAND_INSTR(0x74, 0x02), 375 375 ILI9881C_COMMAND_INSTR(0x75, 0x01), 376 376 ILI9881C_COMMAND_INSTR(0x76, 0x01), 377 377 ILI9881C_COMMAND_INSTR(0x77, 0x06), 378 378 ILI9881C_COMMAND_INSTR(0x78, 0x06), 379 379 ILI9881C_COMMAND_INSTR(0x79, 0x07), 380 - ILI9881C_COMMAND_INSTR(0x7A, 0x07), 381 - ILI9881C_COMMAND_INSTR(0x7B, 0x00), 382 - ILI9881C_COMMAND_INSTR(0x7C, 0x00), 383 - ILI9881C_COMMAND_INSTR(0x7D, 0x02), 384 - ILI9881C_COMMAND_INSTR(0x7E, 0x02), 385 - ILI9881C_COMMAND_INSTR(0x7F, 0x05), 380 + ILI9881C_COMMAND_INSTR(0x7a, 0x07), 381 + ILI9881C_COMMAND_INSTR(0x7b, 0x00), 382 + ILI9881C_COMMAND_INSTR(0x7c, 0x00), 383 + ILI9881C_COMMAND_INSTR(0x7d, 0x02), 384 + ILI9881C_COMMAND_INSTR(0x7e, 0x02), 385 + ILI9881C_COMMAND_INSTR(0x7f, 0x05), 386 386 ILI9881C_COMMAND_INSTR(0x80, 0x05), 387 387 ILI9881C_COMMAND_INSTR(0x81, 0x02), 388 - ILI9881C_COMMAND_INSTR(0x82, 0x0D), 389 - ILI9881C_COMMAND_INSTR(0x83, 0x0D), 390 - ILI9881C_COMMAND_INSTR(0x84, 0x0C), 391 - ILI9881C_COMMAND_INSTR(0x85, 0x0C), 392 - ILI9881C_COMMAND_INSTR(0x86, 0x0F), 393 - ILI9881C_COMMAND_INSTR(0x87, 0x0F), 394 - ILI9881C_COMMAND_INSTR(0x88, 0x0E), 395 - ILI9881C_COMMAND_INSTR(0x89, 0x0E), 396 - ILI9881C_COMMAND_INSTR(0x8A, 0x02), 388 + ILI9881C_COMMAND_INSTR(0x82, 0x0d), 389 + ILI9881C_COMMAND_INSTR(0x83, 0x0d), 390 + ILI9881C_COMMAND_INSTR(0x84, 0x0c), 391 + ILI9881C_COMMAND_INSTR(0x85, 0x0c), 392 + ILI9881C_COMMAND_INSTR(0x86, 0x0f), 393 + ILI9881C_COMMAND_INSTR(0x87, 0x0f), 394 + ILI9881C_COMMAND_INSTR(0x88, 0x0e), 395 + 
ILI9881C_COMMAND_INSTR(0x89, 0x0e), 396 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 397 397 ILI9881C_SWITCH_PAGE_INSTR(4), 398 - ILI9881C_COMMAND_INSTR(0x3B, 0xC0), /* ILI4003D sel */ 399 - ILI9881C_COMMAND_INSTR(0x6C, 0x15), /* Set VCORE voltage = 1.5V */ 400 - ILI9881C_COMMAND_INSTR(0x6E, 0x2A), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */ 401 - ILI9881C_COMMAND_INSTR(0x6F, 0x33), /* pumping ratio VGH=5x VGL=-3x */ 402 - ILI9881C_COMMAND_INSTR(0x8D, 0x1B), /* VGL clamp -10V */ 403 - ILI9881C_COMMAND_INSTR(0x87, 0xBA), /* ESD */ 404 - ILI9881C_COMMAND_INSTR(0x3A, 0x24), /* POWER SAVING */ 398 + ILI9881C_COMMAND_INSTR(0x3b, 0xc0), /* ILI4003D sel */ 399 + ILI9881C_COMMAND_INSTR(0x6c, 0x15), /* Set VCORE voltage = 1.5V */ 400 + ILI9881C_COMMAND_INSTR(0x6e, 0x2a), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */ 401 + ILI9881C_COMMAND_INSTR(0x6f, 0x33), /* pumping ratio VGH=5x VGL=-3x */ 402 + ILI9881C_COMMAND_INSTR(0x8d, 0x1b), /* VGL clamp -10V */ 403 + ILI9881C_COMMAND_INSTR(0x87, 0xba), /* ESD */ 404 + ILI9881C_COMMAND_INSTR(0x3a, 0x24), /* POWER SAVING */ 405 405 ILI9881C_COMMAND_INSTR(0x26, 0x76), 406 - ILI9881C_COMMAND_INSTR(0xB2, 0xD1), 406 + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), 407 407 ILI9881C_SWITCH_PAGE_INSTR(1), 408 - ILI9881C_COMMAND_INSTR(0x22, 0x0A), /* BGR, SS */ 408 + ILI9881C_COMMAND_INSTR(0x22, 0x0a), /* BGR, SS */ 409 409 ILI9881C_COMMAND_INSTR(0x31, 0x00), /* Zigzag type3 inversion */ 410 410 ILI9881C_COMMAND_INSTR(0x40, 0x53), /* ILI4003D sel */ 411 411 ILI9881C_COMMAND_INSTR(0x43, 0x66), 412 - ILI9881C_COMMAND_INSTR(0x53, 0x4C), 412 + ILI9881C_COMMAND_INSTR(0x53, 0x4c), 413 413 ILI9881C_COMMAND_INSTR(0x50, 0x87), 414 414 ILI9881C_COMMAND_INSTR(0x51, 0x82), 415 415 ILI9881C_COMMAND_INSTR(0x60, 0x15), 416 416 ILI9881C_COMMAND_INSTR(0x61, 0x01), 417 - ILI9881C_COMMAND_INSTR(0x62, 0x0C), 417 + ILI9881C_COMMAND_INSTR(0x62, 0x0c), 418 418 ILI9881C_COMMAND_INSTR(0x63, 0x00), 419 - ILI9881C_COMMAND_INSTR(0xA0, 0x00), 420 - 
ILI9881C_COMMAND_INSTR(0xA1, 0x13), /* VP251 */ 421 - ILI9881C_COMMAND_INSTR(0xA2, 0x23), /* VP247 */ 422 - ILI9881C_COMMAND_INSTR(0xA3, 0x14), /* VP243 */ 423 - ILI9881C_COMMAND_INSTR(0xA4, 0x16), /* VP239 */ 424 - ILI9881C_COMMAND_INSTR(0xA5, 0x29), /* VP231 */ 425 - ILI9881C_COMMAND_INSTR(0xA6, 0x1E), /* VP219 */ 426 - ILI9881C_COMMAND_INSTR(0xA7, 0x1D), /* VP203 */ 427 - ILI9881C_COMMAND_INSTR(0xA8, 0x86), /* VP175 */ 428 - ILI9881C_COMMAND_INSTR(0xA9, 0x1E), /* VP144 */ 429 - ILI9881C_COMMAND_INSTR(0xAA, 0x29), /* VP111 */ 430 - ILI9881C_COMMAND_INSTR(0xAB, 0x74), /* VP80 */ 431 - ILI9881C_COMMAND_INSTR(0xAC, 0x19), /* VP52 */ 432 - ILI9881C_COMMAND_INSTR(0xAD, 0x17), /* VP36 */ 433 - ILI9881C_COMMAND_INSTR(0xAE, 0x4B), /* VP24 */ 434 - ILI9881C_COMMAND_INSTR(0xAF, 0x20), /* VP16 */ 435 - ILI9881C_COMMAND_INSTR(0xB0, 0x26), /* VP12 */ 436 - ILI9881C_COMMAND_INSTR(0xB1, 0x4C), /* VP8 */ 437 - ILI9881C_COMMAND_INSTR(0xB2, 0x5D), /* VP4 */ 438 - ILI9881C_COMMAND_INSTR(0xB3, 0x3F), /* VP0 */ 439 - ILI9881C_COMMAND_INSTR(0xC0, 0x00), /* VN255 GAMMA N */ 440 - ILI9881C_COMMAND_INSTR(0xC1, 0x13), /* VN251 */ 441 - ILI9881C_COMMAND_INSTR(0xC2, 0x23), /* VN247 */ 442 - ILI9881C_COMMAND_INSTR(0xC3, 0x14), /* VN243 */ 443 - ILI9881C_COMMAND_INSTR(0xC4, 0x16), /* VN239 */ 444 - ILI9881C_COMMAND_INSTR(0xC5, 0x29), /* VN231 */ 445 - ILI9881C_COMMAND_INSTR(0xC6, 0x1E), /* VN219 */ 446 - ILI9881C_COMMAND_INSTR(0xC7, 0x1D), /* VN203 */ 447 - ILI9881C_COMMAND_INSTR(0xC8, 0x86), /* VN175 */ 448 - ILI9881C_COMMAND_INSTR(0xC9, 0x1E), /* VN144 */ 449 - ILI9881C_COMMAND_INSTR(0xCA, 0x29), /* VN111 */ 450 - ILI9881C_COMMAND_INSTR(0xCB, 0x74), /* VN80 */ 451 - ILI9881C_COMMAND_INSTR(0xCC, 0x19), /* VN52 */ 452 - ILI9881C_COMMAND_INSTR(0xCD, 0x17), /* VN36 */ 453 - ILI9881C_COMMAND_INSTR(0xCE, 0x4B), /* VN24 */ 454 - ILI9881C_COMMAND_INSTR(0xCF, 0x20), /* VN16 */ 455 - ILI9881C_COMMAND_INSTR(0xD0, 0x26), /* VN12 */ 456 - ILI9881C_COMMAND_INSTR(0xD1, 0x4C), /* VN8 */ 457 - 
ILI9881C_COMMAND_INSTR(0xD2, 0x5D), /* VN4 */ 458 - ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */ 419 + ILI9881C_COMMAND_INSTR(0xa0, 0x00), 420 + ILI9881C_COMMAND_INSTR(0xa1, 0x13), /* VP251 */ 421 + ILI9881C_COMMAND_INSTR(0xa2, 0x23), /* VP247 */ 422 + ILI9881C_COMMAND_INSTR(0xa3, 0x14), /* VP243 */ 423 + ILI9881C_COMMAND_INSTR(0xa4, 0x16), /* VP239 */ 424 + ILI9881C_COMMAND_INSTR(0xa5, 0x29), /* VP231 */ 425 + ILI9881C_COMMAND_INSTR(0xa6, 0x1e), /* VP219 */ 426 + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), /* VP203 */ 427 + ILI9881C_COMMAND_INSTR(0xa8, 0x86), /* VP175 */ 428 + ILI9881C_COMMAND_INSTR(0xa9, 0x1e), /* VP144 */ 429 + ILI9881C_COMMAND_INSTR(0xaa, 0x29), /* VP111 */ 430 + ILI9881C_COMMAND_INSTR(0xab, 0x74), /* VP80 */ 431 + ILI9881C_COMMAND_INSTR(0xac, 0x19), /* VP52 */ 432 + ILI9881C_COMMAND_INSTR(0xad, 0x17), /* VP36 */ 433 + ILI9881C_COMMAND_INSTR(0xae, 0x4b), /* VP24 */ 434 + ILI9881C_COMMAND_INSTR(0xaf, 0x20), /* VP16 */ 435 + ILI9881C_COMMAND_INSTR(0xb0, 0x26), /* VP12 */ 436 + ILI9881C_COMMAND_INSTR(0xb1, 0x4c), /* VP8 */ 437 + ILI9881C_COMMAND_INSTR(0xb2, 0x5d), /* VP4 */ 438 + ILI9881C_COMMAND_INSTR(0xb3, 0x3f), /* VP0 */ 439 + ILI9881C_COMMAND_INSTR(0xc0, 0x00), /* VN255 GAMMA N */ 440 + ILI9881C_COMMAND_INSTR(0xc1, 0x13), /* VN251 */ 441 + ILI9881C_COMMAND_INSTR(0xc2, 0x23), /* VN247 */ 442 + ILI9881C_COMMAND_INSTR(0xc3, 0x14), /* VN243 */ 443 + ILI9881C_COMMAND_INSTR(0xc4, 0x16), /* VN239 */ 444 + ILI9881C_COMMAND_INSTR(0xc5, 0x29), /* VN231 */ 445 + ILI9881C_COMMAND_INSTR(0xc6, 0x1e), /* VN219 */ 446 + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), /* VN203 */ 447 + ILI9881C_COMMAND_INSTR(0xc8, 0x86), /* VN175 */ 448 + ILI9881C_COMMAND_INSTR(0xc9, 0x1e), /* VN144 */ 449 + ILI9881C_COMMAND_INSTR(0xca, 0x29), /* VN111 */ 450 + ILI9881C_COMMAND_INSTR(0xcb, 0x74), /* VN80 */ 451 + ILI9881C_COMMAND_INSTR(0xcc, 0x19), /* VN52 */ 452 + ILI9881C_COMMAND_INSTR(0xcd, 0x17), /* VN36 */ 453 + ILI9881C_COMMAND_INSTR(0xce, 0x4b), /* VN24 */ 454 + 
ILI9881C_COMMAND_INSTR(0xcf, 0x20), /* VN16 */ 455 + ILI9881C_COMMAND_INSTR(0xd0, 0x26), /* VN12 */ 456 + ILI9881C_COMMAND_INSTR(0xd1, 0x4c), /* VN8 */ 457 + ILI9881C_COMMAND_INSTR(0xd2, 0x5d), /* VN4 */ 458 + ILI9881C_COMMAND_INSTR(0xd3, 0x3f), /* VN0 */ 459 459 }; 460 460 461 461 static const struct ili9881c_instr kd050hdfia020_init[] = { ··· 517 517 ILI9881C_COMMAND_INSTR(0x35, 0x00), 518 518 ILI9881C_COMMAND_INSTR(0x36, 0x00), 519 519 ILI9881C_COMMAND_INSTR(0x37, 0x00), 520 - ILI9881C_COMMAND_INSTR(0x38, 0x3C), 520 + ILI9881C_COMMAND_INSTR(0x38, 0x3c), 521 521 ILI9881C_COMMAND_INSTR(0x39, 0x00), 522 522 ILI9881C_COMMAND_INSTR(0x3a, 0x40), 523 523 ILI9881C_COMMAND_INSTR(0x3b, 0x40), ··· 549 549 ILI9881C_COMMAND_INSTR(0x60, 0x00), 550 550 ILI9881C_COMMAND_INSTR(0x61, 0x15), 551 551 ILI9881C_COMMAND_INSTR(0x62, 0x14), 552 - ILI9881C_COMMAND_INSTR(0x63, 0x0E), 553 - ILI9881C_COMMAND_INSTR(0x64, 0x0F), 554 - ILI9881C_COMMAND_INSTR(0x65, 0x0C), 555 - ILI9881C_COMMAND_INSTR(0x66, 0x0D), 552 + ILI9881C_COMMAND_INSTR(0x63, 0x0e), 553 + ILI9881C_COMMAND_INSTR(0x64, 0x0f), 554 + ILI9881C_COMMAND_INSTR(0x65, 0x0c), 555 + ILI9881C_COMMAND_INSTR(0x66, 0x0d), 556 556 ILI9881C_COMMAND_INSTR(0x67, 0x06), 557 557 ILI9881C_COMMAND_INSTR(0x68, 0x02), 558 558 ILI9881C_COMMAND_INSTR(0x69, 0x07), ··· 571 571 ILI9881C_COMMAND_INSTR(0x76, 0x00), 572 572 ILI9881C_COMMAND_INSTR(0x77, 0x14), 573 573 ILI9881C_COMMAND_INSTR(0x78, 0x15), 574 - ILI9881C_COMMAND_INSTR(0x79, 0x0E), 575 - ILI9881C_COMMAND_INSTR(0x7a, 0x0F), 576 - ILI9881C_COMMAND_INSTR(0x7b, 0x0C), 577 - ILI9881C_COMMAND_INSTR(0x7c, 0x0D), 574 + ILI9881C_COMMAND_INSTR(0x79, 0x0e), 575 + ILI9881C_COMMAND_INSTR(0x7a, 0x0f), 576 + ILI9881C_COMMAND_INSTR(0x7b, 0x0c), 577 + ILI9881C_COMMAND_INSTR(0x7c, 0x0d), 578 578 ILI9881C_COMMAND_INSTR(0x7d, 0x06), 579 579 ILI9881C_COMMAND_INSTR(0x7e, 0x02), 580 580 ILI9881C_COMMAND_INSTR(0x7f, 0x07), ··· 587 587 ILI9881C_COMMAND_INSTR(0x87, 0x02), 588 588 ILI9881C_COMMAND_INSTR(0x88, 0x02), 589 
589 ILI9881C_COMMAND_INSTR(0x89, 0x02), 590 - ILI9881C_COMMAND_INSTR(0x8A, 0x02), 590 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 591 591 ILI9881C_SWITCH_PAGE_INSTR(0x4), 592 - ILI9881C_COMMAND_INSTR(0x6C, 0x15), 593 - ILI9881C_COMMAND_INSTR(0x6E, 0x2A), 594 - ILI9881C_COMMAND_INSTR(0x6F, 0x33), 595 - ILI9881C_COMMAND_INSTR(0x3A, 0x94), 596 - ILI9881C_COMMAND_INSTR(0x8D, 0x15), 597 - ILI9881C_COMMAND_INSTR(0x87, 0xBA), 592 + ILI9881C_COMMAND_INSTR(0x6c, 0x15), 593 + ILI9881C_COMMAND_INSTR(0x6e, 0x2a), 594 + ILI9881C_COMMAND_INSTR(0x6f, 0x33), 595 + ILI9881C_COMMAND_INSTR(0x3a, 0x94), 596 + ILI9881C_COMMAND_INSTR(0x8d, 0x15), 597 + ILI9881C_COMMAND_INSTR(0x87, 0xba), 598 598 ILI9881C_COMMAND_INSTR(0x26, 0x76), 599 - ILI9881C_COMMAND_INSTR(0xB2, 0xD1), 600 - ILI9881C_COMMAND_INSTR(0xB5, 0x06), 599 + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), 600 + ILI9881C_COMMAND_INSTR(0xb5, 0x06), 601 601 ILI9881C_SWITCH_PAGE_INSTR(0x1), 602 - ILI9881C_COMMAND_INSTR(0x22, 0x0A), 602 + ILI9881C_COMMAND_INSTR(0x22, 0x0a), 603 603 ILI9881C_COMMAND_INSTR(0x31, 0x00), 604 604 ILI9881C_COMMAND_INSTR(0x53, 0x90), 605 - ILI9881C_COMMAND_INSTR(0x55, 0xA2), 606 - ILI9881C_COMMAND_INSTR(0x50, 0xB7), 607 - ILI9881C_COMMAND_INSTR(0x51, 0xB7), 605 + ILI9881C_COMMAND_INSTR(0x55, 0xa2), 606 + ILI9881C_COMMAND_INSTR(0x50, 0xb7), 607 + ILI9881C_COMMAND_INSTR(0x51, 0xb7), 608 608 ILI9881C_COMMAND_INSTR(0x60, 0x22), 609 609 ILI9881C_COMMAND_INSTR(0x61, 0x00), 610 610 ILI9881C_COMMAND_INSTR(0x62, 0x19), 611 611 ILI9881C_COMMAND_INSTR(0x63, 0x10), 612 - ILI9881C_COMMAND_INSTR(0xA0, 0x08), 613 - ILI9881C_COMMAND_INSTR(0xA1, 0x1A), 614 - ILI9881C_COMMAND_INSTR(0xA2, 0x27), 615 - ILI9881C_COMMAND_INSTR(0xA3, 0x15), 616 - ILI9881C_COMMAND_INSTR(0xA4, 0x17), 617 - ILI9881C_COMMAND_INSTR(0xA5, 0x2A), 618 - ILI9881C_COMMAND_INSTR(0xA6, 0x1E), 619 - ILI9881C_COMMAND_INSTR(0xA7, 0x1F), 620 - ILI9881C_COMMAND_INSTR(0xA8, 0x8B), 621 - ILI9881C_COMMAND_INSTR(0xA9, 0x1B), 622 - ILI9881C_COMMAND_INSTR(0xAA, 0x27), 623 - 
ILI9881C_COMMAND_INSTR(0xAB, 0x78), 624 - ILI9881C_COMMAND_INSTR(0xAC, 0x18), 625 - ILI9881C_COMMAND_INSTR(0xAD, 0x18), 626 - ILI9881C_COMMAND_INSTR(0xAE, 0x4C), 627 - ILI9881C_COMMAND_INSTR(0xAF, 0x21), 628 - ILI9881C_COMMAND_INSTR(0xB0, 0x27), 629 - ILI9881C_COMMAND_INSTR(0xB1, 0x54), 630 - ILI9881C_COMMAND_INSTR(0xB2, 0x67), 631 - ILI9881C_COMMAND_INSTR(0xB3, 0x39), 632 - ILI9881C_COMMAND_INSTR(0xC0, 0x08), 633 - ILI9881C_COMMAND_INSTR(0xC1, 0x1A), 634 - ILI9881C_COMMAND_INSTR(0xC2, 0x27), 635 - ILI9881C_COMMAND_INSTR(0xC3, 0x15), 636 - ILI9881C_COMMAND_INSTR(0xC4, 0x17), 637 - ILI9881C_COMMAND_INSTR(0xC5, 0x2A), 638 - ILI9881C_COMMAND_INSTR(0xC6, 0x1E), 639 - ILI9881C_COMMAND_INSTR(0xC7, 0x1F), 640 - ILI9881C_COMMAND_INSTR(0xC8, 0x8B), 641 - ILI9881C_COMMAND_INSTR(0xC9, 0x1B), 642 - ILI9881C_COMMAND_INSTR(0xCA, 0x27), 643 - ILI9881C_COMMAND_INSTR(0xCB, 0x78), 644 - ILI9881C_COMMAND_INSTR(0xCC, 0x18), 645 - ILI9881C_COMMAND_INSTR(0xCD, 0x18), 646 - ILI9881C_COMMAND_INSTR(0xCE, 0x4C), 647 - ILI9881C_COMMAND_INSTR(0xCF, 0x21), 648 - ILI9881C_COMMAND_INSTR(0xD0, 0x27), 649 - ILI9881C_COMMAND_INSTR(0xD1, 0x54), 650 - ILI9881C_COMMAND_INSTR(0xD2, 0x67), 651 - ILI9881C_COMMAND_INSTR(0xD3, 0x39), 612 + ILI9881C_COMMAND_INSTR(0xa0, 0x08), 613 + ILI9881C_COMMAND_INSTR(0xa1, 0x1a), 614 + ILI9881C_COMMAND_INSTR(0xa2, 0x27), 615 + ILI9881C_COMMAND_INSTR(0xa3, 0x15), 616 + ILI9881C_COMMAND_INSTR(0xa4, 0x17), 617 + ILI9881C_COMMAND_INSTR(0xa5, 0x2a), 618 + ILI9881C_COMMAND_INSTR(0xa6, 0x1e), 619 + ILI9881C_COMMAND_INSTR(0xa7, 0x1f), 620 + ILI9881C_COMMAND_INSTR(0xa8, 0x8b), 621 + ILI9881C_COMMAND_INSTR(0xa9, 0x1b), 622 + ILI9881C_COMMAND_INSTR(0xaa, 0x27), 623 + ILI9881C_COMMAND_INSTR(0xab, 0x78), 624 + ILI9881C_COMMAND_INSTR(0xac, 0x18), 625 + ILI9881C_COMMAND_INSTR(0xad, 0x18), 626 + ILI9881C_COMMAND_INSTR(0xae, 0x4c), 627 + ILI9881C_COMMAND_INSTR(0xaf, 0x21), 628 + ILI9881C_COMMAND_INSTR(0xb0, 0x27), 629 + ILI9881C_COMMAND_INSTR(0xb1, 0x54), 630 + 
ILI9881C_COMMAND_INSTR(0xb2, 0x67), 631 + ILI9881C_COMMAND_INSTR(0xb3, 0x39), 632 + ILI9881C_COMMAND_INSTR(0xc0, 0x08), 633 + ILI9881C_COMMAND_INSTR(0xc1, 0x1a), 634 + ILI9881C_COMMAND_INSTR(0xc2, 0x27), 635 + ILI9881C_COMMAND_INSTR(0xc3, 0x15), 636 + ILI9881C_COMMAND_INSTR(0xc4, 0x17), 637 + ILI9881C_COMMAND_INSTR(0xc5, 0x2a), 638 + ILI9881C_COMMAND_INSTR(0xc6, 0x1e), 639 + ILI9881C_COMMAND_INSTR(0xc7, 0x1f), 640 + ILI9881C_COMMAND_INSTR(0xc8, 0x8b), 641 + ILI9881C_COMMAND_INSTR(0xc9, 0x1b), 642 + ILI9881C_COMMAND_INSTR(0xca, 0x27), 643 + ILI9881C_COMMAND_INSTR(0xcb, 0x78), 644 + ILI9881C_COMMAND_INSTR(0xcc, 0x18), 645 + ILI9881C_COMMAND_INSTR(0xcd, 0x18), 646 + ILI9881C_COMMAND_INSTR(0xce, 0x4c), 647 + ILI9881C_COMMAND_INSTR(0xcf, 0x21), 648 + ILI9881C_COMMAND_INSTR(0xd0, 0x27), 649 + ILI9881C_COMMAND_INSTR(0xd1, 0x54), 650 + ILI9881C_COMMAND_INSTR(0xd2, 0x67), 651 + ILI9881C_COMMAND_INSTR(0xd3, 0x39), 652 652 ILI9881C_SWITCH_PAGE_INSTR(0), 653 653 ILI9881C_COMMAND_INSTR(0x35, 0x00), 654 - ILI9881C_COMMAND_INSTR(0x3A, 0x7), 654 + ILI9881C_COMMAND_INSTR(0x3a, 0x7), 655 655 }; 656 656 657 657 static const struct ili9881c_instr tl050hdv35_init[] = { ··· 696 696 ILI9881C_COMMAND_INSTR(0x35, 0x00), 697 697 ILI9881C_COMMAND_INSTR(0x36, 0x00), 698 698 ILI9881C_COMMAND_INSTR(0x37, 0x00), 699 - ILI9881C_COMMAND_INSTR(0x38, 0x3C), 699 + ILI9881C_COMMAND_INSTR(0x38, 0x3c), 700 700 ILI9881C_COMMAND_INSTR(0x39, 0x00), 701 701 ILI9881C_COMMAND_INSTR(0x3a, 0x40), 702 702 ILI9881C_COMMAND_INSTR(0x3b, 0x40), ··· 750 750 ILI9881C_COMMAND_INSTR(0x7f, 0x07), 751 751 ILI9881C_COMMAND_INSTR(0x88, 0x02), 752 752 ILI9881C_COMMAND_INSTR(0x89, 0x02), 753 - ILI9881C_COMMAND_INSTR(0x8A, 0x02), 753 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 754 754 ILI9881C_SWITCH_PAGE_INSTR(4), 755 755 ILI9881C_COMMAND_INSTR(0x38, 0x01), 756 756 ILI9881C_COMMAND_INSTR(0x39, 0x00), ··· 831 831 ILI9881C_COMMAND_INSTR(0x07, 0x02), 832 832 ILI9881C_COMMAND_INSTR(0x08, 0x02), 833 833 ILI9881C_COMMAND_INSTR(0x09, 
0x00), 834 - ILI9881C_COMMAND_INSTR(0x0A, 0x00), 835 - ILI9881C_COMMAND_INSTR(0x0B, 0x00), 836 - ILI9881C_COMMAND_INSTR(0x0C, 0x00), 837 - ILI9881C_COMMAND_INSTR(0x0D, 0x00), 838 - ILI9881C_COMMAND_INSTR(0x0E, 0x00), 839 - ILI9881C_COMMAND_INSTR(0x0F, 0x00), 834 + ILI9881C_COMMAND_INSTR(0x0a, 0x00), 835 + ILI9881C_COMMAND_INSTR(0x0b, 0x00), 836 + ILI9881C_COMMAND_INSTR(0x0c, 0x00), 837 + ILI9881C_COMMAND_INSTR(0x0d, 0x00), 838 + ILI9881C_COMMAND_INSTR(0x0e, 0x00), 839 + ILI9881C_COMMAND_INSTR(0x0f, 0x00), 840 840 841 841 ILI9881C_COMMAND_INSTR(0x10, 0x00), 842 842 ILI9881C_COMMAND_INSTR(0x11, 0x00), ··· 848 848 ILI9881C_COMMAND_INSTR(0x17, 0x00), 849 849 ILI9881C_COMMAND_INSTR(0x18, 0x08), 850 850 ILI9881C_COMMAND_INSTR(0x19, 0x00), 851 - ILI9881C_COMMAND_INSTR(0x1A, 0x00), 852 - ILI9881C_COMMAND_INSTR(0x1B, 0x00), 853 - ILI9881C_COMMAND_INSTR(0x1C, 0x00), 854 - ILI9881C_COMMAND_INSTR(0x1D, 0x00), 855 - ILI9881C_COMMAND_INSTR(0x1E, 0xC0), 856 - ILI9881C_COMMAND_INSTR(0x1F, 0x80), 851 + ILI9881C_COMMAND_INSTR(0x1a, 0x00), 852 + ILI9881C_COMMAND_INSTR(0x1b, 0x00), 853 + ILI9881C_COMMAND_INSTR(0x1c, 0x00), 854 + ILI9881C_COMMAND_INSTR(0x1d, 0x00), 855 + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), 856 + ILI9881C_COMMAND_INSTR(0x1f, 0x80), 857 857 858 858 ILI9881C_COMMAND_INSTR(0x20, 0x02), 859 859 ILI9881C_COMMAND_INSTR(0x21, 0x09), ··· 865 865 ILI9881C_COMMAND_INSTR(0x27, 0x00), 866 866 ILI9881C_COMMAND_INSTR(0x28, 0x55), 867 867 ILI9881C_COMMAND_INSTR(0x29, 0x03), 868 - ILI9881C_COMMAND_INSTR(0x2A, 0x00), 869 - ILI9881C_COMMAND_INSTR(0x2B, 0x00), 870 - ILI9881C_COMMAND_INSTR(0x2C, 0x00), 871 - ILI9881C_COMMAND_INSTR(0x2D, 0x00), 872 - ILI9881C_COMMAND_INSTR(0x2E, 0x00), 873 - ILI9881C_COMMAND_INSTR(0x2F, 0x00), 868 + ILI9881C_COMMAND_INSTR(0x2a, 0x00), 869 + ILI9881C_COMMAND_INSTR(0x2b, 0x00), 870 + ILI9881C_COMMAND_INSTR(0x2c, 0x00), 871 + ILI9881C_COMMAND_INSTR(0x2d, 0x00), 872 + ILI9881C_COMMAND_INSTR(0x2e, 0x00), 873 + ILI9881C_COMMAND_INSTR(0x2f, 0x00), 874 874 875 875 
ILI9881C_COMMAND_INSTR(0x30, 0x00), 876 876 ILI9881C_COMMAND_INSTR(0x31, 0x00), ··· 880 880 ILI9881C_COMMAND_INSTR(0x35, 0x05), 881 881 ILI9881C_COMMAND_INSTR(0x36, 0x05), 882 882 ILI9881C_COMMAND_INSTR(0x37, 0x00), 883 - ILI9881C_COMMAND_INSTR(0x38, 0x3C), 883 + ILI9881C_COMMAND_INSTR(0x38, 0x3c), 884 884 ILI9881C_COMMAND_INSTR(0x39, 0x35), 885 - ILI9881C_COMMAND_INSTR(0x3A, 0x00), 886 - ILI9881C_COMMAND_INSTR(0x3B, 0x40), 887 - ILI9881C_COMMAND_INSTR(0x3C, 0x00), 888 - ILI9881C_COMMAND_INSTR(0x3D, 0x00), 889 - ILI9881C_COMMAND_INSTR(0x3E, 0x00), 890 - ILI9881C_COMMAND_INSTR(0x3F, 0x00), 885 + ILI9881C_COMMAND_INSTR(0x3a, 0x00), 886 + ILI9881C_COMMAND_INSTR(0x3b, 0x40), 887 + ILI9881C_COMMAND_INSTR(0x3c, 0x00), 888 + ILI9881C_COMMAND_INSTR(0x3d, 0x00), 889 + ILI9881C_COMMAND_INSTR(0x3e, 0x00), 890 + ILI9881C_COMMAND_INSTR(0x3f, 0x00), 891 891 892 892 ILI9881C_COMMAND_INSTR(0x40, 0x00), 893 893 ILI9881C_COMMAND_INSTR(0x41, 0x88), 894 894 ILI9881C_COMMAND_INSTR(0x42, 0x00), 895 895 ILI9881C_COMMAND_INSTR(0x43, 0x00), 896 - ILI9881C_COMMAND_INSTR(0x44, 0x1F), 896 + ILI9881C_COMMAND_INSTR(0x44, 0x1f), 897 897 898 898 ILI9881C_COMMAND_INSTR(0x50, 0x01), 899 899 ILI9881C_COMMAND_INSTR(0x51, 0x23), 900 900 ILI9881C_COMMAND_INSTR(0x52, 0x45), 901 901 ILI9881C_COMMAND_INSTR(0x53, 0x67), 902 902 ILI9881C_COMMAND_INSTR(0x54, 0x89), 903 - ILI9881C_COMMAND_INSTR(0x55, 0xaB), 903 + ILI9881C_COMMAND_INSTR(0x55, 0xab), 904 904 ILI9881C_COMMAND_INSTR(0x56, 0x01), 905 905 ILI9881C_COMMAND_INSTR(0x57, 0x23), 906 906 ILI9881C_COMMAND_INSTR(0x58, 0x45), 907 907 ILI9881C_COMMAND_INSTR(0x59, 0x67), 908 - ILI9881C_COMMAND_INSTR(0x5A, 0x89), 909 - ILI9881C_COMMAND_INSTR(0x5B, 0xAB), 910 - ILI9881C_COMMAND_INSTR(0x5C, 0xCD), 911 - ILI9881C_COMMAND_INSTR(0x5D, 0xEF), 912 - ILI9881C_COMMAND_INSTR(0x5E, 0x03), 913 - ILI9881C_COMMAND_INSTR(0x5F, 0x14), 908 + ILI9881C_COMMAND_INSTR(0x5a, 0x89), 909 + ILI9881C_COMMAND_INSTR(0x5b, 0xab), 910 + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), 911 + 
ILI9881C_COMMAND_INSTR(0x5d, 0xef), 912 + ILI9881C_COMMAND_INSTR(0x5e, 0x03), 913 + ILI9881C_COMMAND_INSTR(0x5f, 0x14), 914 914 915 915 ILI9881C_COMMAND_INSTR(0x60, 0x15), 916 - ILI9881C_COMMAND_INSTR(0x61, 0x0C), 917 - ILI9881C_COMMAND_INSTR(0x62, 0x0D), 918 - ILI9881C_COMMAND_INSTR(0x63, 0x0E), 919 - ILI9881C_COMMAND_INSTR(0x64, 0x0F), 916 + ILI9881C_COMMAND_INSTR(0x61, 0x0c), 917 + ILI9881C_COMMAND_INSTR(0x62, 0x0d), 918 + ILI9881C_COMMAND_INSTR(0x63, 0x0e), 919 + ILI9881C_COMMAND_INSTR(0x64, 0x0f), 920 920 ILI9881C_COMMAND_INSTR(0x65, 0x10), 921 921 ILI9881C_COMMAND_INSTR(0x66, 0x11), 922 922 ILI9881C_COMMAND_INSTR(0x67, 0x08), 923 923 ILI9881C_COMMAND_INSTR(0x68, 0x02), 924 - ILI9881C_COMMAND_INSTR(0x69, 0x0A), 925 - ILI9881C_COMMAND_INSTR(0x6A, 0x02), 926 - ILI9881C_COMMAND_INSTR(0x6B, 0x02), 927 - ILI9881C_COMMAND_INSTR(0x6C, 0x02), 928 - ILI9881C_COMMAND_INSTR(0x6D, 0x02), 929 - ILI9881C_COMMAND_INSTR(0x6E, 0x02), 930 - ILI9881C_COMMAND_INSTR(0x6F, 0x02), 924 + ILI9881C_COMMAND_INSTR(0x69, 0x0a), 925 + ILI9881C_COMMAND_INSTR(0x6a, 0x02), 926 + ILI9881C_COMMAND_INSTR(0x6b, 0x02), 927 + ILI9881C_COMMAND_INSTR(0x6c, 0x02), 928 + ILI9881C_COMMAND_INSTR(0x6d, 0x02), 929 + ILI9881C_COMMAND_INSTR(0x6e, 0x02), 930 + ILI9881C_COMMAND_INSTR(0x6f, 0x02), 931 931 932 932 ILI9881C_COMMAND_INSTR(0x70, 0x02), 933 933 ILI9881C_COMMAND_INSTR(0x71, 0x02), ··· 936 936 ILI9881C_COMMAND_INSTR(0x74, 0x02), 937 937 ILI9881C_COMMAND_INSTR(0x75, 0x14), 938 938 ILI9881C_COMMAND_INSTR(0x76, 0x15), 939 - ILI9881C_COMMAND_INSTR(0x77, 0x0F), 940 - ILI9881C_COMMAND_INSTR(0x78, 0x0E), 941 - ILI9881C_COMMAND_INSTR(0x79, 0x0D), 942 - ILI9881C_COMMAND_INSTR(0x7A, 0x0C), 943 - ILI9881C_COMMAND_INSTR(0x7B, 0x11), 944 - ILI9881C_COMMAND_INSTR(0x7C, 0x10), 945 - ILI9881C_COMMAND_INSTR(0x7D, 0x06), 946 - ILI9881C_COMMAND_INSTR(0x7E, 0x02), 947 - ILI9881C_COMMAND_INSTR(0x7F, 0x0A), 939 + ILI9881C_COMMAND_INSTR(0x77, 0x0f), 940 + ILI9881C_COMMAND_INSTR(0x78, 0x0e), 941 + 
ILI9881C_COMMAND_INSTR(0x79, 0x0d), 942 + ILI9881C_COMMAND_INSTR(0x7a, 0x0c), 943 + ILI9881C_COMMAND_INSTR(0x7b, 0x11), 944 + ILI9881C_COMMAND_INSTR(0x7c, 0x10), 945 + ILI9881C_COMMAND_INSTR(0x7d, 0x06), 946 + ILI9881C_COMMAND_INSTR(0x7e, 0x02), 947 + ILI9881C_COMMAND_INSTR(0x7f, 0x0a), 948 948 949 949 ILI9881C_COMMAND_INSTR(0x80, 0x02), 950 950 ILI9881C_COMMAND_INSTR(0x81, 0x02), ··· 956 956 ILI9881C_COMMAND_INSTR(0x87, 0x02), 957 957 ILI9881C_COMMAND_INSTR(0x88, 0x08), 958 958 ILI9881C_COMMAND_INSTR(0x89, 0x02), 959 - ILI9881C_COMMAND_INSTR(0x8A, 0x02), 959 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 960 960 961 961 ILI9881C_SWITCH_PAGE_INSTR(4), 962 962 ILI9881C_COMMAND_INSTR(0x00, 0x80), 963 963 ILI9881C_COMMAND_INSTR(0x70, 0x00), 964 964 ILI9881C_COMMAND_INSTR(0x71, 0x00), 965 - ILI9881C_COMMAND_INSTR(0x66, 0xFE), 965 + ILI9881C_COMMAND_INSTR(0x66, 0xfe), 966 966 ILI9881C_COMMAND_INSTR(0x82, 0x15), 967 967 ILI9881C_COMMAND_INSTR(0x84, 0x15), 968 968 ILI9881C_COMMAND_INSTR(0x85, 0x15), 969 969 ILI9881C_COMMAND_INSTR(0x3a, 0x24), 970 - ILI9881C_COMMAND_INSTR(0x32, 0xAC), 971 - ILI9881C_COMMAND_INSTR(0x8C, 0x80), 972 - ILI9881C_COMMAND_INSTR(0x3C, 0xF5), 970 + ILI9881C_COMMAND_INSTR(0x32, 0xac), 971 + ILI9881C_COMMAND_INSTR(0x8c, 0x80), 972 + ILI9881C_COMMAND_INSTR(0x3c, 0xf5), 973 973 ILI9881C_COMMAND_INSTR(0x88, 0x33), 974 974 975 975 ILI9881C_SWITCH_PAGE_INSTR(1), 976 - ILI9881C_COMMAND_INSTR(0x22, 0x0A), 976 + ILI9881C_COMMAND_INSTR(0x22, 0x0a), 977 977 ILI9881C_COMMAND_INSTR(0x31, 0x00), 978 978 ILI9881C_COMMAND_INSTR(0x53, 0x78), 979 - ILI9881C_COMMAND_INSTR(0x50, 0x5B), 980 - ILI9881C_COMMAND_INSTR(0x51, 0x5B), 979 + ILI9881C_COMMAND_INSTR(0x50, 0x5b), 980 + ILI9881C_COMMAND_INSTR(0x51, 0x5b), 981 981 ILI9881C_COMMAND_INSTR(0x60, 0x20), 982 982 ILI9881C_COMMAND_INSTR(0x61, 0x00), 983 - ILI9881C_COMMAND_INSTR(0x62, 0x0D), 983 + ILI9881C_COMMAND_INSTR(0x62, 0x0d), 984 984 ILI9881C_COMMAND_INSTR(0x63, 0x00), 985 985 986 - ILI9881C_COMMAND_INSTR(0xA0, 0x00), 987 - 
ILI9881C_COMMAND_INSTR(0xA1, 0x10), 988 - ILI9881C_COMMAND_INSTR(0xA2, 0x1C), 989 - ILI9881C_COMMAND_INSTR(0xA3, 0x13), 990 - ILI9881C_COMMAND_INSTR(0xA4, 0x15), 991 - ILI9881C_COMMAND_INSTR(0xA5, 0x26), 992 - ILI9881C_COMMAND_INSTR(0xA6, 0x1A), 993 - ILI9881C_COMMAND_INSTR(0xA7, 0x1D), 994 - ILI9881C_COMMAND_INSTR(0xA8, 0x67), 995 - ILI9881C_COMMAND_INSTR(0xA9, 0x1C), 996 - ILI9881C_COMMAND_INSTR(0xAA, 0x29), 997 - ILI9881C_COMMAND_INSTR(0xAB, 0x5B), 998 - ILI9881C_COMMAND_INSTR(0xAC, 0x26), 999 - ILI9881C_COMMAND_INSTR(0xAD, 0x28), 1000 - ILI9881C_COMMAND_INSTR(0xAE, 0x5C), 1001 - ILI9881C_COMMAND_INSTR(0xAF, 0x30), 1002 - ILI9881C_COMMAND_INSTR(0xB0, 0x31), 1003 - ILI9881C_COMMAND_INSTR(0xB1, 0x2E), 1004 - ILI9881C_COMMAND_INSTR(0xB2, 0x32), 1005 - ILI9881C_COMMAND_INSTR(0xB3, 0x00), 986 + ILI9881C_COMMAND_INSTR(0xa0, 0x00), 987 + ILI9881C_COMMAND_INSTR(0xa1, 0x10), 988 + ILI9881C_COMMAND_INSTR(0xa2, 0x1c), 989 + ILI9881C_COMMAND_INSTR(0xa3, 0x13), 990 + ILI9881C_COMMAND_INSTR(0xa4, 0x15), 991 + ILI9881C_COMMAND_INSTR(0xa5, 0x26), 992 + ILI9881C_COMMAND_INSTR(0xa6, 0x1a), 993 + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), 994 + ILI9881C_COMMAND_INSTR(0xa8, 0x67), 995 + ILI9881C_COMMAND_INSTR(0xa9, 0x1c), 996 + ILI9881C_COMMAND_INSTR(0xaa, 0x29), 997 + ILI9881C_COMMAND_INSTR(0xab, 0x5b), 998 + ILI9881C_COMMAND_INSTR(0xac, 0x26), 999 + ILI9881C_COMMAND_INSTR(0xad, 0x28), 1000 + ILI9881C_COMMAND_INSTR(0xae, 0x5c), 1001 + ILI9881C_COMMAND_INSTR(0xaf, 0x30), 1002 + ILI9881C_COMMAND_INSTR(0xb0, 0x31), 1003 + ILI9881C_COMMAND_INSTR(0xb1, 0x2e), 1004 + ILI9881C_COMMAND_INSTR(0xb2, 0x32), 1005 + ILI9881C_COMMAND_INSTR(0xb3, 0x00), 1006 1006 1007 - ILI9881C_COMMAND_INSTR(0xC0, 0x00), 1008 - ILI9881C_COMMAND_INSTR(0xC1, 0x10), 1009 - ILI9881C_COMMAND_INSTR(0xC2, 0x1C), 1010 - ILI9881C_COMMAND_INSTR(0xC3, 0x13), 1011 - ILI9881C_COMMAND_INSTR(0xC4, 0x15), 1012 - ILI9881C_COMMAND_INSTR(0xC5, 0x26), 1013 - ILI9881C_COMMAND_INSTR(0xC6, 0x1A), 1014 - ILI9881C_COMMAND_INSTR(0xC7, 0x1D), 
1015 - ILI9881C_COMMAND_INSTR(0xC8, 0x67), 1016 - ILI9881C_COMMAND_INSTR(0xC9, 0x1C), 1017 - ILI9881C_COMMAND_INSTR(0xCA, 0x29), 1018 - ILI9881C_COMMAND_INSTR(0xCB, 0x5B), 1019 - ILI9881C_COMMAND_INSTR(0xCC, 0x26), 1020 - ILI9881C_COMMAND_INSTR(0xCD, 0x28), 1021 - ILI9881C_COMMAND_INSTR(0xCE, 0x5C), 1022 - ILI9881C_COMMAND_INSTR(0xCF, 0x30), 1023 - ILI9881C_COMMAND_INSTR(0xD0, 0x31), 1024 - ILI9881C_COMMAND_INSTR(0xD1, 0x2E), 1025 - ILI9881C_COMMAND_INSTR(0xD2, 0x32), 1026 - ILI9881C_COMMAND_INSTR(0xD3, 0x00), 1007 + ILI9881C_COMMAND_INSTR(0xc0, 0x00), 1008 + ILI9881C_COMMAND_INSTR(0xc1, 0x10), 1009 + ILI9881C_COMMAND_INSTR(0xc2, 0x1c), 1010 + ILI9881C_COMMAND_INSTR(0xc3, 0x13), 1011 + ILI9881C_COMMAND_INSTR(0xc4, 0x15), 1012 + ILI9881C_COMMAND_INSTR(0xc5, 0x26), 1013 + ILI9881C_COMMAND_INSTR(0xc6, 0x1a), 1014 + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), 1015 + ILI9881C_COMMAND_INSTR(0xc8, 0x67), 1016 + ILI9881C_COMMAND_INSTR(0xc9, 0x1c), 1017 + ILI9881C_COMMAND_INSTR(0xca, 0x29), 1018 + ILI9881C_COMMAND_INSTR(0xcb, 0x5b), 1019 + ILI9881C_COMMAND_INSTR(0xcc, 0x26), 1020 + ILI9881C_COMMAND_INSTR(0xcd, 0x28), 1021 + ILI9881C_COMMAND_INSTR(0xce, 0x5c), 1022 + ILI9881C_COMMAND_INSTR(0xcf, 0x30), 1023 + ILI9881C_COMMAND_INSTR(0xd0, 0x31), 1024 + ILI9881C_COMMAND_INSTR(0xd1, 0x2e), 1025 + ILI9881C_COMMAND_INSTR(0xd2, 0x32), 1026 + ILI9881C_COMMAND_INSTR(0xd3, 0x00), 1027 1027 ILI9881C_SWITCH_PAGE_INSTR(0), 1028 1028 }; 1029 1029 ··· 1032 1032 ILI9881C_COMMAND_INSTR(0x01, 0x00), 1033 1033 ILI9881C_COMMAND_INSTR(0x02, 0x00), 1034 1034 ILI9881C_COMMAND_INSTR(0x03, 0x73), 1035 - ILI9881C_COMMAND_INSTR(0x04, 0xD3), 1035 + ILI9881C_COMMAND_INSTR(0x04, 0xd3), 1036 1036 ILI9881C_COMMAND_INSTR(0x05, 0x00), 1037 - ILI9881C_COMMAND_INSTR(0x06, 0x0A), 1038 - ILI9881C_COMMAND_INSTR(0x07, 0x0E), 1037 + ILI9881C_COMMAND_INSTR(0x06, 0x0a), 1038 + ILI9881C_COMMAND_INSTR(0x07, 0x0e), 1039 1039 ILI9881C_COMMAND_INSTR(0x08, 0x00), 1040 1040 ILI9881C_COMMAND_INSTR(0x09, 0x01), 1041 1041 
ILI9881C_COMMAND_INSTR(0x0a, 0x01), ··· 1117 1117 ILI9881C_COMMAND_INSTR(0x5f, 0x02), 1118 1118 ILI9881C_COMMAND_INSTR(0x60, 0x00), 1119 1119 ILI9881C_COMMAND_INSTR(0x61, 0x01), 1120 - ILI9881C_COMMAND_INSTR(0x62, 0x0D), 1121 - ILI9881C_COMMAND_INSTR(0x63, 0x0C), 1122 - ILI9881C_COMMAND_INSTR(0x64, 0x0F), 1123 - ILI9881C_COMMAND_INSTR(0x65, 0x0E), 1120 + ILI9881C_COMMAND_INSTR(0x62, 0x0d), 1121 + ILI9881C_COMMAND_INSTR(0x63, 0x0c), 1122 + ILI9881C_COMMAND_INSTR(0x64, 0x0f), 1123 + ILI9881C_COMMAND_INSTR(0x65, 0x0e), 1124 1124 ILI9881C_COMMAND_INSTR(0x66, 0x06), 1125 1125 ILI9881C_COMMAND_INSTR(0x67, 0x07), 1126 1126 ILI9881C_COMMAND_INSTR(0x68, 0x02), ··· 1139 1139 ILI9881C_COMMAND_INSTR(0x75, 0x02), 1140 1140 ILI9881C_COMMAND_INSTR(0x76, 0x00), 1141 1141 ILI9881C_COMMAND_INSTR(0x77, 0x01), 1142 - ILI9881C_COMMAND_INSTR(0x78, 0x0D), 1143 - ILI9881C_COMMAND_INSTR(0x79, 0x0C), 1144 - ILI9881C_COMMAND_INSTR(0x7a, 0x0F), 1145 - ILI9881C_COMMAND_INSTR(0x7b, 0x0E), 1142 + ILI9881C_COMMAND_INSTR(0x78, 0x0d), 1143 + ILI9881C_COMMAND_INSTR(0x79, 0x0c), 1144 + ILI9881C_COMMAND_INSTR(0x7a, 0x0f), 1145 + ILI9881C_COMMAND_INSTR(0x7b, 0x0e), 1146 1146 ILI9881C_COMMAND_INSTR(0x7c, 0x06), 1147 1147 ILI9881C_COMMAND_INSTR(0x7d, 0x07), 1148 1148 ILI9881C_COMMAND_INSTR(0x7e, 0x02), ··· 1157 1157 ILI9881C_COMMAND_INSTR(0x87, 0x02), 1158 1158 ILI9881C_COMMAND_INSTR(0x88, 0x02), 1159 1159 ILI9881C_COMMAND_INSTR(0x89, 0x02), 1160 - ILI9881C_COMMAND_INSTR(0x8A, 0x02), 1160 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 1161 1161 1162 1162 ILI9881C_SWITCH_PAGE_INSTR(4), 1163 1163 ILI9881C_COMMAND_INSTR(0x6c, 0x15), ··· 1170 1170 ILI9881C_COMMAND_INSTR(0xb2, 0xd1), 1171 1171 1172 1172 ILI9881C_SWITCH_PAGE_INSTR(1), 1173 - ILI9881C_COMMAND_INSTR(0x22, 0x0A), 1174 - ILI9881C_COMMAND_INSTR(0x31, 0x0B), 1173 + ILI9881C_COMMAND_INSTR(0x22, 0x0a), 1174 + ILI9881C_COMMAND_INSTR(0x31, 0x0b), 1175 1175 ILI9881C_COMMAND_INSTR(0x50, 0xa5), 1176 1176 ILI9881C_COMMAND_INSTR(0x51, 0xa0), 1177 1177 
ILI9881C_COMMAND_INSTR(0x53, 0x70), 1178 - ILI9881C_COMMAND_INSTR(0x55, 0x7A), 1178 + ILI9881C_COMMAND_INSTR(0x55, 0x7a), 1179 1179 ILI9881C_COMMAND_INSTR(0x60, 0x14), 1180 1180 1181 - ILI9881C_COMMAND_INSTR(0xA0, 0x00), 1182 - ILI9881C_COMMAND_INSTR(0xA1, 0x53), 1183 - ILI9881C_COMMAND_INSTR(0xA2, 0x50), 1184 - ILI9881C_COMMAND_INSTR(0xA3, 0x20), 1185 - ILI9881C_COMMAND_INSTR(0xA4, 0x27), 1186 - ILI9881C_COMMAND_INSTR(0xA5, 0x33), 1187 - ILI9881C_COMMAND_INSTR(0xA6, 0x25), 1188 - ILI9881C_COMMAND_INSTR(0xA7, 0x25), 1189 - ILI9881C_COMMAND_INSTR(0xA8, 0xD4), 1190 - ILI9881C_COMMAND_INSTR(0xA9, 0x1A), 1191 - ILI9881C_COMMAND_INSTR(0xAA, 0x2B), 1192 - ILI9881C_COMMAND_INSTR(0xAB, 0xB5), 1193 - ILI9881C_COMMAND_INSTR(0xAC, 0x19), 1194 - ILI9881C_COMMAND_INSTR(0xAD, 0x18), 1195 - ILI9881C_COMMAND_INSTR(0xAE, 0x53), 1196 - ILI9881C_COMMAND_INSTR(0xAF, 0x1A), 1197 - ILI9881C_COMMAND_INSTR(0xB0, 0x25), 1198 - ILI9881C_COMMAND_INSTR(0xB1, 0x62), 1199 - ILI9881C_COMMAND_INSTR(0xB2, 0x6A), 1200 - ILI9881C_COMMAND_INSTR(0xB3, 0x31), 1181 + ILI9881C_COMMAND_INSTR(0xa0, 0x00), 1182 + ILI9881C_COMMAND_INSTR(0xa1, 0x53), 1183 + ILI9881C_COMMAND_INSTR(0xa2, 0x50), 1184 + ILI9881C_COMMAND_INSTR(0xa3, 0x20), 1185 + ILI9881C_COMMAND_INSTR(0xa4, 0x27), 1186 + ILI9881C_COMMAND_INSTR(0xa5, 0x33), 1187 + ILI9881C_COMMAND_INSTR(0xa6, 0x25), 1188 + ILI9881C_COMMAND_INSTR(0xa7, 0x25), 1189 + ILI9881C_COMMAND_INSTR(0xa8, 0xd4), 1190 + ILI9881C_COMMAND_INSTR(0xa9, 0x1a), 1191 + ILI9881C_COMMAND_INSTR(0xaa, 0x2b), 1192 + ILI9881C_COMMAND_INSTR(0xab, 0xb5), 1193 + ILI9881C_COMMAND_INSTR(0xac, 0x19), 1194 + ILI9881C_COMMAND_INSTR(0xad, 0x18), 1195 + ILI9881C_COMMAND_INSTR(0xae, 0x53), 1196 + ILI9881C_COMMAND_INSTR(0xaf, 0x1a), 1197 + ILI9881C_COMMAND_INSTR(0xb0, 0x25), 1198 + ILI9881C_COMMAND_INSTR(0xb1, 0x62), 1199 + ILI9881C_COMMAND_INSTR(0xb2, 0x6a), 1200 + ILI9881C_COMMAND_INSTR(0xb3, 0x31), 1201 1201 1202 - ILI9881C_COMMAND_INSTR(0xC0, 0x00), 1203 - ILI9881C_COMMAND_INSTR(0xC1, 0x53), 1204 
- ILI9881C_COMMAND_INSTR(0xC2, 0x50), 1205 - ILI9881C_COMMAND_INSTR(0xC3, 0x20), 1206 - ILI9881C_COMMAND_INSTR(0xC4, 0x27), 1207 - ILI9881C_COMMAND_INSTR(0xC5, 0x33), 1208 - ILI9881C_COMMAND_INSTR(0xC6, 0x25), 1209 - ILI9881C_COMMAND_INSTR(0xC7, 0x25), 1210 - ILI9881C_COMMAND_INSTR(0xC8, 0xD4), 1211 - ILI9881C_COMMAND_INSTR(0xC9, 0x1A), 1212 - ILI9881C_COMMAND_INSTR(0xCA, 0x2B), 1213 - ILI9881C_COMMAND_INSTR(0xCB, 0xB5), 1214 - ILI9881C_COMMAND_INSTR(0xCC, 0x19), 1215 - ILI9881C_COMMAND_INSTR(0xCD, 0x18), 1216 - ILI9881C_COMMAND_INSTR(0xCE, 0x53), 1217 - ILI9881C_COMMAND_INSTR(0xCF, 0x1A), 1218 - ILI9881C_COMMAND_INSTR(0xD0, 0x25), 1219 - ILI9881C_COMMAND_INSTR(0xD1, 0x62), 1220 - ILI9881C_COMMAND_INSTR(0xD2, 0x6A), 1221 - ILI9881C_COMMAND_INSTR(0xD3, 0x31), 1202 + ILI9881C_COMMAND_INSTR(0xc0, 0x00), 1203 + ILI9881C_COMMAND_INSTR(0xc1, 0x53), 1204 + ILI9881C_COMMAND_INSTR(0xc2, 0x50), 1205 + ILI9881C_COMMAND_INSTR(0xc3, 0x20), 1206 + ILI9881C_COMMAND_INSTR(0xc4, 0x27), 1207 + ILI9881C_COMMAND_INSTR(0xc5, 0x33), 1208 + ILI9881C_COMMAND_INSTR(0xc6, 0x25), 1209 + ILI9881C_COMMAND_INSTR(0xc7, 0x25), 1210 + ILI9881C_COMMAND_INSTR(0xc8, 0xd4), 1211 + ILI9881C_COMMAND_INSTR(0xc9, 0x1a), 1212 + ILI9881C_COMMAND_INSTR(0xca, 0x2b), 1213 + ILI9881C_COMMAND_INSTR(0xcb, 0xb5), 1214 + ILI9881C_COMMAND_INSTR(0xcc, 0x19), 1215 + ILI9881C_COMMAND_INSTR(0xcd, 0x18), 1216 + ILI9881C_COMMAND_INSTR(0xce, 0x53), 1217 + ILI9881C_COMMAND_INSTR(0xcf, 0x1a), 1218 + ILI9881C_COMMAND_INSTR(0xd0, 0x25), 1219 + ILI9881C_COMMAND_INSTR(0xd1, 0x62), 1220 + ILI9881C_COMMAND_INSTR(0xd2, 0x6a), 1221 + ILI9881C_COMMAND_INSTR(0xd3, 0x31), 1222 1222 ILI9881C_SWITCH_PAGE_INSTR(0), 1223 1223 ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c), 1224 1224 ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00), 1225 + }; 1226 + 1227 + static const struct ili9881c_instr rpi_5inch_init[] = { 1228 + ILI9881C_SWITCH_PAGE_INSTR(3), 1229 + ILI9881C_COMMAND_INSTR(0x01, 0x00), 1230 + 
ILI9881C_COMMAND_INSTR(0x02, 0x00), 1231 + ILI9881C_COMMAND_INSTR(0x03, 0x73), 1232 + ILI9881C_COMMAND_INSTR(0x04, 0x73), 1233 + ILI9881C_COMMAND_INSTR(0x05, 0x00), 1234 + ILI9881C_COMMAND_INSTR(0x06, 0x06), 1235 + ILI9881C_COMMAND_INSTR(0x07, 0x02), 1236 + ILI9881C_COMMAND_INSTR(0x08, 0x00), 1237 + ILI9881C_COMMAND_INSTR(0x09, 0x01), 1238 + ILI9881C_COMMAND_INSTR(0x0a, 0x01), 1239 + ILI9881C_COMMAND_INSTR(0x0b, 0x01), 1240 + ILI9881C_COMMAND_INSTR(0x0c, 0x01), 1241 + ILI9881C_COMMAND_INSTR(0x0d, 0x01), 1242 + ILI9881C_COMMAND_INSTR(0x0e, 0x01), 1243 + ILI9881C_COMMAND_INSTR(0x0f, 0x01), 1244 + ILI9881C_COMMAND_INSTR(0x10, 0x01), 1245 + ILI9881C_COMMAND_INSTR(0x11, 0x00), 1246 + ILI9881C_COMMAND_INSTR(0x12, 0x00), 1247 + ILI9881C_COMMAND_INSTR(0x13, 0x01), 1248 + ILI9881C_COMMAND_INSTR(0x14, 0x00), 1249 + ILI9881C_COMMAND_INSTR(0x15, 0x00), 1250 + ILI9881C_COMMAND_INSTR(0x16, 0x00), 1251 + ILI9881C_COMMAND_INSTR(0x17, 0x00), 1252 + ILI9881C_COMMAND_INSTR(0x18, 0x00), 1253 + ILI9881C_COMMAND_INSTR(0x19, 0x00), 1254 + ILI9881C_COMMAND_INSTR(0x1a, 0x00), 1255 + ILI9881C_COMMAND_INSTR(0x1b, 0x00), 1256 + ILI9881C_COMMAND_INSTR(0x1c, 0x00), 1257 + ILI9881C_COMMAND_INSTR(0x1d, 0x00), 1258 + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), 1259 + ILI9881C_COMMAND_INSTR(0x1f, 0x80), 1260 + ILI9881C_COMMAND_INSTR(0x20, 0x04), 1261 + ILI9881C_COMMAND_INSTR(0x21, 0x03), 1262 + ILI9881C_COMMAND_INSTR(0x22, 0x00), 1263 + ILI9881C_COMMAND_INSTR(0x23, 0x00), 1264 + ILI9881C_COMMAND_INSTR(0x24, 0x00), 1265 + ILI9881C_COMMAND_INSTR(0x25, 0x00), 1266 + ILI9881C_COMMAND_INSTR(0x26, 0x00), 1267 + ILI9881C_COMMAND_INSTR(0x27, 0x00), 1268 + ILI9881C_COMMAND_INSTR(0x28, 0x33), 1269 + ILI9881C_COMMAND_INSTR(0x29, 0x03), 1270 + ILI9881C_COMMAND_INSTR(0x2a, 0x00), 1271 + ILI9881C_COMMAND_INSTR(0x2b, 0x00), 1272 + ILI9881C_COMMAND_INSTR(0x2c, 0x00), 1273 + ILI9881C_COMMAND_INSTR(0x2d, 0x00), 1274 + ILI9881C_COMMAND_INSTR(0x2e, 0x00), 1275 + ILI9881C_COMMAND_INSTR(0x2f, 0x00), 1276 + 
ILI9881C_COMMAND_INSTR(0x30, 0x00), 1277 + ILI9881C_COMMAND_INSTR(0x31, 0x00), 1278 + ILI9881C_COMMAND_INSTR(0x32, 0x00), 1279 + ILI9881C_COMMAND_INSTR(0x33, 0x00), 1280 + ILI9881C_COMMAND_INSTR(0x34, 0x03), 1281 + ILI9881C_COMMAND_INSTR(0x35, 0x00), 1282 + ILI9881C_COMMAND_INSTR(0x36, 0x03), 1283 + ILI9881C_COMMAND_INSTR(0x37, 0x00), 1284 + ILI9881C_COMMAND_INSTR(0x38, 0x00), 1285 + ILI9881C_COMMAND_INSTR(0x39, 0x00), 1286 + ILI9881C_COMMAND_INSTR(0x3a, 0x00), 1287 + ILI9881C_COMMAND_INSTR(0x3b, 0x00), 1288 + ILI9881C_COMMAND_INSTR(0x3c, 0x00), 1289 + ILI9881C_COMMAND_INSTR(0x3d, 0x00), 1290 + ILI9881C_COMMAND_INSTR(0x3e, 0x00), 1291 + ILI9881C_COMMAND_INSTR(0x3f, 0x00), 1292 + ILI9881C_COMMAND_INSTR(0x40, 0x00), 1293 + ILI9881C_COMMAND_INSTR(0x41, 0x00), 1294 + ILI9881C_COMMAND_INSTR(0x42, 0x00), 1295 + ILI9881C_COMMAND_INSTR(0x43, 0x00), 1296 + ILI9881C_COMMAND_INSTR(0x44, 0x00), 1297 + ILI9881C_COMMAND_INSTR(0x50, 0x01), 1298 + ILI9881C_COMMAND_INSTR(0x51, 0x23), 1299 + ILI9881C_COMMAND_INSTR(0x52, 0x45), 1300 + ILI9881C_COMMAND_INSTR(0x53, 0x67), 1301 + ILI9881C_COMMAND_INSTR(0x54, 0x89), 1302 + ILI9881C_COMMAND_INSTR(0x55, 0xab), 1303 + ILI9881C_COMMAND_INSTR(0x56, 0x01), 1304 + ILI9881C_COMMAND_INSTR(0x57, 0x23), 1305 + ILI9881C_COMMAND_INSTR(0x58, 0x45), 1306 + ILI9881C_COMMAND_INSTR(0x59, 0x67), 1307 + ILI9881C_COMMAND_INSTR(0x5a, 0x89), 1308 + ILI9881C_COMMAND_INSTR(0x5b, 0xab), 1309 + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), 1310 + ILI9881C_COMMAND_INSTR(0x5d, 0xef), 1311 + ILI9881C_COMMAND_INSTR(0x5e, 0x10), 1312 + ILI9881C_COMMAND_INSTR(0x5f, 0x09), 1313 + ILI9881C_COMMAND_INSTR(0x60, 0x08), 1314 + ILI9881C_COMMAND_INSTR(0x61, 0x0f), 1315 + ILI9881C_COMMAND_INSTR(0x62, 0x0e), 1316 + ILI9881C_COMMAND_INSTR(0x63, 0x0d), 1317 + ILI9881C_COMMAND_INSTR(0x64, 0x0c), 1318 + ILI9881C_COMMAND_INSTR(0x65, 0x02), 1319 + ILI9881C_COMMAND_INSTR(0x66, 0x02), 1320 + ILI9881C_COMMAND_INSTR(0x67, 0x02), 1321 + ILI9881C_COMMAND_INSTR(0x68, 0x02), 1322 + 
ILI9881C_COMMAND_INSTR(0x69, 0x02), 1323 + ILI9881C_COMMAND_INSTR(0x6a, 0x02), 1324 + ILI9881C_COMMAND_INSTR(0x6b, 0x02), 1325 + ILI9881C_COMMAND_INSTR(0x6c, 0x02), 1326 + ILI9881C_COMMAND_INSTR(0x6d, 0x02), 1327 + ILI9881C_COMMAND_INSTR(0x6e, 0x02), 1328 + ILI9881C_COMMAND_INSTR(0x6f, 0x02), 1329 + ILI9881C_COMMAND_INSTR(0x70, 0x02), 1330 + ILI9881C_COMMAND_INSTR(0x71, 0x06), 1331 + ILI9881C_COMMAND_INSTR(0x72, 0x07), 1332 + ILI9881C_COMMAND_INSTR(0x73, 0x02), 1333 + ILI9881C_COMMAND_INSTR(0x74, 0x02), 1334 + ILI9881C_COMMAND_INSTR(0x75, 0x06), 1335 + ILI9881C_COMMAND_INSTR(0x76, 0x07), 1336 + ILI9881C_COMMAND_INSTR(0x77, 0x0e), 1337 + ILI9881C_COMMAND_INSTR(0x78, 0x0f), 1338 + ILI9881C_COMMAND_INSTR(0x79, 0x0c), 1339 + ILI9881C_COMMAND_INSTR(0x7a, 0x0d), 1340 + ILI9881C_COMMAND_INSTR(0x7b, 0x02), 1341 + ILI9881C_COMMAND_INSTR(0x7c, 0x02), 1342 + ILI9881C_COMMAND_INSTR(0x7d, 0x02), 1343 + ILI9881C_COMMAND_INSTR(0x7e, 0x02), 1344 + ILI9881C_COMMAND_INSTR(0x7f, 0x02), 1345 + ILI9881C_COMMAND_INSTR(0x80, 0x02), 1346 + ILI9881C_COMMAND_INSTR(0x81, 0x02), 1347 + ILI9881C_COMMAND_INSTR(0x82, 0x02), 1348 + ILI9881C_COMMAND_INSTR(0x83, 0x02), 1349 + ILI9881C_COMMAND_INSTR(0x84, 0x02), 1350 + ILI9881C_COMMAND_INSTR(0x85, 0x02), 1351 + ILI9881C_COMMAND_INSTR(0x86, 0x02), 1352 + ILI9881C_COMMAND_INSTR(0x87, 0x09), 1353 + ILI9881C_COMMAND_INSTR(0x88, 0x08), 1354 + ILI9881C_COMMAND_INSTR(0x89, 0x02), 1355 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 1356 + ILI9881C_SWITCH_PAGE_INSTR(4), 1357 + ILI9881C_COMMAND_INSTR(0x6c, 0x15), 1358 + ILI9881C_COMMAND_INSTR(0x6e, 0x2a), 1359 + ILI9881C_COMMAND_INSTR(0x6f, 0x57), 1360 + ILI9881C_COMMAND_INSTR(0x3a, 0xa4), 1361 + ILI9881C_COMMAND_INSTR(0x8d, 0x1a), 1362 + ILI9881C_COMMAND_INSTR(0x87, 0xba), 1363 + ILI9881C_COMMAND_INSTR(0x26, 0x76), 1364 + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), 1365 + ILI9881C_SWITCH_PAGE_INSTR(1), 1366 + ILI9881C_COMMAND_INSTR(0x22, 0x0a), 1367 + ILI9881C_COMMAND_INSTR(0x31, 0x00), 1368 + ILI9881C_COMMAND_INSTR(0x53, 
0x35), 1369 + ILI9881C_COMMAND_INSTR(0x55, 0x50), 1370 + ILI9881C_COMMAND_INSTR(0x50, 0xaf), 1371 + ILI9881C_COMMAND_INSTR(0x51, 0xaf), 1372 + ILI9881C_COMMAND_INSTR(0x60, 0x14), 1373 + ILI9881C_COMMAND_INSTR(0xa0, 0x08), 1374 + ILI9881C_COMMAND_INSTR(0xa1, 0x1d), 1375 + ILI9881C_COMMAND_INSTR(0xa2, 0x2c), 1376 + ILI9881C_COMMAND_INSTR(0xa3, 0x14), 1377 + ILI9881C_COMMAND_INSTR(0xa4, 0x19), 1378 + ILI9881C_COMMAND_INSTR(0xa5, 0x2e), 1379 + ILI9881C_COMMAND_INSTR(0xa6, 0x22), 1380 + ILI9881C_COMMAND_INSTR(0xa7, 0x23), 1381 + ILI9881C_COMMAND_INSTR(0xa8, 0x97), 1382 + ILI9881C_COMMAND_INSTR(0xa9, 0x1e), 1383 + ILI9881C_COMMAND_INSTR(0xaa, 0x29), 1384 + ILI9881C_COMMAND_INSTR(0xab, 0x7b), 1385 + ILI9881C_COMMAND_INSTR(0xac, 0x18), 1386 + ILI9881C_COMMAND_INSTR(0xad, 0x17), 1387 + ILI9881C_COMMAND_INSTR(0xae, 0x4b), 1388 + ILI9881C_COMMAND_INSTR(0xaf, 0x1f), 1389 + ILI9881C_COMMAND_INSTR(0xb0, 0x27), 1390 + ILI9881C_COMMAND_INSTR(0xb1, 0x52), 1391 + ILI9881C_COMMAND_INSTR(0xb2, 0x63), 1392 + ILI9881C_COMMAND_INSTR(0xb3, 0x39), 1393 + ILI9881C_COMMAND_INSTR(0xc0, 0x08), 1394 + ILI9881C_COMMAND_INSTR(0xc1, 0x1d), 1395 + ILI9881C_COMMAND_INSTR(0xc2, 0x2c), 1396 + ILI9881C_COMMAND_INSTR(0xc3, 0x14), 1397 + ILI9881C_COMMAND_INSTR(0xc4, 0x19), 1398 + ILI9881C_COMMAND_INSTR(0xc5, 0x2e), 1399 + ILI9881C_COMMAND_INSTR(0xc6, 0x22), 1400 + ILI9881C_COMMAND_INSTR(0xc7, 0x23), 1401 + ILI9881C_COMMAND_INSTR(0xc8, 0x97), 1402 + ILI9881C_COMMAND_INSTR(0xc9, 0x1e), 1403 + ILI9881C_COMMAND_INSTR(0xca, 0x29), 1404 + ILI9881C_COMMAND_INSTR(0xcb, 0x7b), 1405 + ILI9881C_COMMAND_INSTR(0xcc, 0x18), 1406 + ILI9881C_COMMAND_INSTR(0xcd, 0x17), 1407 + ILI9881C_COMMAND_INSTR(0xce, 0x4b), 1408 + ILI9881C_COMMAND_INSTR(0xcf, 0x1f), 1409 + ILI9881C_COMMAND_INSTR(0xd0, 0x27), 1410 + ILI9881C_COMMAND_INSTR(0xd1, 0x52), 1411 + ILI9881C_COMMAND_INSTR(0xd2, 0x63), 1412 + ILI9881C_COMMAND_INSTR(0xd3, 0x39), 1225 1413 }; 1226 1414 1227 1415 static const struct ili9881c_instr rpi_7inch_init[] = { ··· 1540 
1352 ILI9881C_COMMAND_INSTR(0x87, 0x02), 1541 1353 ILI9881C_COMMAND_INSTR(0x88, 0x02), 1542 1354 ILI9881C_COMMAND_INSTR(0x89, 0x02), 1543 - ILI9881C_COMMAND_INSTR(0x8A, 0x02), 1355 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 1544 1356 ILI9881C_SWITCH_PAGE_INSTR(4), 1545 - ILI9881C_COMMAND_INSTR(0x6C, 0x15), 1546 - ILI9881C_COMMAND_INSTR(0x6E, 0x2A), 1547 - ILI9881C_COMMAND_INSTR(0x6F, 0x33), 1548 - ILI9881C_COMMAND_INSTR(0x3B, 0x98), 1357 + ILI9881C_COMMAND_INSTR(0x6c, 0x15), 1358 + ILI9881C_COMMAND_INSTR(0x6e, 0x2a), 1359 + ILI9881C_COMMAND_INSTR(0x6f, 0x33), 1360 + ILI9881C_COMMAND_INSTR(0x3b, 0x98), 1549 1361 ILI9881C_COMMAND_INSTR(0x3a, 0x94), 1550 - ILI9881C_COMMAND_INSTR(0x8D, 0x14), 1551 - ILI9881C_COMMAND_INSTR(0x87, 0xBA), 1362 + ILI9881C_COMMAND_INSTR(0x8d, 0x14), 1363 + ILI9881C_COMMAND_INSTR(0x87, 0xba), 1552 1364 ILI9881C_COMMAND_INSTR(0x26, 0x76), 1553 - ILI9881C_COMMAND_INSTR(0xB2, 0xD1), 1554 - ILI9881C_COMMAND_INSTR(0xB5, 0x06), 1365 + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), 1366 + ILI9881C_COMMAND_INSTR(0xb5, 0x06), 1555 1367 ILI9881C_COMMAND_INSTR(0x38, 0x01), 1556 1368 ILI9881C_COMMAND_INSTR(0x39, 0x00), 1557 1369 ILI9881C_SWITCH_PAGE_INSTR(1), 1558 - ILI9881C_COMMAND_INSTR(0x22, 0x0A), 1370 + ILI9881C_COMMAND_INSTR(0x22, 0x0a), 1559 1371 ILI9881C_COMMAND_INSTR(0x31, 0x00), 1560 1372 ILI9881C_COMMAND_INSTR(0x53, 0x7d), 1561 1373 ILI9881C_COMMAND_INSTR(0x55, 0x8f), ··· 1563 1375 ILI9881C_COMMAND_INSTR(0x50, 0x96), 1564 1376 ILI9881C_COMMAND_INSTR(0x51, 0x96), 1565 1377 ILI9881C_COMMAND_INSTR(0x60, 0x23), 1566 - ILI9881C_COMMAND_INSTR(0xA0, 0x08), 1567 - ILI9881C_COMMAND_INSTR(0xA1, 0x1d), 1568 - ILI9881C_COMMAND_INSTR(0xA2, 0x2a), 1569 - ILI9881C_COMMAND_INSTR(0xA3, 0x10), 1570 - ILI9881C_COMMAND_INSTR(0xA4, 0x15), 1571 - ILI9881C_COMMAND_INSTR(0xA5, 0x28), 1572 - ILI9881C_COMMAND_INSTR(0xA6, 0x1c), 1573 - ILI9881C_COMMAND_INSTR(0xA7, 0x1d), 1574 - ILI9881C_COMMAND_INSTR(0xA8, 0x7e), 1575 - ILI9881C_COMMAND_INSTR(0xA9, 0x1d), 1576 - 
ILI9881C_COMMAND_INSTR(0xAA, 0x29), 1577 - ILI9881C_COMMAND_INSTR(0xAB, 0x6b), 1578 - ILI9881C_COMMAND_INSTR(0xAC, 0x1a), 1579 - ILI9881C_COMMAND_INSTR(0xAD, 0x18), 1580 - ILI9881C_COMMAND_INSTR(0xAE, 0x4b), 1581 - ILI9881C_COMMAND_INSTR(0xAF, 0x20), 1582 - ILI9881C_COMMAND_INSTR(0xB0, 0x27), 1583 - ILI9881C_COMMAND_INSTR(0xB1, 0x50), 1584 - ILI9881C_COMMAND_INSTR(0xB2, 0x64), 1585 - ILI9881C_COMMAND_INSTR(0xB3, 0x39), 1586 - ILI9881C_COMMAND_INSTR(0xC0, 0x08), 1587 - ILI9881C_COMMAND_INSTR(0xC1, 0x1d), 1588 - ILI9881C_COMMAND_INSTR(0xC2, 0x2a), 1589 - ILI9881C_COMMAND_INSTR(0xC3, 0x10), 1590 - ILI9881C_COMMAND_INSTR(0xC4, 0x15), 1591 - ILI9881C_COMMAND_INSTR(0xC5, 0x28), 1592 - ILI9881C_COMMAND_INSTR(0xC6, 0x1c), 1593 - ILI9881C_COMMAND_INSTR(0xC7, 0x1d), 1594 - ILI9881C_COMMAND_INSTR(0xC8, 0x7e), 1595 - ILI9881C_COMMAND_INSTR(0xC9, 0x1d), 1596 - ILI9881C_COMMAND_INSTR(0xCA, 0x29), 1597 - ILI9881C_COMMAND_INSTR(0xCB, 0x6b), 1598 - ILI9881C_COMMAND_INSTR(0xCC, 0x1a), 1599 - ILI9881C_COMMAND_INSTR(0xCD, 0x18), 1600 - ILI9881C_COMMAND_INSTR(0xCE, 0x4b), 1601 - ILI9881C_COMMAND_INSTR(0xCF, 0x20), 1602 - ILI9881C_COMMAND_INSTR(0xD0, 0x27), 1603 - ILI9881C_COMMAND_INSTR(0xD1, 0x50), 1604 - ILI9881C_COMMAND_INSTR(0xD2, 0x64), 1605 - ILI9881C_COMMAND_INSTR(0xD3, 0x39), 1378 + ILI9881C_COMMAND_INSTR(0xa0, 0x08), 1379 + ILI9881C_COMMAND_INSTR(0xa1, 0x1d), 1380 + ILI9881C_COMMAND_INSTR(0xa2, 0x2a), 1381 + ILI9881C_COMMAND_INSTR(0xa3, 0x10), 1382 + ILI9881C_COMMAND_INSTR(0xa4, 0x15), 1383 + ILI9881C_COMMAND_INSTR(0xa5, 0x28), 1384 + ILI9881C_COMMAND_INSTR(0xa6, 0x1c), 1385 + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), 1386 + ILI9881C_COMMAND_INSTR(0xa8, 0x7e), 1387 + ILI9881C_COMMAND_INSTR(0xa9, 0x1d), 1388 + ILI9881C_COMMAND_INSTR(0xaa, 0x29), 1389 + ILI9881C_COMMAND_INSTR(0xab, 0x6b), 1390 + ILI9881C_COMMAND_INSTR(0xac, 0x1a), 1391 + ILI9881C_COMMAND_INSTR(0xad, 0x18), 1392 + ILI9881C_COMMAND_INSTR(0xae, 0x4b), 1393 + ILI9881C_COMMAND_INSTR(0xaf, 0x20), 1394 + 
ILI9881C_COMMAND_INSTR(0xb0, 0x27), 1395 + ILI9881C_COMMAND_INSTR(0xb1, 0x50), 1396 + ILI9881C_COMMAND_INSTR(0xb2, 0x64), 1397 + ILI9881C_COMMAND_INSTR(0xb3, 0x39), 1398 + ILI9881C_COMMAND_INSTR(0xc0, 0x08), 1399 + ILI9881C_COMMAND_INSTR(0xc1, 0x1d), 1400 + ILI9881C_COMMAND_INSTR(0xc2, 0x2a), 1401 + ILI9881C_COMMAND_INSTR(0xc3, 0x10), 1402 + ILI9881C_COMMAND_INSTR(0xc4, 0x15), 1403 + ILI9881C_COMMAND_INSTR(0xc5, 0x28), 1404 + ILI9881C_COMMAND_INSTR(0xc6, 0x1c), 1405 + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), 1406 + ILI9881C_COMMAND_INSTR(0xc8, 0x7e), 1407 + ILI9881C_COMMAND_INSTR(0xc9, 0x1d), 1408 + ILI9881C_COMMAND_INSTR(0xca, 0x29), 1409 + ILI9881C_COMMAND_INSTR(0xcb, 0x6b), 1410 + ILI9881C_COMMAND_INSTR(0xcc, 0x1a), 1411 + ILI9881C_COMMAND_INSTR(0xcd, 0x18), 1412 + ILI9881C_COMMAND_INSTR(0xce, 0x4b), 1413 + ILI9881C_COMMAND_INSTR(0xcf, 0x20), 1414 + ILI9881C_COMMAND_INSTR(0xd0, 0x27), 1415 + ILI9881C_COMMAND_INSTR(0xd1, 0x50), 1416 + ILI9881C_COMMAND_INSTR(0xd2, 0x64), 1417 + ILI9881C_COMMAND_INSTR(0xd3, 0x39), 1606 1418 }; 1607 1419 1608 1420 static const struct ili9881c_instr bsd1218_a101kl68_init[] = { ··· 1994 1806 .height_mm = 151, 1995 1807 }; 1996 1808 1809 + static const struct drm_display_mode rpi_5inch_default_mode = { 1810 + .clock = 83333, 1811 + 1812 + .hdisplay = 720, 1813 + .hsync_start = 720 + 110, 1814 + .hsync_end = 720 + 110 + 12, 1815 + .htotal = 720 + 110 + 12 + 95, 1816 + 1817 + .vdisplay = 1280, 1818 + .vsync_start = 1280 + 100, 1819 + .vsync_end = 1280 + 100 + 2, 1820 + .vtotal = 1280 + 100 + 2 + 100, 1821 + 1822 + .width_mm = 62, 1823 + .height_mm = 110, 1824 + }; 1825 + 1997 1826 static const struct drm_display_mode rpi_7inch_default_mode = { 1998 1827 .clock = 83330, 1999 1828 ··· 2205 2000 MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM, 2206 2001 }; 2207 2002 2003 + static const struct ili9881c_desc rpi_5inch_desc = { 2004 + .init = rpi_5inch_init, 2005 + .init_length = ARRAY_SIZE(rpi_5inch_init), 2006 + .mode = &rpi_5inch_default_mode, 
2007 + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM, 2008 + .lanes = 2, 2009 + }; 2010 + 2208 2011 static const struct ili9881c_desc rpi_7inch_desc = { 2209 2012 .init = rpi_7inch_init, 2210 2013 .init_length = ARRAY_SIZE(rpi_7inch_init), ··· 2238 2025 { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc }, 2239 2026 { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, 2240 2027 { .compatible = "ampire,am8001280g", .data = &am8001280g_desc }, 2028 + { .compatible = "raspberrypi,dsi-5inch", &rpi_5inch_desc }, 2241 2029 { .compatible = "raspberrypi,dsi-7inch", &rpi_7inch_desc }, 2242 2030 { } 2243 2031 };
+225
drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2016 XiaoMi, Inc. 4 + * Copyright (c) 2024 Svyatoslav Ryhel <clamor95@gmail.com> 5 + */ 6 + 7 + #include <linux/delay.h> 8 + #include <linux/gpio/consumer.h> 9 + #include <linux/module.h> 10 + #include <linux/of.h> 11 + #include <linux/of_graph.h> 12 + #include <linux/regulator/consumer.h> 13 + 14 + #include <video/mipi_display.h> 15 + 16 + #include <drm/drm_connector.h> 17 + #include <drm/drm_crtc.h> 18 + #include <drm/drm_device.h> 19 + #include <drm/drm_mipi_dsi.h> 20 + #include <drm/drm_modes.h> 21 + #include <drm/drm_panel.h> 22 + #include <drm/drm_probe_helper.h> 23 + 24 + static const struct regulator_bulk_data sharp_supplies[] = { 25 + { .supply = "avdd" }, { .supply = "vddio" }, 26 + { .supply = "vsp" }, { .supply = "vsn" }, 27 + }; 28 + 29 + struct sharp_panel { 30 + struct drm_panel panel; 31 + struct mipi_dsi_device *dsi[2]; 32 + 33 + struct gpio_desc *reset_gpio; 34 + struct regulator_bulk_data *supplies; 35 + 36 + const struct drm_display_mode *mode; 37 + }; 38 + 39 + static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel) 40 + { 41 + return container_of(panel, struct sharp_panel, panel); 42 + } 43 + 44 + static void sharp_panel_reset(struct sharp_panel *sharp) 45 + { 46 + gpiod_set_value_cansleep(sharp->reset_gpio, 1); 47 + usleep_range(2000, 3000); 48 + gpiod_set_value_cansleep(sharp->reset_gpio, 0); 49 + usleep_range(2000, 3000); 50 + } 51 + 52 + static int sharp_panel_prepare(struct drm_panel *panel) 53 + { 54 + struct sharp_panel *sharp = to_sharp_panel(panel); 55 + struct device *dev = panel->dev; 56 + struct mipi_dsi_device *dsi0 = sharp->dsi[0]; 57 + struct mipi_dsi_device *dsi1 = sharp->dsi[1]; 58 + struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; 59 + int ret; 60 + 61 + ret = regulator_bulk_enable(ARRAY_SIZE(sharp_supplies), sharp->supplies); 62 + if (ret) { 63 + dev_err(dev, "error enabling regulators (%d)\n", ret); 64 + return ret; 65 + } 
66 + 67 + msleep(24); 68 + 69 + if (sharp->reset_gpio) 70 + sharp_panel_reset(sharp); 71 + 72 + msleep(32); 73 + 74 + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_EXIT_SLEEP_MODE); 75 + mipi_dsi_msleep(&dsi_ctx, 120); 76 + 77 + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 78 + MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff); 79 + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 80 + MIPI_DCS_WRITE_POWER_SAVE, 0x01); 81 + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 82 + MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c); 83 + 84 + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_SET_DISPLAY_ON); 85 + 86 + return 0; 87 + } 88 + 89 + static int sharp_panel_unprepare(struct drm_panel *panel) 90 + { 91 + struct sharp_panel *sharp = to_sharp_panel(panel); 92 + struct mipi_dsi_device *dsi0 = sharp->dsi[0]; 93 + struct mipi_dsi_device *dsi1 = sharp->dsi[1]; 94 + struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; 95 + 96 + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_SET_DISPLAY_OFF); 97 + mipi_dsi_msleep(&dsi_ctx, 100); 98 + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_ENTER_SLEEP_MODE); 99 + mipi_dsi_msleep(&dsi_ctx, 150); 100 + 101 + if (sharp->reset_gpio) 102 + gpiod_set_value_cansleep(sharp->reset_gpio, 1); 103 + 104 + return regulator_bulk_disable(ARRAY_SIZE(sharp_supplies), sharp->supplies); 105 + } 106 + 107 + static const struct drm_display_mode default_mode = { 108 + .clock = (1536 + 136 + 28 + 28) * (2048 + 14 + 8 + 2) * 60 / 1000, 109 + .hdisplay = 1536, 110 + .hsync_start = 1536 + 136, 111 + .hsync_end = 1536 + 136 + 28, 112 + .htotal = 1536 + 136 + 28 + 28, 113 + .vdisplay = 2048, 114 + .vsync_start = 2048 + 14, 115 + .vsync_end = 2048 + 14 + 8, 116 + .vtotal = 2048 + 14 + 8 + 2, 117 + .width_mm = 120, 118 + .height_mm = 160, 119 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 120 + }; 121 + 122 + static int sharp_panel_get_modes(struct drm_panel *panel, 123 + 
struct drm_connector *connector) 124 + { 125 + return drm_connector_helper_get_modes_fixed(connector, &default_mode); 126 + } 127 + 128 + static const struct drm_panel_funcs sharp_panel_funcs = { 129 + .unprepare = sharp_panel_unprepare, 130 + .prepare = sharp_panel_prepare, 131 + .get_modes = sharp_panel_get_modes, 132 + }; 133 + 134 + static int sharp_panel_probe(struct mipi_dsi_device *dsi) 135 + { 136 + const struct mipi_dsi_device_info info = { "sharp-link1", 0, NULL }; 137 + struct device *dev = &dsi->dev; 138 + struct device_node *dsi_r; 139 + struct mipi_dsi_host *dsi_r_host; 140 + struct sharp_panel *sharp; 141 + int i, ret; 142 + 143 + sharp = devm_drm_panel_alloc(dev, struct sharp_panel, panel, 144 + &sharp_panel_funcs, DRM_MODE_CONNECTOR_DSI); 145 + if (IS_ERR(sharp)) 146 + return PTR_ERR(sharp); 147 + 148 + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(sharp_supplies), 149 + sharp_supplies, &sharp->supplies); 150 + if (ret) 151 + return dev_err_probe(dev, ret, "failed to get supplies\n"); 152 + 153 + sharp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); 154 + if (IS_ERR(sharp->reset_gpio)) 155 + return dev_err_probe(dev, PTR_ERR(sharp->reset_gpio), 156 + "failed to get reset GPIO\n"); 157 + 158 + /* Panel is always connected to two DSI hosts, DSI0 is left, DSI1 is right */ 159 + dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1); 160 + if (!dsi_r) 161 + return dev_err_probe(dev, -ENODEV, "failed to find second DSI host node\n"); 162 + 163 + dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r); 164 + of_node_put(dsi_r); 165 + if (!dsi_r_host) 166 + return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n"); 167 + 168 + sharp->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, &info); 169 + if (IS_ERR(sharp->dsi[1])) 170 + return dev_err_probe(dev, PTR_ERR(sharp->dsi[1]), 171 + "second link registration failed\n"); 172 + 173 + sharp->dsi[0] = dsi; 174 + mipi_dsi_set_drvdata(dsi, sharp); 175 + 176 + 
ret = drm_panel_of_backlight(&sharp->panel); 177 + if (ret) 178 + return dev_err_probe(dev, ret, "Failed to get backlight\n"); 179 + 180 + drm_panel_add(&sharp->panel); 181 + 182 + for (i = 0; i < ARRAY_SIZE(sharp->dsi); i++) { 183 + if (!sharp->dsi[i]) 184 + continue; 185 + 186 + sharp->dsi[i]->lanes = 4; 187 + sharp->dsi[i]->format = MIPI_DSI_FMT_RGB888; 188 + sharp->dsi[i]->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM; 189 + 190 + ret = devm_mipi_dsi_attach(dev, sharp->dsi[i]); 191 + if (ret < 0) { 192 + drm_panel_remove(&sharp->panel); 193 + return dev_err_probe(dev, ret, "failed to attach to DSI%d\n", i); 194 + } 195 + } 196 + 197 + return 0; 198 + } 199 + 200 + static void sharp_panel_remove(struct mipi_dsi_device *dsi) 201 + { 202 + struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi); 203 + 204 + drm_panel_remove(&sharp->panel); 205 + } 206 + 207 + static const struct of_device_id sharp_of_match[] = { 208 + { .compatible = "sharp,lq079l1sx01" }, 209 + { } 210 + }; 211 + MODULE_DEVICE_TABLE(of, sharp_of_match); 212 + 213 + static struct mipi_dsi_driver sharp_panel_driver = { 214 + .driver = { 215 + .name = "panel-sharp-lq079l1sx01", 216 + .of_match_table = sharp_of_match, 217 + }, 218 + .probe = sharp_panel_probe, 219 + .remove = sharp_panel_remove, 220 + }; 221 + module_mipi_dsi_driver(sharp_panel_driver); 222 + 223 + MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>"); 224 + MODULE_DESCRIPTION("Sharp LQ079L1SX01 panel driver"); 225 + MODULE_LICENSE("GPL");
+35
drivers/gpu/drm/panel/panel-simple.c
··· 2889 2889 }, 2890 2890 }; 2891 2891 2892 + static const struct display_timing jutouch_jt101tm023_timing = { 2893 + .pixelclock = { 66300000, 72400000, 78900000 }, 2894 + .hactive = { 1280, 1280, 1280 }, 2895 + .hfront_porch = { 12, 72, 132 }, 2896 + .hback_porch = { 88, 88, 88 }, 2897 + .hsync_len = { 10, 10, 48 }, 2898 + .vactive = { 800, 800, 800 }, 2899 + .vfront_porch = { 1, 15, 49 }, 2900 + .vback_porch = { 23, 23, 23 }, 2901 + .vsync_len = { 5, 6, 13 }, 2902 + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | 2903 + DISPLAY_FLAGS_DE_HIGH, 2904 + }; 2905 + 2906 + static const struct panel_desc jutouch_jt101tm023 = { 2907 + .timings = &jutouch_jt101tm023_timing, 2908 + .num_timings = 1, 2909 + .bpc = 8, 2910 + .size = { 2911 + .width = 217, 2912 + .height = 136, 2913 + }, 2914 + .delay = { 2915 + .enable = 50, 2916 + .disable = 50, 2917 + }, 2918 + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 2919 + .bus_flags = DRM_BUS_FLAG_DE_HIGH, 2920 + .connector_type = DRM_MODE_CONNECTOR_LVDS, 2921 + }; 2922 + 2923 + 2892 2924 static const struct display_timing koe_tx14d24vm1bpa_timing = { 2893 2925 .pixelclock = { 5580000, 5850000, 6200000 }, 2894 2926 .hactive = { 320, 320, 320 }, ··· 5240 5208 }, { 5241 5209 .compatible = "innolux,zj070na-01p", 5242 5210 .data = &innolux_zj070na_01p, 5211 + }, { 5212 + .compatible = "jutouch,jt101tm023", 5213 + .data = &jutouch_jt101tm023, 5243 5214 }, { 5244 5215 .compatible = "koe,tx14d24vm1bpa", 5245 5216 .data = &koe_tx14d24vm1bpa,
+69 -2
drivers/gpu/drm/panel/panel-visionox-rm69299.c
··· 3 3 * Copyright (c) 2019, The Linux Foundation. All rights reserved. 4 4 */ 5 5 6 + #include <linux/backlight.h> 6 7 #include <linux/delay.h> 7 8 #include <linux/module.h> 8 9 #include <linux/property.h> ··· 21 20 const struct drm_display_mode *mode; 22 21 const u8 *init_seq; 23 22 unsigned int init_seq_len; 23 + int max_brightness; 24 + int initial_brightness; 24 25 }; 25 26 26 27 struct visionox_rm69299 { ··· 195 192 struct visionox_rm69299 *ctx = panel_to_ctx(panel); 196 193 struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; 197 194 198 - ctx->dsi->mode_flags = 0; 195 + ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; 199 196 200 197 mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); 201 198 ··· 250 247 }; 251 248 252 249 static const struct drm_display_mode visionox_rm69299_1080x2160_60hz = { 253 - .clock = 158695, 250 + .clock = (2160 + 8 + 4 + 4) * (1080 + 26 + 2 + 36) * 60 / 1000, 254 251 .hdisplay = 1080, 255 252 .hsync_start = 1080 + 26, 256 253 .hsync_end = 1080 + 26 + 2, ··· 288 285 .get_modes = visionox_rm69299_get_modes, 289 286 }; 290 287 288 + static int visionox_rm69299_bl_get_brightness(struct backlight_device *bl) 289 + { 290 + struct mipi_dsi_device *dsi = bl_get_data(bl); 291 + u16 brightness; 292 + int ret; 293 + 294 + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; 295 + 296 + ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); 297 + if (ret < 0) 298 + return ret; 299 + 300 + dsi->mode_flags |= MIPI_DSI_MODE_LPM; 301 + 302 + return brightness; 303 + } 304 + 305 + static int visionox_rm69299_bl_update_status(struct backlight_device *bl) 306 + { 307 + struct mipi_dsi_device *dsi = bl_get_data(bl); 308 + u16 brightness = backlight_get_brightness(bl); 309 + int ret; 310 + 311 + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; 312 + 313 + ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); 314 + if (ret < 0) 315 + return ret; 316 + 317 + dsi->mode_flags |= MIPI_DSI_MODE_LPM; 318 + 319 + return 0; 320 + } 321 + 322 + static const struct 
backlight_ops visionox_rm69299_bl_ops = { 323 + .update_status = visionox_rm69299_bl_update_status, 324 + .get_brightness = visionox_rm69299_bl_get_brightness, 325 + }; 326 + 327 + static struct backlight_device * 328 + visionox_rm69299_create_backlight(struct visionox_rm69299 *ctx) 329 + { 330 + struct device *dev = &ctx->dsi->dev; 331 + const struct backlight_properties props = { 332 + .type = BACKLIGHT_RAW, 333 + .brightness = ctx->desc->initial_brightness, 334 + .max_brightness = ctx->desc->max_brightness, 335 + }; 336 + 337 + if (!ctx->desc->max_brightness) 338 + return 0; 339 + 340 + return devm_backlight_device_register(dev, dev_name(dev), dev, ctx->dsi, 341 + &visionox_rm69299_bl_ops, 342 + &props); 343 + } 344 + 291 345 static int visionox_rm69299_probe(struct mipi_dsi_device *dsi) 292 346 { 293 347 struct device *dev = &dsi->dev; ··· 375 315 dev_err(dev, "cannot get reset gpio %ld\n", PTR_ERR(ctx->reset_gpio)); 376 316 return PTR_ERR(ctx->reset_gpio); 377 317 } 318 + 319 + ctx->panel.backlight = visionox_rm69299_create_backlight(ctx); 320 + if (IS_ERR(ctx->panel.backlight)) 321 + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), 322 + "Failed to create backlight\n"); 378 323 379 324 drm_panel_add(&ctx->panel); 380 325 ··· 418 353 .mode = &visionox_rm69299_1080x2160_60hz, 419 354 .init_seq = (const u8 *)visionox_rm69299_1080x2160_60hz_init_seq, 420 355 .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2160_60hz_init_seq), 356 + .max_brightness = 255, 357 + .initial_brightness = 50, 421 358 }; 422 359 423 360 static const struct of_device_id visionox_rm69299_of_match[] = {
+9 -2
drivers/gpu/drm/panfrost/panfrost_device.h
··· 10 10 #include <linux/pm.h> 11 11 #include <linux/regulator/consumer.h> 12 12 #include <linux/spinlock.h> 13 + #include <drm/drm_auth.h> 13 14 #include <drm/drm_device.h> 14 15 #include <drm/drm_mm.h> 15 16 #include <drm/gpu_scheduler.h> 16 17 17 18 #include "panfrost_devfreq.h" 19 + #include "panfrost_job.h" 18 20 19 21 struct panfrost_device; 20 22 struct panfrost_mmu; ··· 24 22 struct panfrost_job; 25 23 struct panfrost_perfcnt; 26 24 27 - #define NUM_JOB_SLOTS 3 28 25 #define MAX_PM_DOMAINS 5 29 26 30 27 enum panfrost_drv_comp_bits { ··· 207 206 struct panfrost_file_priv { 208 207 struct panfrost_device *pfdev; 209 208 210 - struct drm_sched_entity sched_entity[NUM_JOB_SLOTS]; 209 + struct xarray jm_ctxs; 211 210 212 211 struct panfrost_mmu *mmu; 213 212 214 213 struct panfrost_engine_usage engine_usage; 215 214 }; 215 + 216 + static inline bool panfrost_high_prio_allowed(struct drm_file *file) 217 + { 218 + /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */ 219 + return (capable(CAP_SYS_NICE) || drm_is_current_master(file)); 220 + } 216 221 217 222 static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev) 218 223 {
+147 -5
drivers/gpu/drm/panfrost/panfrost_drv.c
··· 109 109 #endif 110 110 break; 111 111 112 + case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES: 113 + param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) | 114 + BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM); 115 + 116 + if (panfrost_high_prio_allowed(file)) 117 + param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH); 118 + break; 119 + 112 120 default: 113 121 return -EINVAL; 114 122 } ··· 287 279 struct panfrost_file_priv *file_priv = file->driver_priv; 288 280 struct drm_panfrost_submit *args = data; 289 281 struct drm_syncobj *sync_out = NULL; 282 + struct panfrost_jm_ctx *jm_ctx; 290 283 struct panfrost_job *job; 291 284 int ret = 0, slot; 285 + 286 + if (args->pad) 287 + return -EINVAL; 292 288 293 289 if (!args->jc) 294 290 return -EINVAL; ··· 306 294 return -ENODEV; 307 295 } 308 296 297 + jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle); 298 + if (!jm_ctx) { 299 + ret = -EINVAL; 300 + goto out_put_syncout; 301 + } 302 + 309 303 job = kzalloc(sizeof(*job), GFP_KERNEL); 310 304 if (!job) { 311 305 ret = -ENOMEM; 312 - goto out_put_syncout; 306 + goto out_put_jm_ctx; 313 307 } 314 308 315 309 kref_init(&job->refcount); ··· 325 307 job->requirements = args->requirements; 326 308 job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); 327 309 job->mmu = file_priv->mmu; 310 + job->ctx = panfrost_jm_ctx_get(jm_ctx); 328 311 job->engine_usage = &file_priv->engine_usage; 329 312 330 313 slot = panfrost_job_get_slot(job); 331 314 332 315 ret = drm_sched_job_init(&job->base, 333 - &file_priv->sched_entity[slot], 316 + &jm_ctx->slot_entity[slot], 334 317 1, NULL, file->client_id); 335 318 if (ret) 336 319 goto out_put_job; ··· 357 338 drm_sched_job_cleanup(&job->base); 358 339 out_put_job: 359 340 panfrost_job_put(job); 341 + out_put_jm_ctx: 342 + panfrost_jm_ctx_put(jm_ctx); 360 343 out_put_syncout: 361 344 if (sync_out) 362 345 drm_syncobj_put(sync_out); ··· 557 536 return ret; 558 537 } 559 538 539 + static int panfrost_ioctl_jm_ctx_create(struct drm_device 
*dev, void *data, 540 + struct drm_file *file) 541 + { 542 + return panfrost_jm_ctx_create(file, data); 543 + } 544 + 545 + static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data, 546 + struct drm_file *file) 547 + { 548 + const struct drm_panfrost_jm_ctx_destroy *args = data; 549 + 550 + if (args->pad) 551 + return -EINVAL; 552 + 553 + /* We can't destroy the default context created when the file is opened. */ 554 + if (!args->handle) 555 + return -EINVAL; 556 + 557 + return panfrost_jm_ctx_destroy(file, args->handle); 558 + } 559 + 560 560 int panfrost_unstable_ioctl_check(void) 561 561 { 562 562 if (!unstable_ioctls) ··· 606 564 goto err_free; 607 565 } 608 566 609 - ret = panfrost_job_open(panfrost_priv); 567 + ret = panfrost_job_open(file); 610 568 if (ret) 611 569 goto err_job; 612 570 ··· 625 583 struct panfrost_file_priv *panfrost_priv = file->driver_priv; 626 584 627 585 panfrost_perfcnt_close(file); 628 - panfrost_job_close(panfrost_priv); 586 + panfrost_job_close(file); 629 587 630 588 panfrost_mmu_ctx_put(panfrost_priv->mmu); 631 589 kfree(panfrost_priv); ··· 645 603 PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW), 646 604 PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), 647 605 PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW), 606 + PANFROST_IOCTL(JM_CTX_CREATE, jm_ctx_create, DRM_RENDER_ALLOW), 607 + PANFROST_IOCTL(JM_CTX_DESTROY, jm_ctx_destroy, DRM_RENDER_ALLOW), 648 608 }; 649 609 650 610 static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev, ··· 716 672 return 0; 717 673 } 718 674 675 + static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle, 676 + struct seq_file *m) 677 + { 678 + struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev; 679 + const char *prio = "UNKNOWN"; 680 + 681 + static const char * const prios[] = { 682 + [DRM_SCHED_PRIORITY_HIGH] = "HIGH", 683 + [DRM_SCHED_PRIORITY_NORMAL] = "NORMAL", 684 + [DRM_SCHED_PRIORITY_LOW] = "LOW", 
685 + }; 686 + 687 + if (jm_ctx->slot_entity[0].priority != 688 + jm_ctx->slot_entity[1].priority) 689 + drm_warn(ddev, "Slot priorities should be the same in a single context"); 690 + 691 + if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios)) 692 + prio = prios[jm_ctx->slot_entity[0].priority]; 693 + 694 + seq_printf(m, " JM context %u: priority %s\n", handle, prio); 695 + } 696 + 697 + static int show_file_jm_ctxs(struct panfrost_file_priv *pfile, 698 + struct seq_file *m) 699 + { 700 + struct panfrost_jm_ctx *jm_ctx; 701 + unsigned long i; 702 + 703 + xa_lock(&pfile->jm_ctxs); 704 + xa_for_each(&pfile->jm_ctxs, i, jm_ctx) { 705 + jm_ctx = panfrost_jm_ctx_get(jm_ctx); 706 + xa_unlock(&pfile->jm_ctxs); 707 + show_panfrost_jm_ctx(jm_ctx, i, m); 708 + panfrost_jm_ctx_put(jm_ctx); 709 + xa_lock(&pfile->jm_ctxs); 710 + } 711 + xa_unlock(&pfile->jm_ctxs); 712 + 713 + return 0; 714 + } 715 + 719 716 static struct drm_info_list panthor_debugfs_list[] = { 720 717 {"gems", panthor_gems_show, 0, NULL}, 721 718 }; ··· 770 685 return 0; 771 686 } 772 687 688 + static int show_each_file(struct seq_file *m, void *arg) 689 + { 690 + struct drm_info_node *node = (struct drm_info_node *)m->private; 691 + struct drm_device *ddev = node->minor->dev; 692 + int (*show)(struct panfrost_file_priv *, struct seq_file *) = 693 + node->info_ent->data; 694 + struct drm_file *file; 695 + int ret; 696 + 697 + ret = mutex_lock_interruptible(&ddev->filelist_mutex); 698 + if (ret) 699 + return ret; 700 + 701 + list_for_each_entry(file, &ddev->filelist, lhead) { 702 + struct task_struct *task; 703 + struct panfrost_file_priv *pfile = file->driver_priv; 704 + struct pid *pid; 705 + 706 + /* 707 + * Although we have a valid reference on file->pid, that does 708 + * not guarantee that the task_struct who called get_pid() is 709 + * still alive (e.g. get_pid(current) => fork() => exit()). 710 + * Therefore, we need to protect this ->comm access using RCU. 
711 + */ 712 + rcu_read_lock(); 713 + pid = rcu_dereference(file->pid); 714 + task = pid_task(pid, PIDTYPE_TGID); 715 + seq_printf(m, "client_id %8llu pid %8d command %s:\n", 716 + file->client_id, pid_nr(pid), 717 + task ? task->comm : "<unknown>"); 718 + rcu_read_unlock(); 719 + 720 + ret = show(pfile, m); 721 + if (ret < 0) 722 + break; 723 + 724 + seq_puts(m, "\n"); 725 + } 726 + 727 + mutex_unlock(&ddev->filelist_mutex); 728 + return ret; 729 + } 730 + 731 + static struct drm_info_list panfrost_sched_debugfs_list[] = { 732 + { "sched_ctxs", show_each_file, 0, show_file_jm_ctxs }, 733 + }; 734 + 735 + static void panfrost_sched_debugfs_init(struct drm_minor *minor) 736 + { 737 + drm_debugfs_create_files(panfrost_sched_debugfs_list, 738 + ARRAY_SIZE(panfrost_sched_debugfs_list), 739 + minor->debugfs_root, minor); 740 + } 741 + 773 742 static void panfrost_debugfs_init(struct drm_minor *minor) 774 743 { 775 744 panthor_gems_debugfs_init(minor); 745 + panfrost_sched_debugfs_init(minor); 776 746 } 777 747 #endif 778 748 ··· 839 699 * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT 840 700 * - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries 841 701 * - 1.4 - adds SET_LABEL_BO 702 + * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls and extend SUBMIT to allow 703 + * context creation with configurable priorities/affinity 842 704 */ 843 705 static const struct drm_driver panfrost_drm_driver = { 844 706 .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, ··· 853 711 .name = "panfrost", 854 712 .desc = "panfrost DRM", 855 713 .major = 1, 856 - .minor = 4, 714 + .minor = 5, 857 715 858 716 .gem_create_object = panfrost_gem_create_object, 859 717 .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
+165 -34
drivers/gpu/drm/panfrost/panfrost_job.c
··· 22 22 #include "panfrost_mmu.h" 23 23 #include "panfrost_dump.h" 24 24 25 + #define MAX_JM_CTX_PER_FILE 64 25 26 #define JOB_TIMEOUT_MS 500 26 27 27 28 #define job_write(dev, reg, data) writel(data, dev->iomem + (reg)) ··· 360 359 kvfree(job->bos); 361 360 } 362 361 362 + panfrost_jm_ctx_put(job->ctx); 363 363 kfree(job); 364 364 } 365 365 ··· 384 382 struct panfrost_device *pfdev = job->pfdev; 385 383 int slot = panfrost_job_get_slot(job); 386 384 struct dma_fence *fence = NULL; 385 + 386 + if (job->ctx->destroyed) 387 + return ERR_PTR(-ECANCELED); 387 388 388 389 if (unlikely(job->base.s_fence->finished.error)) 389 390 return NULL; ··· 922 917 destroy_workqueue(pfdev->reset.wq); 923 918 } 924 919 925 - int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) 920 + int panfrost_job_open(struct drm_file *file) 926 921 { 927 - struct panfrost_device *pfdev = panfrost_priv->pfdev; 928 - struct panfrost_job_slot *js = pfdev->js; 929 - struct drm_gpu_scheduler *sched; 930 - int ret, i; 922 + struct panfrost_file_priv *panfrost_priv = file->driver_priv; 923 + int ret; 931 924 932 - for (i = 0; i < NUM_JOB_SLOTS; i++) { 933 - sched = &js->queue[i].sched; 934 - ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], 935 - DRM_SCHED_PRIORITY_NORMAL, &sched, 936 - 1, NULL); 937 - if (WARN_ON(ret)) 938 - return ret; 939 - } 925 + struct drm_panfrost_jm_ctx_create default_jm_ctx = { 926 + .priority = PANFROST_JM_CTX_PRIORITY_MEDIUM, 927 + }; 928 + 929 + xa_init_flags(&panfrost_priv->jm_ctxs, XA_FLAGS_ALLOC); 930 + 931 + ret = panfrost_jm_ctx_create(file, &default_jm_ctx); 932 + if (ret) 933 + return ret; 934 + 935 + /* We expect the default context to be assigned handle 0. 
*/ 936 + if (WARN_ON(default_jm_ctx.handle)) 937 + return -EINVAL; 938 + 940 939 return 0; 941 940 } 942 941 943 - void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) 942 + void panfrost_job_close(struct drm_file *file) 944 943 { 945 - struct panfrost_device *pfdev = panfrost_priv->pfdev; 944 + struct panfrost_file_priv *panfrost_priv = file->driver_priv; 945 + struct panfrost_jm_ctx *jm_ctx; 946 + unsigned long i; 947 + 948 + xa_for_each(&panfrost_priv->jm_ctxs, i, jm_ctx) 949 + panfrost_jm_ctx_destroy(file, i); 950 + 951 + xa_destroy(&panfrost_priv->jm_ctxs); 952 + } 953 + 954 + int panfrost_job_is_idle(struct panfrost_device *pfdev) 955 + { 956 + struct panfrost_job_slot *js = pfdev->js; 946 957 int i; 947 958 948 - for (i = 0; i < NUM_JOB_SLOTS; i++) 949 - drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]); 959 + for (i = 0; i < NUM_JOB_SLOTS; i++) { 960 + /* If there are any jobs in the HW queue, we're not idle */ 961 + if (atomic_read(&js->queue[i].sched.credit_count)) 962 + return false; 963 + } 964 + 965 + return true; 966 + } 967 + 968 + static void panfrost_jm_ctx_release(struct kref *kref) 969 + { 970 + struct panfrost_jm_ctx *jm_ctx = container_of(kref, struct panfrost_jm_ctx, refcnt); 971 + 972 + WARN_ON(!jm_ctx->destroyed); 973 + 974 + for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) 975 + drm_sched_entity_destroy(&jm_ctx->slot_entity[i]); 976 + 977 + kfree(jm_ctx); 978 + } 979 + 980 + void 981 + panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx) 982 + { 983 + if (jm_ctx) 984 + kref_put(&jm_ctx->refcnt, panfrost_jm_ctx_release); 985 + } 986 + 987 + struct panfrost_jm_ctx * 988 + panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx) 989 + { 990 + if (jm_ctx) 991 + kref_get(&jm_ctx->refcnt); 992 + 993 + return jm_ctx; 994 + } 995 + 996 + struct panfrost_jm_ctx * 997 + panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle) 998 + { 999 + struct panfrost_file_priv *priv = file->driver_priv; 1000 + struct panfrost_jm_ctx 
*jm_ctx; 1001 + 1002 + xa_lock(&priv->jm_ctxs); 1003 + jm_ctx = panfrost_jm_ctx_get(xa_load(&priv->jm_ctxs, handle)); 1004 + xa_unlock(&priv->jm_ctxs); 1005 + 1006 + return jm_ctx; 1007 + } 1008 + 1009 + static int jm_ctx_prio_to_drm_sched_prio(struct drm_file *file, 1010 + enum drm_panfrost_jm_ctx_priority in, 1011 + enum drm_sched_priority *out) 1012 + { 1013 + switch (in) { 1014 + case PANFROST_JM_CTX_PRIORITY_LOW: 1015 + *out = DRM_SCHED_PRIORITY_LOW; 1016 + return 0; 1017 + case PANFROST_JM_CTX_PRIORITY_MEDIUM: 1018 + *out = DRM_SCHED_PRIORITY_NORMAL; 1019 + return 0; 1020 + case PANFROST_JM_CTX_PRIORITY_HIGH: 1021 + if (!panfrost_high_prio_allowed(file)) 1022 + return -EACCES; 1023 + 1024 + *out = DRM_SCHED_PRIORITY_HIGH; 1025 + return 0; 1026 + default: 1027 + return -EINVAL; 1028 + } 1029 + } 1030 + 1031 + int panfrost_jm_ctx_create(struct drm_file *file, 1032 + struct drm_panfrost_jm_ctx_create *args) 1033 + { 1034 + struct panfrost_file_priv *priv = file->driver_priv; 1035 + struct panfrost_device *pfdev = priv->pfdev; 1036 + enum drm_sched_priority sched_prio; 1037 + struct panfrost_jm_ctx *jm_ctx; 1038 + int ret; 1039 + 1040 + jm_ctx = kzalloc(sizeof(*jm_ctx), GFP_KERNEL); 1041 + if (!jm_ctx) 1042 + return -ENOMEM; 1043 + 1044 + kref_init(&jm_ctx->refcnt); 1045 + 1046 + ret = jm_ctx_prio_to_drm_sched_prio(file, args->priority, &sched_prio); 1047 + if (ret) 1048 + goto err_put_jm_ctx; 1049 + 1050 + for (u32 i = 0; i < NUM_JOB_SLOTS; i++) { 1051 + struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; 1052 + 1053 + ret = drm_sched_entity_init(&jm_ctx->slot_entity[i], sched_prio, 1054 + &sched, 1, NULL); 1055 + if (ret) 1056 + goto err_put_jm_ctx; 1057 + } 1058 + 1059 + ret = xa_alloc(&priv->jm_ctxs, &args->handle, jm_ctx, 1060 + XA_LIMIT(0, MAX_JM_CTX_PER_FILE), GFP_KERNEL); 1061 + if (ret) 1062 + goto err_put_jm_ctx; 1063 + 1064 + return 0; 1065 + 1066 + err_put_jm_ctx: 1067 + jm_ctx->destroyed = true; 1068 + panfrost_jm_ctx_put(jm_ctx); 1069 + 
return ret; 1070 + } 1071 + 1072 + int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle) 1073 + { 1074 + struct panfrost_file_priv *priv = file->driver_priv; 1075 + struct panfrost_device *pfdev = priv->pfdev; 1076 + struct panfrost_jm_ctx *jm_ctx; 1077 + 1078 + jm_ctx = xa_erase(&priv->jm_ctxs, handle); 1079 + if (!jm_ctx) 1080 + return -EINVAL; 1081 + 1082 + jm_ctx->destroyed = true; 950 1083 951 1084 /* Kill in-flight jobs */ 952 1085 spin_lock(&pfdev->js->job_lock); 953 - for (i = 0; i < NUM_JOB_SLOTS; i++) { 954 - struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i]; 955 - int j; 1086 + for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) { 1087 + struct drm_sched_entity *entity = &jm_ctx->slot_entity[i]; 956 1088 957 - for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) { 1089 + for (int j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) { 958 1090 struct panfrost_job *job = pfdev->jobs[i][j]; 959 1091 u32 cmd; 960 1092 ··· 1122 980 } 1123 981 } 1124 982 spin_unlock(&pfdev->js->job_lock); 1125 - } 1126 983 1127 - int panfrost_job_is_idle(struct panfrost_device *pfdev) 1128 - { 1129 - struct panfrost_job_slot *js = pfdev->js; 1130 - int i; 1131 - 1132 - for (i = 0; i < NUM_JOB_SLOTS; i++) { 1133 - /* If there are any jobs in the HW queue, we're not idle */ 1134 - if (atomic_read(&js->queue[i].sched.credit_count)) 1135 - return false; 1136 - } 1137 - 1138 - return true; 984 + panfrost_jm_ctx_put(jm_ctx); 985 + return 0; 1139 986 }
+23 -2
drivers/gpu/drm/panfrost/panfrost_job.h
··· 18 18 19 19 struct panfrost_device *pfdev; 20 20 struct panfrost_mmu *mmu; 21 + struct panfrost_jm_ctx *ctx; 21 22 22 23 /* Fence to be signaled by IRQ handler when the job is complete. */ 23 24 struct dma_fence *done_fence; ··· 40 39 u64 start_cycles; 41 40 }; 42 41 42 + struct panfrost_js_ctx { 43 + struct drm_sched_entity sched_entity; 44 + bool enabled; 45 + }; 46 + 47 + #define NUM_JOB_SLOTS 3 48 + 49 + struct panfrost_jm_ctx { 50 + struct kref refcnt; 51 + bool destroyed; 52 + struct drm_sched_entity slot_entity[NUM_JOB_SLOTS]; 53 + }; 54 + 55 + int panfrost_jm_ctx_create(struct drm_file *file, 56 + struct drm_panfrost_jm_ctx_create *args); 57 + int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle); 58 + void panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx); 59 + struct panfrost_jm_ctx *panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx); 60 + struct panfrost_jm_ctx *panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle); 61 + 43 62 int panfrost_job_init(struct panfrost_device *pfdev); 44 63 void panfrost_job_fini(struct panfrost_device *pfdev); 45 - int panfrost_job_open(struct panfrost_file_priv *panfrost_priv); 46 - void panfrost_job_close(struct panfrost_file_priv *panfrost_priv); 64 + int panfrost_job_open(struct drm_file *file); 65 + void panfrost_job_close(struct drm_file *file); 47 66 int panfrost_job_get_slot(struct panfrost_job *job); 48 67 int panfrost_job_push(struct panfrost_job *job); 49 68 void panfrost_job_put(struct panfrost_job *job);
+1 -1
drivers/gpu/drm/panthor/panthor_drv.c
··· 1105 1105 if (ret) 1106 1106 goto out; 1107 1107 1108 - ret = panthor_group_create(pfile, args, queue_args); 1108 + ret = panthor_group_create(pfile, args, queue_args, file->client_id); 1109 1109 if (ret < 0) 1110 1110 goto out; 1111 1111 args->group_handle = ret;
+29 -11
drivers/gpu/drm/panthor/panthor_sched.c
··· 360 360 /** @entity: DRM scheduling entity used for this queue. */ 361 361 struct drm_sched_entity entity; 362 362 363 + /** @name: DRM scheduler name for this queue. */ 364 + char *name; 365 + 363 366 /** 364 367 * @remaining_time: Time remaining before the job timeout expires. 365 368 * ··· 903 900 if (queue->scheduler.ops) 904 901 drm_sched_fini(&queue->scheduler); 905 902 903 + kfree(queue->name); 904 + 906 905 panthor_queue_put_syncwait_obj(queue); 907 906 908 907 panthor_kernel_bo_destroy(queue->ringbuf); ··· 1416 1411 fault = cs_iface->output->fault; 1417 1412 info = cs_iface->output->fault_info; 1418 1413 1419 - if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) { 1414 + if (queue) { 1420 1415 u64 cs_extract = queue->iface.output->extract; 1421 1416 struct panthor_job *job; 1422 1417 ··· 3312 3307 3313 3308 static struct panthor_queue * 3314 3309 group_create_queue(struct panthor_group *group, 3315 - const struct drm_panthor_queue_create *args) 3310 + const struct drm_panthor_queue_create *args, 3311 + u64 drm_client_id, u32 gid, u32 qid) 3316 3312 { 3317 - const struct drm_sched_init_args sched_args = { 3313 + struct drm_sched_init_args sched_args = { 3318 3314 .ops = &panthor_queue_sched_ops, 3319 3315 .submit_wq = group->ptdev->scheduler->wq, 3320 3316 .num_rqs = 1, ··· 3328 3322 .credit_limit = args->ringbuf_size / sizeof(u64), 3329 3323 .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), 3330 3324 .timeout_wq = group->ptdev->reset.wq, 3331 - .name = "panthor-queue", 3332 3325 .dev = group->ptdev->base.dev, 3333 3326 }; 3334 3327 struct drm_gpu_scheduler *drm_sched; ··· 3402 3397 if (ret) 3403 3398 goto err_free_queue; 3404 3399 3400 + /* assign a unique name */ 3401 + queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid); 3402 + if (!queue->name) { 3403 + ret = -ENOMEM; 3404 + goto err_free_queue; 3405 + } 3406 + 3407 + sched_args.name = queue->name; 3408 + 3405 3409 ret = 
drm_sched_init(&queue->scheduler, &sched_args); 3406 3410 if (ret) 3407 3411 goto err_free_queue; ··· 3460 3446 3461 3447 int panthor_group_create(struct panthor_file *pfile, 3462 3448 const struct drm_panthor_group_create *group_args, 3463 - const struct drm_panthor_queue_create *queue_args) 3449 + const struct drm_panthor_queue_create *queue_args, 3450 + u64 drm_client_id) 3464 3451 { 3465 3452 struct panthor_device *ptdev = pfile->ptdev; 3466 3453 struct panthor_group_pool *gpool = pfile->groups; ··· 3554 3539 memset(group->syncobjs->kmap, 0, 3555 3540 group_args->queues.count * sizeof(struct panthor_syncobj_64b)); 3556 3541 3542 + ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); 3543 + if (ret) 3544 + goto err_put_group; 3545 + 3557 3546 for (i = 0; i < group_args->queues.count; i++) { 3558 - group->queues[i] = group_create_queue(group, &queue_args[i]); 3547 + group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i); 3559 3548 if (IS_ERR(group->queues[i])) { 3560 3549 ret = PTR_ERR(group->queues[i]); 3561 3550 group->queues[i] = NULL; 3562 - goto err_put_group; 3551 + goto err_erase_gid; 3563 3552 } 3564 3553 3565 3554 group->queue_count++; 3566 3555 } 3567 3556 3568 3557 group->idle_queues = GENMASK(group->queue_count - 1, 0); 3569 - 3570 - ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); 3571 - if (ret) 3572 - goto err_put_group; 3573 3558 3574 3559 mutex_lock(&sched->reset.lock); 3575 3560 if (atomic_read(&sched->reset.in_progress)) { ··· 3588 3573 group_init_task_info(group); 3589 3574 3590 3575 return gid; 3576 + 3577 + err_erase_gid: 3578 + xa_erase(&gpool->xa, gid); 3591 3579 3592 3580 err_put_group: 3593 3581 group_put(group);
+2 -1
drivers/gpu/drm/panthor/panthor_sched.h
··· 21 21 22 22 int panthor_group_create(struct panthor_file *pfile, 23 23 const struct drm_panthor_group_create *group_args, 24 - const struct drm_panthor_queue_create *queue_args); 24 + const struct drm_panthor_queue_create *queue_args, 25 + u64 drm_client_id); 25 26 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle); 26 27 int panthor_group_get_state(struct panthor_file *pfile, 27 28 struct drm_panthor_group_get_state *get_state);
+8 -5
drivers/gpu/drm/pl111/pl111_display.c
··· 473 473 return best_div; 474 474 } 475 475 476 - static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, 477 - unsigned long *prate) 476 + static int pl111_clk_div_determine_rate(struct clk_hw *hw, 477 + struct clk_rate_request *req) 478 478 { 479 - int div = pl111_clk_div_choose_div(hw, rate, prate, true); 479 + int div = pl111_clk_div_choose_div(hw, req->rate, 480 + &req->best_parent_rate, true); 480 481 481 - return DIV_ROUND_UP_ULL(*prate, div); 482 + req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div); 483 + 484 + return 0; 482 485 } 483 486 484 487 static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw, ··· 531 528 532 529 static const struct clk_ops pl111_clk_div_ops = { 533 530 .recalc_rate = pl111_clk_div_recalc_rate, 534 - .round_rate = pl111_clk_div_round_rate, 531 + .determine_rate = pl111_clk_div_determine_rate, 535 532 .set_rate = pl111_clk_div_set_rate, 536 533 }; 537 534
+1 -1
drivers/gpu/drm/qxl/qxl_gem.c
··· 39 39 qxl_surface_evict(qdev, qobj, false); 40 40 41 41 tbo = &qobj->tbo; 42 - ttm_bo_put(tbo); 42 + ttm_bo_fini(tbo); 43 43 } 44 44 45 45 int qxl_gem_object_create(struct qxl_device *qdev, int size,
+1 -1
drivers/gpu/drm/radeon/radeon_gem.c
··· 86 86 87 87 if (robj) { 88 88 radeon_mn_unregister(robj); 89 - ttm_bo_put(&robj->tbo); 89 + ttm_bo_fini(&robj->tbo); 90 90 } 91 91 } 92 92
+5 -2
drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
··· 11 11 #include <drm/drm_atomic_helper.h> 12 12 #include <drm/drm_crtc.h> 13 13 #include <drm/drm_device.h> 14 + #include <drm/drm_dumb_buffers.h> 14 15 #include <drm/drm_framebuffer.h> 15 16 #include <drm/drm_gem_dma_helper.h> 16 17 #include <drm/drm_gem_framebuffer_helper.h> ··· 408 407 struct drm_mode_create_dumb *args) 409 408 { 410 409 struct rcar_du_device *rcdu = to_rcar_du_device(dev); 411 - unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 412 410 unsigned int align; 411 + int ret; 413 412 414 413 /* 415 414 * The R8A7779 DU requires a 16 pixels pitch alignment as documented, ··· 420 419 else 421 420 align = 16 * args->bpp / 8; 422 421 423 - args->pitch = roundup(min_pitch, align); 422 + ret = drm_mode_size_dumb(dev, args, align, 0); 423 + if (ret) 424 + return ret; 424 425 425 426 return drm_gem_dma_dumb_create_internal(file, dev, args); 426 427 }
+6 -6
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
··· 9 9 #include <linux/vmalloc.h> 10 10 11 11 #include <drm/drm.h> 12 + #include <drm/drm_dumb_buffers.h> 12 13 #include <drm/drm_fb_helper.h> 13 14 #include <drm/drm_gem.h> 14 15 #include <drm/drm_gem_dma_helper.h> ··· 404 403 struct drm_mode_create_dumb *args) 405 404 { 406 405 struct rockchip_gem_object *rk_obj; 407 - int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 406 + int ret; 408 407 409 - /* 410 - * align to 64 bytes since Mali requires it. 411 - */ 412 - args->pitch = ALIGN(min_pitch, 64); 413 - args->size = args->pitch * args->height; 408 + /* 64-byte alignment required by Mali */ 409 + ret = drm_mode_size_dumb(dev, args, SZ_64, 0); 410 + if (ret) 411 + return ret; 414 412 415 413 rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, 416 414 &args->handle);
+1 -2
drivers/gpu/drm/scheduler/tests/sched_tests.h
··· 31 31 * 32 32 * @base: DRM scheduler base class 33 33 * @test: Backpointer to owning the kunit test case 34 - * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list 34 + * @lock: Lock to protect the simulated @hw_timeline and @job_list 35 35 * @job_list: List of jobs submitted to the mock GPU 36 - * @done_list: List of jobs completed by the mock GPU 37 36 * @hw_timeline: Simulated hardware timeline has a @context, @next_seqno and 38 37 * @cur_seqno for implementing a struct dma_fence signaling the 39 38 * simulated job completion.
+35 -51
drivers/gpu/drm/solomon/ssd130x.c
··· 1016 1016 1017 1017 dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8); 1018 1018 1019 - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 1020 - if (ret) 1021 - return ret; 1022 - 1023 1019 iosys_map_set_vaddr(&dst, buf); 1024 1020 drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); 1025 - 1026 - drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 1027 1021 1028 1022 ssd130x_update_rect(ssd130x, rect, buf, data_array); 1029 1023 ··· 1042 1048 1043 1049 dst_pitch = drm_rect_width(rect); 1044 1050 1045 - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 1046 - if (ret) 1047 - return ret; 1048 - 1049 1051 iosys_map_set_vaddr(&dst, buf); 1050 1052 drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); 1051 - 1052 - drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 1053 1053 1054 1054 ssd132x_update_rect(ssd130x, rect, buf, data_array); 1055 1055 ··· 1066 1078 1067 1079 dst_pitch = drm_format_info_min_pitch(fi, 0, drm_rect_width(rect)); 1068 1080 1069 - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 1070 - if (ret) 1071 - return ret; 1072 - 1073 1081 iosys_map_set_vaddr(&dst, data_array); 1074 1082 drm_fb_xrgb8888_to_rgb332(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); 1075 - 1076 - drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 1077 1083 1078 1084 ssd133x_update_rect(ssd130x, rect, data_array, dst_pitch); 1079 1085 ··· 1214 1232 if (!drm_dev_enter(drm, &idx)) 1215 1233 return; 1216 1234 1235 + if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE)) 1236 + goto out_drm_dev_exit; 1237 + 1217 1238 drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); 1218 1239 drm_atomic_for_each_plane_damage(&iter, &damage) { 1219 1240 dst_clip = plane_state->dst; ··· 1230 1245 &shadow_plane_state->fmtcnv_state); 1231 1246 } 1232 1247 1248 + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 1249 + 1250 + out_drm_dev_exit: 1233 1251 drm_dev_exit(idx); 1234 1252 } 1235 1253 ··· 1255 1267 if (!drm_dev_enter(drm, 
&idx)) 1256 1268 return; 1257 1269 1270 + if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE)) 1271 + goto out_drm_dev_exit; 1272 + 1258 1273 drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); 1259 1274 drm_atomic_for_each_plane_damage(&iter, &damage) { 1260 1275 dst_clip = plane_state->dst; ··· 1271 1280 &shadow_plane_state->fmtcnv_state); 1272 1281 } 1273 1282 1283 + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 1284 + 1285 + out_drm_dev_exit: 1274 1286 drm_dev_exit(idx); 1275 1287 } 1276 1288 ··· 1295 1301 if (!drm_dev_enter(drm, &idx)) 1296 1302 return; 1297 1303 1304 + if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE)) 1305 + goto out_drm_dev_exit; 1306 + 1298 1307 drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); 1299 1308 drm_atomic_for_each_plane_damage(&iter, &damage) { 1300 1309 dst_clip = plane_state->dst; ··· 1310 1313 &shadow_plane_state->fmtcnv_state); 1311 1314 } 1312 1315 1316 + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 1317 + 1318 + out_drm_dev_exit: 1313 1319 drm_dev_exit(idx); 1314 1320 } 1315 1321 ··· 1393 1393 { 1394 1394 struct ssd130x_plane_state *ssd130x_state; 1395 1395 1396 - WARN_ON(plane->state); 1396 + drm_WARN_ON_ONCE(plane->dev, plane->state); 1397 1397 1398 1398 ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL); 1399 1399 if (!ssd130x_state) ··· 1408 1408 struct ssd130x_plane_state *old_ssd130x_state; 1409 1409 struct ssd130x_plane_state *ssd130x_state; 1410 1410 1411 - if (WARN_ON(!plane->state)) 1411 + if (drm_WARN_ON_ONCE(plane->dev, !plane->state)) 1412 1412 return NULL; 1413 1413 1414 1414 old_ssd130x_state = to_ssd130x_plane_state(plane->state); ··· 1473 1473 { 1474 1474 struct ssd130x_device *ssd130x = drm_to_ssd130x(crtc->dev); 1475 1475 1476 - if (mode->hdisplay != ssd130x->mode.hdisplay && 1477 - mode->vdisplay != ssd130x->mode.vdisplay) 1478 - return MODE_ONE_SIZE; 1479 - else if (mode->hdisplay != ssd130x->mode.hdisplay) 1480 - return MODE_ONE_WIDTH; 1481 
- else if (mode->vdisplay != ssd130x->mode.vdisplay) 1482 - return MODE_ONE_HEIGHT; 1483 - 1484 - return MODE_OK; 1476 + return drm_crtc_helper_mode_valid_fixed(crtc, mode, &ssd130x->mode); 1485 1477 } 1486 1478 1487 1479 static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc, ··· 1490 1498 if (ret) 1491 1499 return ret; 1492 1500 1493 - ssd130x_state->data_array = kmalloc(ssd130x->width * pages, GFP_KERNEL); 1501 + ssd130x_state->data_array = kmalloc_array(ssd130x->width, pages, GFP_KERNEL); 1494 1502 if (!ssd130x_state->data_array) 1495 1503 return -ENOMEM; 1496 1504 ··· 1511 1519 if (ret) 1512 1520 return ret; 1513 1521 1514 - ssd130x_state->data_array = kmalloc(columns * ssd130x->height, GFP_KERNEL); 1522 + ssd130x_state->data_array = kmalloc_array(columns, ssd130x->height, GFP_KERNEL); 1515 1523 if (!ssd130x_state->data_array) 1516 1524 return -ENOMEM; 1517 1525 ··· 1538 1546 1539 1547 pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width); 1540 1548 1541 - ssd130x_state->data_array = kmalloc(pitch * ssd130x->height, GFP_KERNEL); 1549 + ssd130x_state->data_array = kmalloc_array(pitch, ssd130x->height, GFP_KERNEL); 1542 1550 if (!ssd130x_state->data_array) 1543 1551 return -ENOMEM; 1544 1552 ··· 1550 1558 { 1551 1559 struct ssd130x_crtc_state *ssd130x_state; 1552 1560 1553 - WARN_ON(crtc->state); 1561 + drm_WARN_ON_ONCE(crtc->dev, crtc->state); 1554 1562 1555 1563 ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL); 1556 1564 if (!ssd130x_state) ··· 1564 1572 struct ssd130x_crtc_state *old_ssd130x_state; 1565 1573 struct ssd130x_crtc_state *ssd130x_state; 1566 1574 1567 - if (WARN_ON(!crtc->state)) 1575 + if (drm_WARN_ON_ONCE(crtc->dev, !crtc->state)) 1568 1576 return NULL; 1569 1577 1570 1578 old_ssd130x_state = to_ssd130x_crtc_state(crtc->state); ··· 1732 1740 static int ssd130x_connector_get_modes(struct drm_connector *connector) 1733 1741 { 1734 1742 struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev); 1735 - struct 
drm_display_mode *mode; 1736 - struct device *dev = ssd130x->dev; 1737 1743 1738 - mode = drm_mode_duplicate(connector->dev, &ssd130x->mode); 1739 - if (!mode) { 1740 - dev_err(dev, "Failed to duplicated mode\n"); 1741 - return 0; 1742 - } 1743 - 1744 - drm_mode_probed_add(connector, mode); 1745 - drm_set_preferred_mode(connector, mode->hdisplay, mode->vdisplay); 1746 - 1747 - /* There is only a single mode */ 1748 - return 1; 1744 + return drm_connector_helper_get_modes_fixed(connector, &ssd130x->mode); 1749 1745 } 1750 1746 1751 1747 static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = { ··· 1867 1887 1868 1888 mode->type = DRM_MODE_TYPE_DRIVER; 1869 1889 mode->clock = 1; 1870 - mode->hdisplay = mode->htotal = ssd130x->width; 1871 - mode->hsync_start = mode->hsync_end = ssd130x->width; 1872 - mode->vdisplay = mode->vtotal = ssd130x->height; 1873 - mode->vsync_start = mode->vsync_end = ssd130x->height; 1890 + mode->hdisplay = ssd130x->width; 1891 + mode->htotal = ssd130x->width; 1892 + mode->hsync_start = ssd130x->width; 1893 + mode->hsync_end = ssd130x->width; 1894 + mode->vdisplay = ssd130x->height; 1895 + mode->vtotal = ssd130x->height; 1896 + mode->vsync_start = ssd130x->height; 1897 + mode->vsync_end = ssd130x->height; 1874 1898 mode->width_mm = 27; 1875 1899 mode->height_mm = 27; 1876 1900
+8 -6
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
··· 274 274 return (unsigned long)pll_out_khz * 1000; 275 275 } 276 276 277 - static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate, 278 - unsigned long *parent_rate) 277 + static int dw_mipi_dsi_clk_determine_rate(struct clk_hw *hw, 278 + struct clk_rate_request *req) 279 279 { 280 280 struct dw_mipi_dsi_stm *dsi = clk_to_dw_mipi_dsi_stm(hw); 281 281 unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz; ··· 283 283 284 284 DRM_DEBUG_DRIVER("\n"); 285 285 286 - pll_in_khz = (unsigned int)(*parent_rate / 1000); 286 + pll_in_khz = (unsigned int)(req->best_parent_rate / 1000); 287 287 288 288 /* Compute best pll parameters */ 289 289 idf = 0; 290 290 ndiv = 0; 291 291 odf = 0; 292 292 293 - ret = dsi_pll_get_params(dsi, pll_in_khz, rate / 1000, 293 + ret = dsi_pll_get_params(dsi, pll_in_khz, req->rate / 1000, 294 294 &idf, &ndiv, &odf); 295 295 if (ret) 296 296 DRM_WARN("Warning dsi_pll_get_params(): bad params\n"); ··· 298 298 /* Get the adjusted pll out value */ 299 299 pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf); 300 300 301 - return pll_out_khz * 1000; 301 + req->rate = pll_out_khz * 1000; 302 + 303 + return 0; 302 304 } 303 305 304 306 static int dw_mipi_dsi_clk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 353 351 .disable = dw_mipi_dsi_clk_disable, 354 352 .is_enabled = dw_mipi_dsi_clk_is_enabled, 355 353 .recalc_rate = dw_mipi_dsi_clk_recalc_rate, 356 - .round_rate = dw_mipi_dsi_clk_round_rate, 354 + .determine_rate = dw_mipi_dsi_clk_determine_rate, 357 355 .set_rate = dw_mipi_dsi_clk_set_rate, 358 356 }; 359 357
+7 -5
drivers/gpu/drm/stm/lvds.c
··· 682 682 return (unsigned long)lvds->pixel_clock_rate; 683 683 } 684 684 685 - static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate, 686 - unsigned long *parent_rate) 685 + static int lvds_pixel_clk_determine_rate(struct clk_hw *hw, 686 + struct clk_rate_request *req) 687 687 { 688 688 struct stm_lvds *lvds = container_of(hw, struct stm_lvds, lvds_ck_px); 689 689 unsigned int pll_in_khz, bdiv = 0, mdiv = 0, ndiv = 0; ··· 703 703 mode = list_first_entry(&connector->modes, 704 704 struct drm_display_mode, head); 705 705 706 - pll_in_khz = (unsigned int)(*parent_rate / 1000); 706 + pll_in_khz = (unsigned int)(req->best_parent_rate / 1000); 707 707 708 708 if (lvds_is_dual_link(lvds->link_type)) 709 709 multiplier = 2; ··· 719 719 lvds->pixel_clock_rate = (unsigned long)pll_get_clkout_khz(pll_in_khz, bdiv, mdiv, ndiv) 720 720 * 1000 * multiplier / 7; 721 721 722 - return lvds->pixel_clock_rate; 722 + req->rate = lvds->pixel_clock_rate; 723 + 724 + return 0; 723 725 } 724 726 725 727 static const struct clk_ops lvds_pixel_clk_ops = { 726 728 .enable = lvds_pixel_clk_enable, 727 729 .disable = lvds_pixel_clk_disable, 728 730 .recalc_rate = lvds_pixel_clk_recalc_rate, 729 - .round_rate = lvds_pixel_clk_round_rate, 731 + .determine_rate = lvds_pixel_clk_determine_rate, 730 732 }; 731 733 732 734 static const struct clk_init_data clk_data = {
+7 -5
drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
··· 59 59 return best_rate; 60 60 } 61 61 62 - static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate, 63 - unsigned long *prate) 62 + static int sun4i_ddc_determine_rate(struct clk_hw *hw, 63 + struct clk_rate_request *req) 64 64 { 65 65 struct sun4i_ddc *ddc = hw_to_ddc(hw); 66 66 67 - return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div, 68 - ddc->m_offset, NULL, NULL); 67 + req->rate = sun4i_ddc_calc_divider(req->rate, req->best_parent_rate, 68 + ddc->pre_div, ddc->m_offset, NULL, NULL); 69 + 70 + return 0; 69 71 } 70 72 71 73 static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw, ··· 103 101 104 102 static const struct clk_ops sun4i_ddc_ops = { 105 103 .recalc_rate = sun4i_ddc_recalc_rate, 106 - .round_rate = sun4i_ddc_round_rate, 104 + .determine_rate = sun4i_ddc_determine_rate, 107 105 .set_rate = sun4i_ddc_set_rate, 108 106 }; 109 107
+10 -8
drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
··· 67 67 return parent_rate / val; 68 68 } 69 69 70 - static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, 71 - unsigned long *parent_rate) 70 + static int sun4i_dclk_determine_rate(struct clk_hw *hw, 71 + struct clk_rate_request *req) 72 72 { 73 73 struct sun4i_dclk *dclk = hw_to_dclk(hw); 74 74 struct sun4i_tcon *tcon = dclk->tcon; ··· 77 77 int i; 78 78 79 79 for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) { 80 - u64 ideal = (u64)rate * i; 80 + u64 ideal = (u64)req->rate * i; 81 81 unsigned long rounded; 82 82 83 83 /* ··· 99 99 goto out; 100 100 } 101 101 102 - if (abs(rate - rounded / i) < 103 - abs(rate - best_parent / best_div)) { 102 + if (abs(req->rate - rounded / i) < 103 + abs(req->rate - best_parent / best_div)) { 104 104 best_parent = rounded; 105 105 best_div = i; 106 106 } 107 107 } 108 108 109 109 out: 110 - *parent_rate = best_parent; 110 + req->best_parent_rate = best_parent; 111 111 112 - return best_parent / best_div; 112 + req->rate = best_parent / best_div; 113 + 114 + return 0; 113 115 } 114 116 115 117 static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 157 155 .is_enabled = sun4i_dclk_is_enabled, 158 156 159 157 .recalc_rate = sun4i_dclk_recalc_rate, 160 - .round_rate = sun4i_dclk_round_rate, 158 + .determine_rate = sun4i_dclk_determine_rate, 161 159 .set_rate = sun4i_dclk_set_rate, 162 160 163 161 .get_phase = sun4i_dclk_get_phase,
+32 -2
drivers/gpu/drm/sysfb/drm_sysfb_helper.h
··· 10 10 11 11 #include <drm/drm_crtc.h> 12 12 #include <drm/drm_device.h> 13 + #include <drm/drm_gem_atomic_helper.h> 13 14 #include <drm/drm_modes.h> 14 15 15 16 struct drm_format_info; 16 17 struct drm_scanout_buffer; 17 18 struct screen_info; 19 + 20 + typedef void (*drm_sysfb_blit_func)(struct iosys_map *, const unsigned int *, 21 + const struct iosys_map *, 22 + const struct drm_framebuffer *, 23 + const struct drm_rect *, 24 + struct drm_format_conv_state *); 18 25 19 26 /* 20 27 * Input parsing ··· 100 93 * Plane 101 94 */ 102 95 96 + struct drm_sysfb_plane_state { 97 + struct drm_shadow_plane_state base; 98 + 99 + /* transfers framebuffer data to scanout buffer in CRTC format */ 100 + drm_sysfb_blit_func blit_to_crtc; 101 + }; 102 + 103 + static inline struct drm_sysfb_plane_state * 104 + to_drm_sysfb_plane_state(struct drm_plane_state *base) 105 + { 106 + return container_of(to_drm_shadow_plane_state(base), struct drm_sysfb_plane_state, base); 107 + } 108 + 103 109 size_t drm_sysfb_build_fourcc_list(struct drm_device *dev, 104 110 const u32 *native_fourccs, size_t native_nfourccs, 105 111 u32 *fourccs_out, size_t nfourccs_out); 106 112 113 + int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane, 114 + struct drm_plane_state *plane_state); 107 115 int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane, 108 116 struct drm_atomic_state *new_state); 109 117 void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, ··· 136 114 DRM_FORMAT_MOD_INVALID 137 115 138 116 #define DRM_SYSFB_PLANE_HELPER_FUNCS \ 139 - DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \ 117 + .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access, \ 118 + .end_fb_access = drm_gem_end_shadow_fb_access, \ 140 119 .atomic_check = drm_sysfb_plane_helper_atomic_check, \ 141 120 .atomic_update = drm_sysfb_plane_helper_atomic_update, \ 142 121 .atomic_disable = drm_sysfb_plane_helper_atomic_disable, \ 143 122 .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer 
144 123 124 + void drm_sysfb_plane_reset(struct drm_plane *plane); 125 + struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane); 126 + void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane, 127 + struct drm_plane_state *plane_state); 128 + 145 129 #define DRM_SYSFB_PLANE_FUNCS \ 130 + .reset = drm_sysfb_plane_reset, \ 146 131 .update_plane = drm_atomic_helper_update_plane, \ 147 132 .disable_plane = drm_atomic_helper_disable_plane, \ 148 - DRM_GEM_SHADOW_PLANE_FUNCS 133 + .atomic_duplicate_state = drm_sysfb_plane_atomic_duplicate_state, \ 134 + .atomic_destroy_state = drm_sysfb_plane_atomic_destroy_state 149 135 150 136 /* 151 137 * CRTC
+149 -4
drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
··· 11 11 #include <drm/drm_edid.h> 12 12 #include <drm/drm_fourcc.h> 13 13 #include <drm/drm_framebuffer.h> 14 - #include <drm/drm_gem_atomic_helper.h> 15 14 #include <drm/drm_gem_framebuffer_helper.h> 16 15 #include <drm/drm_panic.h> 17 16 #include <drm/drm_print.h> ··· 184 185 } 185 186 EXPORT_SYMBOL(drm_sysfb_build_fourcc_list); 186 187 188 + static void drm_sysfb_plane_state_destroy(struct drm_sysfb_plane_state *sysfb_plane_state) 189 + { 190 + __drm_gem_destroy_shadow_plane_state(&sysfb_plane_state->base); 191 + 192 + kfree(sysfb_plane_state); 193 + } 194 + 195 + static void drm_sysfb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch, 196 + const struct iosys_map *src, const struct drm_framebuffer *fb, 197 + const struct drm_rect *clip, struct drm_format_conv_state *state) 198 + { 199 + drm_fb_memcpy(dst, dst_pitch, src, fb, clip); 200 + } 201 + 202 + static drm_sysfb_blit_func drm_sysfb_get_blit_func(u32 dst_format, u32 src_format) 203 + { 204 + if (src_format == dst_format) { 205 + return drm_sysfb_memcpy; 206 + } else if (src_format == DRM_FORMAT_XRGB8888) { 207 + switch (dst_format) { 208 + case DRM_FORMAT_RGB565: 209 + return drm_fb_xrgb8888_to_rgb565; 210 + case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN: 211 + return drm_fb_xrgb8888_to_rgb565be; 212 + case DRM_FORMAT_XRGB1555: 213 + return drm_fb_xrgb8888_to_xrgb1555; 214 + case DRM_FORMAT_ARGB1555: 215 + return drm_fb_xrgb8888_to_argb1555; 216 + case DRM_FORMAT_RGBA5551: 217 + return drm_fb_xrgb8888_to_rgba5551; 218 + case DRM_FORMAT_RGB888: 219 + return drm_fb_xrgb8888_to_rgb888; 220 + case DRM_FORMAT_BGR888: 221 + return drm_fb_xrgb8888_to_bgr888; 222 + case DRM_FORMAT_ARGB8888: 223 + return drm_fb_xrgb8888_to_argb8888; 224 + case DRM_FORMAT_XBGR8888: 225 + return drm_fb_xrgb8888_to_xbgr8888; 226 + case DRM_FORMAT_ABGR8888: 227 + return drm_fb_xrgb8888_to_abgr8888; 228 + case DRM_FORMAT_XRGB2101010: 229 + return drm_fb_xrgb8888_to_xrgb2101010; 230 + case DRM_FORMAT_ARGB2101010: 231 + return 
drm_fb_xrgb8888_to_argb2101010; 232 + case DRM_FORMAT_BGRX8888: 233 + return drm_fb_xrgb8888_to_bgrx8888; 234 + case DRM_FORMAT_RGB332: 235 + return drm_fb_xrgb8888_to_rgb332; 236 + } 237 + } 238 + 239 + return NULL; 240 + } 241 + 242 + int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane, 243 + struct drm_plane_state *plane_state) 244 + { 245 + struct drm_device *dev = plane->dev; 246 + struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); 247 + struct drm_framebuffer *fb = plane_state->fb; 248 + struct drm_crtc_state *crtc_state; 249 + struct drm_sysfb_crtc_state *sysfb_crtc_state; 250 + drm_sysfb_blit_func blit_to_crtc; 251 + int ret; 252 + 253 + ret = drm_gem_begin_shadow_fb_access(plane, plane_state); 254 + if (ret) 255 + return ret; 256 + 257 + if (!fb) 258 + return 0; 259 + 260 + ret = -EINVAL; 261 + 262 + crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); 263 + if (drm_WARN_ON_ONCE(dev, !crtc_state)) 264 + goto err_drm_gem_end_shadow_fb_access; 265 + sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); 266 + 267 + if (drm_WARN_ON_ONCE(dev, !sysfb_crtc_state->format)) 268 + goto err_drm_gem_end_shadow_fb_access; 269 + blit_to_crtc = drm_sysfb_get_blit_func(sysfb_crtc_state->format->format, 270 + fb->format->format); 271 + if (!blit_to_crtc) { 272 + drm_warn_once(dev, "No blit helper from %p4cc to %p4cc found.\n", 273 + &fb->format->format, &sysfb_crtc_state->format->format); 274 + goto err_drm_gem_end_shadow_fb_access; 275 + } 276 + sysfb_plane_state->blit_to_crtc = blit_to_crtc; 277 + 278 + return 0; 279 + 280 + err_drm_gem_end_shadow_fb_access: 281 + drm_gem_end_shadow_fb_access(plane, plane_state); 282 + return ret; 283 + } 284 + EXPORT_SYMBOL(drm_sysfb_plane_helper_begin_fb_access); 285 + 187 286 int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane, 188 287 struct drm_atomic_state *new_state) 189 288 { ··· 332 235 struct drm_sysfb_device *sysfb = 
to_drm_sysfb_device(dev); 333 236 struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); 334 237 struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); 335 - struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 238 + struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); 239 + struct drm_shadow_plane_state *shadow_plane_state = &sysfb_plane_state->base; 336 240 struct drm_framebuffer *fb = plane_state->fb; 337 241 unsigned int dst_pitch = sysfb->fb_pitch; 338 242 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); 339 243 struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); 340 244 const struct drm_format_info *dst_format = sysfb_crtc_state->format; 245 + drm_sysfb_blit_func blit_to_crtc = sysfb_plane_state->blit_to_crtc; 341 246 struct drm_atomic_helper_damage_iter iter; 342 247 struct drm_rect damage; 343 248 int ret, idx; ··· 360 261 continue; 361 262 362 263 iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip)); 363 - drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb, 364 - &damage, &shadow_plane_state->fmtcnv_state); 264 + blit_to_crtc(&dst, &dst_pitch, shadow_plane_state->data, fb, &damage, 265 + &shadow_plane_state->fmtcnv_state); 365 266 } 366 267 367 268 drm_dev_exit(idx); ··· 419 320 return 0; 420 321 } 421 322 EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer); 323 + 324 + void drm_sysfb_plane_reset(struct drm_plane *plane) 325 + { 326 + struct drm_sysfb_plane_state *sysfb_plane_state; 327 + 328 + if (plane->state) 329 + drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane->state)); 330 + 331 + sysfb_plane_state = kzalloc(sizeof(*sysfb_plane_state), GFP_KERNEL); 332 + if (sysfb_plane_state) 333 + __drm_gem_reset_shadow_plane(plane, &sysfb_plane_state->base); 334 + else 335 + 
__drm_gem_reset_shadow_plane(plane, NULL); 336 + } 337 + EXPORT_SYMBOL(drm_sysfb_plane_reset); 338 + 339 + struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane) 340 + { 341 + struct drm_device *dev = plane->dev; 342 + struct drm_plane_state *plane_state = plane->state; 343 + struct drm_sysfb_plane_state *sysfb_plane_state; 344 + struct drm_sysfb_plane_state *new_sysfb_plane_state; 345 + struct drm_shadow_plane_state *new_shadow_plane_state; 346 + 347 + if (drm_WARN_ON(dev, !plane_state)) 348 + return NULL; 349 + sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); 350 + 351 + new_sysfb_plane_state = kzalloc(sizeof(*new_sysfb_plane_state), GFP_KERNEL); 352 + if (!new_sysfb_plane_state) 353 + return NULL; 354 + new_shadow_plane_state = &new_sysfb_plane_state->base; 355 + 356 + __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state); 357 + new_sysfb_plane_state->blit_to_crtc = sysfb_plane_state->blit_to_crtc; 358 + 359 + return &new_shadow_plane_state->base; 360 + } 361 + EXPORT_SYMBOL(drm_sysfb_plane_atomic_duplicate_state); 362 + 363 + void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane, 364 + struct drm_plane_state *plane_state) 365 + { 366 + drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane_state)); 367 + } 368 + EXPORT_SYMBOL(drm_sysfb_plane_atomic_destroy_state); 422 369 423 370 /* 424 371 * CRTC
+2 -1
drivers/gpu/drm/sysfb/simpledrm.c
··· 2 2 3 3 #include <linux/aperture.h> 4 4 #include <linux/clk.h> 5 - #include <linux/of_clk.h> 6 5 #include <linux/minmax.h> 6 + #include <linux/of_address.h> 7 + #include <linux/of_clk.h> 7 8 #include <linux/of_reserved_mem.h> 8 9 #include <linux/platform_data/simplefb.h> 9 10 #include <linux/platform_device.h>
+2 -1
drivers/gpu/drm/sysfb/vesadrm.c
··· 295 295 } 296 296 297 297 static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = { 298 - DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, 298 + .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access, 299 + .end_fb_access = drm_gem_end_shadow_fb_access, 299 300 .atomic_check = vesadrm_primary_plane_helper_atomic_check, 300 301 .atomic_update = drm_sysfb_plane_helper_atomic_update, 301 302 .atomic_disable = drm_sysfb_plane_helper_atomic_disable,
+5 -3
drivers/gpu/drm/tegra/gem.c
··· 16 16 #include <linux/vmalloc.h> 17 17 18 18 #include <drm/drm_drv.h> 19 + #include <drm/drm_dumb_buffers.h> 19 20 #include <drm/drm_prime.h> 20 21 21 22 #include "drm.h" ··· 543 542 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 544 543 struct drm_mode_create_dumb *args) 545 544 { 546 - unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 547 545 struct tegra_drm *tegra = drm->dev_private; 548 546 struct tegra_bo *bo; 547 + int ret; 549 548 550 - args->pitch = round_up(min_pitch, tegra->pitch_align); 551 - args->size = args->pitch * args->height; 549 + ret = drm_mode_size_dumb(drm, args, tegra->pitch_align, 0); 550 + if (ret) 551 + return ret; 552 552 553 553 bo = tegra_bo_create_with_handle(file, drm, args->size, 0, 554 554 &args->handle);
+2 -2
drivers/gpu/drm/tegra/hdmi.c
··· 658 658 { 659 659 const u8 *ptr = data; 660 660 unsigned long offset; 661 - size_t i, j; 661 + size_t i; 662 662 u32 value; 663 663 664 664 switch (ptr[0]) { ··· 691 691 * - subpack_low: bytes 0 - 3 692 692 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) 693 693 */ 694 - for (i = 3, j = 0; i < size; i += 7, j += 8) { 694 + for (i = 3; i < size; i += 7) { 695 695 size_t rem = size - i, num = min_t(size_t, rem, 4); 696 696 697 697 value = tegra_hdmi_subpack(&ptr[i], num);
+2 -2
drivers/gpu/drm/tegra/sor.c
··· 1864 1864 { 1865 1865 const u8 *ptr = data; 1866 1866 unsigned long offset; 1867 - size_t i, j; 1867 + size_t i; 1868 1868 u32 value; 1869 1869 1870 1870 switch (ptr[0]) { ··· 1897 1897 * - subpack_low: bytes 0 - 3 1898 1898 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) 1899 1899 */ 1900 - for (i = 3, j = 0; i < size; i += 7, j += 8) { 1900 + for (i = 3; i < size; i += 7) { 1901 1901 size_t rem = size - i, num = min_t(size_t, rem, 4); 1902 1902 1903 1903 value = tegra_sor_hdmi_subpack(&ptr[i], num);
+2
drivers/gpu/drm/tests/.kunitconfig
··· 1 1 CONFIG_KUNIT=y 2 2 CONFIG_DRM=y 3 + CONFIG_DRM_VKMS=y 4 + CONFIG_DRM_FBDEV_EMULATION=y 3 5 CONFIG_DRM_KUNIT_TEST=y
+17 -16
drivers/gpu/drm/tidss/tidss_crtc.c
··· 94 94 struct drm_display_mode *mode; 95 95 enum drm_mode_status ok; 96 96 97 - dev_dbg(ddev->dev, "%s\n", __func__); 98 - 99 97 if (!crtc_state->enable) 100 98 return 0; 101 99 ··· 101 103 102 104 ok = dispc_vp_mode_valid(dispc, hw_videoport, mode); 103 105 if (ok != MODE_OK) { 104 - dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n", 106 + drm_dbg(ddev, "%s: bad mode: %ux%u pclk %u kHz\n", 105 107 __func__, mode->hdisplay, mode->vdisplay, mode->clock); 106 108 return -EINVAL; 107 109 } ··· 170 172 struct tidss_device *tidss = to_tidss(ddev); 171 173 unsigned long flags; 172 174 173 - dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n", 175 + drm_dbg(ddev, "%s: %s is %sactive, %s modeset, event %p\n", 174 176 __func__, crtc->name, crtc->state->active ? "" : "not ", 175 177 drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need", 176 178 crtc->state->event); ··· 326 328 struct drm_device *ddev = crtc->dev; 327 329 struct tidss_device *tidss = to_tidss(ddev); 328 330 329 - dev_dbg(ddev->dev, "%s\n", __func__); 330 - 331 331 tidss_runtime_get(tidss); 332 332 333 333 tidss_irq_enable_vblank(crtc); ··· 338 342 struct drm_device *ddev = crtc->dev; 339 343 struct tidss_device *tidss = to_tidss(ddev); 340 344 341 - dev_dbg(ddev->dev, "%s\n", __func__); 342 - 343 345 tidss_irq_disable_vblank(crtc); 344 346 345 347 tidss_runtime_put(tidss); 346 348 } 347 349 350 + static void tidss_crtc_destroy_state(struct drm_crtc *crtc, 351 + struct drm_crtc_state *state) 352 + { 353 + struct tidss_crtc_state *tstate = to_tidss_crtc_state(state); 354 + 355 + __drm_atomic_helper_crtc_destroy_state(&tstate->base); 356 + kfree(tstate); 357 + } 358 + 348 359 static void tidss_crtc_reset(struct drm_crtc *crtc) 349 360 { 350 - struct tidss_crtc_state *tcrtc; 361 + struct tidss_crtc_state *tstate; 351 362 352 363 if (crtc->state) 353 - __drm_atomic_helper_crtc_destroy_state(crtc->state); 364 + tidss_crtc_destroy_state(crtc, crtc->state); 354 365 355 - 
kfree(crtc->state); 356 - 357 - tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL); 358 - if (!tcrtc) { 366 + tstate = kzalloc(sizeof(*tstate), GFP_KERNEL); 367 + if (!tstate) { 359 368 crtc->state = NULL; 360 369 return; 361 370 } 362 371 363 - __drm_atomic_helper_crtc_reset(crtc, &tcrtc->base); 372 + __drm_atomic_helper_crtc_reset(crtc, &tstate->base); 364 373 } 365 374 366 375 static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc) ··· 405 404 .set_config = drm_atomic_helper_set_config, 406 405 .page_flip = drm_atomic_helper_page_flip, 407 406 .atomic_duplicate_state = tidss_crtc_duplicate_state, 408 - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 407 + .atomic_destroy_state = tidss_crtc_destroy_state, 409 408 .enable_vblank = tidss_crtc_enable_vblank, 410 409 .disable_vblank = tidss_crtc_disable_vblank, 411 410 };
+4 -6
drivers/gpu/drm/tidss/tidss_dispc.c
··· 1051 1051 int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport, 1052 1052 const struct drm_crtc_state *state) 1053 1053 { 1054 + struct tidss_device *tidss = dispc->tidss; 1055 + struct drm_device *dev = &tidss->ddev; 1054 1056 const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state); 1055 1057 const struct dispc_bus_format *fmt; 1056 1058 1057 1059 fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format, 1058 1060 tstate->bus_flags); 1059 1061 if (!fmt) { 1060 - dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n", 1062 + drm_dbg(dev, "%s: Unsupported bus format: %u\n", 1061 1063 __func__, tstate->bus_format); 1062 1064 return -EINVAL; 1063 1065 } 1064 1066 1065 1067 if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI_AM65X && 1066 1068 fmt->is_oldi_fmt) { 1067 - dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n", 1069 + drm_dbg(dev, "%s: %s is not OLDI-port\n", 1068 1070 __func__, dispc->feat->vp_name[hw_videoport]); 1069 1071 return -EINVAL; 1070 1072 } ··· 2851 2849 2852 2850 void dispc_remove(struct tidss_device *tidss) 2853 2851 { 2854 - dev_dbg(tidss->dev, "%s\n", __func__); 2855 - 2856 2852 tidss->dispc = NULL; 2857 2853 } 2858 2854 ··· 2991 2991 const struct dispc_features *feat; 2992 2992 unsigned int i, num_fourccs; 2993 2993 int r = 0; 2994 - 2995 - dev_dbg(dev, "%s\n", __func__); 2996 2994 2997 2995 feat = tidss->feat; 2998 2996
-16
drivers/gpu/drm/tidss/tidss_drv.c
··· 33 33 { 34 34 int r; 35 35 36 - dev_dbg(tidss->dev, "%s\n", __func__); 37 - 38 36 r = pm_runtime_resume_and_get(tidss->dev); 39 37 WARN_ON(r < 0); 40 38 return r; ··· 41 43 void tidss_runtime_put(struct tidss_device *tidss) 42 44 { 43 45 int r; 44 - 45 - dev_dbg(tidss->dev, "%s\n", __func__); 46 46 47 47 pm_runtime_mark_last_busy(tidss->dev); 48 48 ··· 52 56 { 53 57 struct tidss_device *tidss = dev_get_drvdata(dev); 54 58 55 - dev_dbg(dev, "%s\n", __func__); 56 - 57 59 return dispc_runtime_suspend(tidss->dispc); 58 60 } 59 61 ··· 59 65 { 60 66 struct tidss_device *tidss = dev_get_drvdata(dev); 61 67 int r; 62 - 63 - dev_dbg(dev, "%s\n", __func__); 64 68 65 69 r = dispc_runtime_resume(tidss->dispc); 66 70 if (r) ··· 71 79 { 72 80 struct tidss_device *tidss = dev_get_drvdata(dev); 73 81 74 - dev_dbg(dev, "%s\n", __func__); 75 - 76 82 return drm_mode_config_helper_suspend(&tidss->ddev); 77 83 } 78 84 79 85 static int __maybe_unused tidss_resume(struct device *dev) 80 86 { 81 87 struct tidss_device *tidss = dev_get_drvdata(dev); 82 - 83 - dev_dbg(dev, "%s\n", __func__); 84 88 85 89 return drm_mode_config_helper_resume(&tidss->ddev); 86 90 } ··· 114 126 struct drm_device *ddev; 115 127 int ret; 116 128 int irq; 117 - 118 - dev_dbg(dev, "%s\n", __func__); 119 129 120 130 tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver, 121 131 struct tidss_device, ddev); ··· 213 227 struct device *dev = &pdev->dev; 214 228 struct tidss_device *tidss = platform_get_drvdata(pdev); 215 229 struct drm_device *ddev = &tidss->ddev; 216 - 217 - dev_dbg(dev, "%s\n", __func__); 218 230 219 231 drm_dev_unregister(ddev); 220 232
-4
drivers/gpu/drm/tidss/tidss_kms.c
··· 24 24 struct drm_device *ddev = old_state->dev; 25 25 struct tidss_device *tidss = to_tidss(ddev); 26 26 27 - dev_dbg(ddev->dev, "%s\n", __func__); 28 - 29 27 tidss_runtime_get(tidss); 30 28 31 29 drm_atomic_helper_commit_modeset_disables(ddev, old_state); ··· 242 244 { 243 245 struct drm_device *ddev = &tidss->ddev; 244 246 int ret; 245 - 246 - dev_dbg(tidss->dev, "%s\n", __func__); 247 247 248 248 ret = drmm_mode_config_init(ddev); 249 249 if (ret)
-8
drivers/gpu/drm/tidss/tidss_plane.c
··· 42 42 u32 hw_videoport; 43 43 int ret; 44 44 45 - dev_dbg(ddev->dev, "%s\n", __func__); 46 - 47 45 if (!new_plane_state->crtc) { 48 46 /* 49 47 * The visible field is not reset by the DRM core but only ··· 122 124 plane); 123 125 u32 hw_videoport; 124 126 125 - dev_dbg(ddev->dev, "%s\n", __func__); 126 - 127 127 if (!new_state->visible) { 128 128 dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false); 129 129 return; ··· 139 143 struct tidss_device *tidss = to_tidss(ddev); 140 144 struct tidss_plane *tplane = to_tidss_plane(plane); 141 145 142 - dev_dbg(ddev->dev, "%s\n", __func__); 143 - 144 146 dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true); 145 147 } 146 148 ··· 148 154 struct drm_device *ddev = plane->dev; 149 155 struct tidss_device *tidss = to_tidss(ddev); 150 156 struct tidss_plane *tplane = to_tidss_plane(plane); 151 - 152 - dev_dbg(ddev->dev, "%s\n", __func__); 153 157 154 158 dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false); 155 159 }
+6 -6
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
··· 379 379 dma_resv_fini(resv); 380 380 } 381 381 382 - static void ttm_bo_put_basic(struct kunit *test) 382 + static void ttm_bo_fini_basic(struct kunit *test) 383 383 { 384 384 struct ttm_test_devices *priv = test->priv; 385 385 struct ttm_buffer_object *bo; ··· 410 410 dma_resv_unlock(bo->base.resv); 411 411 KUNIT_EXPECT_EQ(test, err, 0); 412 412 413 - ttm_bo_put(bo); 413 + ttm_bo_fini(bo); 414 414 } 415 415 416 416 static const char *mock_name(struct dma_fence *f) ··· 423 423 .get_timeline_name = mock_name, 424 424 }; 425 425 426 - static void ttm_bo_put_shared_resv(struct kunit *test) 426 + static void ttm_bo_fini_shared_resv(struct kunit *test) 427 427 { 428 428 struct ttm_test_devices *priv = test->priv; 429 429 struct ttm_buffer_object *bo; ··· 463 463 bo->type = ttm_bo_type_device; 464 464 bo->base.resv = external_resv; 465 465 466 - ttm_bo_put(bo); 466 + ttm_bo_fini(bo); 467 467 } 468 468 469 469 static void ttm_bo_pin_basic(struct kunit *test) ··· 616 616 KUNIT_CASE(ttm_bo_unreserve_basic), 617 617 KUNIT_CASE(ttm_bo_unreserve_pinned), 618 618 KUNIT_CASE(ttm_bo_unreserve_bulk), 619 - KUNIT_CASE(ttm_bo_put_basic), 620 - KUNIT_CASE(ttm_bo_put_shared_resv), 619 + KUNIT_CASE(ttm_bo_fini_basic), 620 + KUNIT_CASE(ttm_bo_fini_shared_resv), 621 621 KUNIT_CASE(ttm_bo_pin_basic), 622 622 KUNIT_CASE(ttm_bo_pin_unpin_resource), 623 623 KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
+30 -30
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
··· 144 144 drm_mm_node_allocated(&bo->base.vma_node.vm_node)); 145 145 146 146 ttm_resource_free(bo, &bo->resource); 147 - ttm_bo_put(bo); 147 + ttm_bo_fini(bo); 148 148 } 149 149 150 150 static void ttm_bo_init_reserved_mock_man(struct kunit *test) ··· 186 186 drm_mm_node_allocated(&bo->base.vma_node.vm_node)); 187 187 188 188 ttm_resource_free(bo, &bo->resource); 189 - ttm_bo_put(bo); 189 + ttm_bo_fini(bo); 190 190 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 191 191 } 192 192 ··· 221 221 KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv); 222 222 223 223 ttm_resource_free(bo, &bo->resource); 224 - ttm_bo_put(bo); 224 + ttm_bo_fini(bo); 225 225 } 226 226 227 227 static void ttm_bo_validate_basic(struct kunit *test) ··· 265 265 KUNIT_EXPECT_EQ(test, bo->resource->placement, 266 266 DRM_BUDDY_TOPDOWN_ALLOCATION); 267 267 268 - ttm_bo_put(bo); 268 + ttm_bo_fini(bo); 269 269 ttm_mock_manager_fini(priv->ttm_dev, snd_mem); 270 270 } 271 271 ··· 292 292 293 293 KUNIT_EXPECT_EQ(test, err, -ENOMEM); 294 294 295 - ttm_bo_put(bo); 295 + ttm_bo_fini(bo); 296 296 } 297 297 298 298 static void ttm_bo_validate_failed_alloc(struct kunit *test) ··· 321 321 322 322 KUNIT_EXPECT_EQ(test, err, -ENOMEM); 323 323 324 - ttm_bo_put(bo); 324 + ttm_bo_fini(bo); 325 325 ttm_bad_manager_fini(priv->ttm_dev, mem_type); 326 326 } 327 327 ··· 353 353 ttm_bo_unpin(bo); 354 354 dma_resv_unlock(bo->base.resv); 355 355 356 - ttm_bo_put(bo); 356 + ttm_bo_fini(bo); 357 357 } 358 358 359 359 static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = { ··· 403 403 KUNIT_EXPECT_EQ(test, err, 0); 404 404 KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0); 405 405 406 - ttm_bo_put(bo); 406 + ttm_bo_fini(bo); 407 407 408 408 if (params->mem_type != TTM_PL_SYSTEM) 409 409 ttm_mock_manager_fini(priv->ttm_dev, params->mem_type); ··· 452 452 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem); 453 453 KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority])); 454 454 455 - ttm_bo_put(bo); 455 
+ ttm_bo_fini(bo); 456 456 ttm_bad_manager_fini(priv->ttm_dev, fst_mem); 457 457 ttm_mock_manager_fini(priv->ttm_dev, snd_mem); 458 458 } ··· 495 495 KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2); 496 496 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem); 497 497 498 - ttm_bo_put(bo); 498 + ttm_bo_fini(bo); 499 499 500 500 ttm_mock_manager_fini(priv->ttm_dev, fst_mem); 501 501 ttm_mock_manager_fini(priv->ttm_dev, tmp_mem); ··· 567 567 KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC); 568 568 } 569 569 570 - ttm_bo_put(bo); 570 + ttm_bo_fini(bo); 571 571 } 572 572 573 573 static int threaded_dma_resv_signal(void *arg) ··· 635 635 /* Make sure we have an idle object at this point */ 636 636 dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT); 637 637 638 - ttm_bo_put(bo); 638 + ttm_bo_fini(bo); 639 639 } 640 640 641 641 static void ttm_bo_validate_move_fence_signaled(struct kunit *test) ··· 668 668 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type); 669 669 KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size); 670 670 671 - ttm_bo_put(bo); 671 + ttm_bo_fini(bo); 672 672 dma_fence_put(man->move); 673 673 } 674 674 ··· 753 753 else 754 754 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem); 755 755 756 - ttm_bo_put(bo); 756 + ttm_bo_fini(bo); 757 757 ttm_mock_manager_fini(priv->ttm_dev, fst_mem); 758 758 ttm_mock_manager_fini(priv->ttm_dev, snd_mem); 759 759 } ··· 807 807 KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type); 808 808 809 809 for (i = 0; i < bo_no; i++) 810 - ttm_bo_put(&bos[i]); 811 - ttm_bo_put(bo_val); 810 + ttm_bo_fini(&bos[i]); 811 + ttm_bo_fini(bo_val); 812 812 813 813 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 814 814 ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); ··· 852 852 853 853 KUNIT_EXPECT_EQ(test, err, -ENOMEM); 854 854 855 - ttm_bo_put(bo_small); 855 + ttm_bo_fini(bo_small); 856 856 857 857 ttm_bo_reserve(bo_big, false, false, NULL); 858 858 ttm_bo_unpin(bo_big); 859 859 
dma_resv_unlock(bo_big->base.resv); 860 - ttm_bo_put(bo_big); 860 + ttm_bo_fini(bo_big); 861 861 862 862 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 863 863 ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); ··· 916 916 KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict); 917 917 KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE); 918 918 919 - ttm_bo_put(bo); 920 - ttm_bo_put(bo_evictable); 919 + ttm_bo_fini(bo); 920 + ttm_bo_fini(bo_evictable); 921 921 922 922 ttm_bo_reserve(bo_pinned, false, false, NULL); 923 923 ttm_bo_unpin(bo_pinned); 924 924 dma_resv_unlock(bo_pinned->base.resv); 925 - ttm_bo_put(bo_pinned); 925 + ttm_bo_fini(bo_pinned); 926 926 927 927 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 928 928 ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); ··· 973 973 KUNIT_EXPECT_NULL(test, bo_big->ttm); 974 974 KUNIT_EXPECT_NULL(test, bo_big->resource); 975 975 976 - ttm_bo_put(bo_small); 977 - ttm_bo_put(bo_big); 976 + ttm_bo_fini(bo_small); 977 + ttm_bo_fini(bo_big); 978 978 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 979 979 } 980 980 ··· 1025 1025 KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type); 1026 1026 KUNIT_EXPECT_NULL(test, bo_val->resource); 1027 1027 1028 - ttm_bo_put(bo_init); 1029 - ttm_bo_put(bo_val); 1028 + ttm_bo_fini(bo_init); 1029 + ttm_bo_fini(bo_val); 1030 1030 1031 1031 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 1032 1032 ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict); ··· 1070 1070 KUNIT_ASSERT_NULL(test, bo_evict->resource); 1071 1071 KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC); 1072 1072 1073 - ttm_bo_put(bo_evict); 1074 - ttm_bo_put(bo); 1073 + ttm_bo_fini(bo_evict); 1074 + ttm_bo_fini(bo); 1075 1075 1076 1076 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 1077 1077 } ··· 1128 1128 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 1129 1129 ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict); 1130 1130 1131 - ttm_bo_put(bo_val); 1132 
- ttm_bo_put(bo_tt); 1133 - ttm_bo_put(bo_mock); 1131 + ttm_bo_fini(bo_val); 1132 + ttm_bo_fini(bo_tt); 1133 + ttm_bo_fini(bo_mock); 1134 1134 } 1135 1135 1136 1136 static struct kunit_case ttm_bo_validate_test_cases[] = {
+7 -8
drivers/gpu/drm/ttm/ttm_bo.c
··· 318 318 bo->destroy(bo); 319 319 } 320 320 321 - /** 322 - * ttm_bo_put 323 - * 324 - * @bo: The buffer object. 325 - * 326 - * Unreference a buffer object. 327 - */ 321 + /* TODO: remove! */ 328 322 void ttm_bo_put(struct ttm_buffer_object *bo) 329 323 { 330 324 kref_put(&bo->kref, ttm_bo_release); 331 325 } 332 - EXPORT_SYMBOL(ttm_bo_put); 326 + 327 + void ttm_bo_fini(struct ttm_buffer_object *bo) 328 + { 329 + ttm_bo_put(bo); 330 + } 331 + EXPORT_SYMBOL(ttm_bo_fini); 333 332 334 333 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, 335 334 struct ttm_operation_ctx *ctx,
+2
drivers/gpu/drm/ttm/ttm_bo_internal.h
··· 55 55 return bo; 56 56 } 57 57 58 + void ttm_bo_put(struct ttm_buffer_object *bo); 59 + 58 60 #endif
+1
drivers/gpu/drm/vc4/Kconfig
··· 35 35 bool "Broadcom VC4 HDMI CEC Support" 36 36 depends on DRM_VC4 37 37 select CEC_CORE 38 + select DRM_DISPLAY_HDMI_CEC_HELPER 38 39 help 39 40 Choose this option if you have a Broadcom VC4 GPU 40 41 and want to use CEC.
+54 -83
drivers/gpu/drm/vc4/vc4_hdmi.c
··· 32 32 */ 33 33 34 34 #include <drm/display/drm_hdmi_audio_helper.h> 35 + #include <drm/display/drm_hdmi_cec_helper.h> 35 36 #include <drm/display/drm_hdmi_helper.h> 36 37 #include <drm/display/drm_hdmi_state_helper.h> 37 38 #include <drm/display/drm_scdc_helper.h> ··· 375 374 */ 376 375 377 376 drm_atomic_helper_connector_hdmi_hotplug(connector, status); 378 - 379 - if (status == connector_status_disconnected) { 380 - cec_phys_addr_invalidate(vc4_hdmi->cec_adap); 381 - return; 382 - } 383 - 384 - cec_s_phys_addr(vc4_hdmi->cec_adap, 385 - connector->display_info.source_physical_address, false); 386 377 387 378 if (status != connector_status_connected) 388 379 return; ··· 2377 2384 struct vc4_hdmi *vc4_hdmi = priv; 2378 2385 2379 2386 if (vc4_hdmi->cec_rx_msg.len) 2380 - cec_received_msg(vc4_hdmi->cec_adap, 2381 - &vc4_hdmi->cec_rx_msg); 2387 + drm_connector_hdmi_cec_received_msg(&vc4_hdmi->connector, 2388 + &vc4_hdmi->cec_rx_msg); 2382 2389 2383 2390 return IRQ_HANDLED; 2384 2391 } ··· 2388 2395 struct vc4_hdmi *vc4_hdmi = priv; 2389 2396 2390 2397 if (vc4_hdmi->cec_tx_ok) { 2391 - cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK, 2392 - 0, 0, 0, 0); 2398 + drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector, 2399 + CEC_TX_STATUS_OK, 2400 + 0, 0, 0, 0); 2393 2401 } else { 2394 2402 /* 2395 2403 * This CEC implementation makes 1 retry, so if we 2396 2404 * get a NACK, then that means it made 2 attempts. 
2397 2405 */ 2398 - cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK, 2399 - 0, 2, 0, 0); 2406 + drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector, 2407 + CEC_TX_STATUS_NACK, 2408 + 0, 2, 0, 0); 2400 2409 } 2401 2410 return IRQ_HANDLED; 2402 2411 } ··· 2555 2560 return ret; 2556 2561 } 2557 2562 2558 - static int vc4_hdmi_cec_enable(struct cec_adapter *adap) 2563 + static int vc4_hdmi_cec_enable(struct drm_connector *connector) 2559 2564 { 2560 - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); 2565 + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); 2561 2566 struct drm_device *drm = vc4_hdmi->connector.dev; 2562 2567 /* clock period in microseconds */ 2563 2568 const u32 usecs = 1000000 / CEC_CLOCK_FREQ; ··· 2622 2627 return 0; 2623 2628 } 2624 2629 2625 - static int vc4_hdmi_cec_disable(struct cec_adapter *adap) 2630 + static int vc4_hdmi_cec_disable(struct drm_connector *connector) 2626 2631 { 2627 - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); 2632 + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); 2628 2633 struct drm_device *drm = vc4_hdmi->connector.dev; 2629 2634 unsigned long flags; 2630 2635 int idx; ··· 2658 2663 return 0; 2659 2664 } 2660 2665 2661 - static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) 2666 + static int vc4_hdmi_cec_adap_enable(struct drm_connector *connector, bool enable) 2662 2667 { 2663 2668 if (enable) 2664 - return vc4_hdmi_cec_enable(adap); 2669 + return vc4_hdmi_cec_enable(connector); 2665 2670 else 2666 - return vc4_hdmi_cec_disable(adap); 2671 + return vc4_hdmi_cec_disable(connector); 2667 2672 } 2668 2673 2669 - static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) 2674 + static int vc4_hdmi_cec_adap_log_addr(struct drm_connector *connector, u8 log_addr) 2670 2675 { 2671 - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); 2676 + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); 2672 2677 struct drm_device *drm = 
vc4_hdmi->connector.dev; 2673 2678 unsigned long flags; 2674 2679 int idx; ··· 2694 2699 return 0; 2695 2700 } 2696 2701 2697 - static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, 2702 + static int vc4_hdmi_cec_adap_transmit(struct drm_connector *connector, u8 attempts, 2698 2703 u32 signal_free_time, struct cec_msg *msg) 2699 2704 { 2700 - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); 2705 + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); 2701 2706 struct drm_device *dev = vc4_hdmi->connector.dev; 2702 2707 unsigned long flags; 2703 2708 u32 val; ··· 2740 2745 return 0; 2741 2746 } 2742 2747 2743 - static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = { 2744 - .adap_enable = vc4_hdmi_cec_adap_enable, 2745 - .adap_log_addr = vc4_hdmi_cec_adap_log_addr, 2746 - .adap_transmit = vc4_hdmi_cec_adap_transmit, 2747 - }; 2748 - 2749 - static void vc4_hdmi_cec_release(void *ptr) 2748 + static int vc4_hdmi_cec_init(struct drm_connector *connector) 2750 2749 { 2751 - struct vc4_hdmi *vc4_hdmi = ptr; 2752 - 2753 - cec_unregister_adapter(vc4_hdmi->cec_adap); 2754 - vc4_hdmi->cec_adap = NULL; 2755 - } 2756 - 2757 - static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) 2758 - { 2759 - struct cec_connector_info conn_info; 2750 + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); 2760 2751 struct platform_device *pdev = vc4_hdmi->pdev; 2761 2752 struct device *dev = &pdev->dev; 2762 2753 int ret; 2763 - 2764 - if (!of_property_present(dev->of_node, "interrupts")) { 2765 - dev_warn(dev, "'interrupts' DT property is missing, no CEC\n"); 2766 - return 0; 2767 - } 2768 - 2769 - vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, 2770 - vc4_hdmi, 2771 - vc4_hdmi->variant->card_name, 2772 - CEC_CAP_DEFAULTS | 2773 - CEC_CAP_CONNECTOR_INFO, 1); 2774 - ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap); 2775 - if (ret < 0) 2776 - return ret; 2777 - 2778 - cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector); 2779 - 
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info); 2780 2754 2781 2755 if (vc4_hdmi->variant->external_irq_controller) { 2782 2756 ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"), ··· 2753 2789 vc4_cec_irq_handler_rx_thread, 0, 2754 2790 "vc4 hdmi cec rx", vc4_hdmi); 2755 2791 if (ret) 2756 - goto err_delete_cec_adap; 2792 + return ret; 2757 2793 2758 2794 ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"), 2759 2795 vc4_cec_irq_handler_tx_bare, 2760 2796 vc4_cec_irq_handler_tx_thread, 0, 2761 2797 "vc4 hdmi cec tx", vc4_hdmi); 2762 2798 if (ret) 2763 - goto err_delete_cec_adap; 2799 + return ret; 2764 2800 } else { 2765 2801 ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), 2766 2802 vc4_cec_irq_handler, 2767 2803 vc4_cec_irq_handler_thread, 0, 2768 2804 "vc4 hdmi cec", vc4_hdmi); 2769 2805 if (ret) 2770 - goto err_delete_cec_adap; 2806 + return ret; 2771 2807 } 2772 2808 2773 - ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev); 2774 - if (ret < 0) 2775 - goto err_delete_cec_adap; 2809 + return 0; 2810 + } 2811 + 2812 + static const struct drm_connector_hdmi_cec_funcs vc4_hdmi_cec_funcs = { 2813 + .init = vc4_hdmi_cec_init, 2814 + .enable = vc4_hdmi_cec_adap_enable, 2815 + .log_addr = vc4_hdmi_cec_adap_log_addr, 2816 + .transmit = vc4_hdmi_cec_adap_transmit, 2817 + }; 2818 + 2819 + static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi) 2820 + { 2821 + struct platform_device *pdev = vc4_hdmi->pdev; 2822 + struct device *dev = &pdev->dev; 2823 + 2824 + if (!of_property_present(dev->of_node, "interrupts")) { 2825 + dev_warn(dev, "'interrupts' DT property is missing, no CEC\n"); 2826 + return 0; 2827 + } 2776 2828 2777 2829 /* 2778 - * NOTE: Strictly speaking, we should probably use a DRM-managed 2779 - * registration there to avoid removing the CEC adapter by the 2780 - * time the DRM driver doesn't have any user anymore. 
2830 + * NOTE: the CEC adapter will be unregistered by drmm cleanup from 2831 + * drm_managed_release(), which is called from drm_dev_release() 2832 + * during device unbind. 2781 2833 * 2782 2834 * However, the CEC framework already cleans up the CEC adapter 2783 2835 * only when the last user has closed its file descriptor, so we 2784 2836 * don't need to handle it in DRM. 2785 - * 2786 - * By the time the device-managed hook is executed, we will give 2787 - * up our reference to the CEC adapter and therefore don't 2788 - * really care when it's actually freed. 2789 2837 * 2790 2838 * There's still a problematic sequence: if we unregister our 2791 2839 * CEC adapter, but the userspace keeps a handle on the CEC ··· 2809 2833 * the CEC framework already handles this too, by calling 2810 2834 * cec_is_registered() in cec_ioctl() and cec_poll(). 2811 2835 */ 2812 - ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi); 2813 - if (ret) 2814 - return ret; 2815 - 2816 - return 0; 2817 - 2818 - err_delete_cec_adap: 2819 - cec_delete_adapter(vc4_hdmi->cec_adap); 2820 - 2821 - return ret; 2836 + return drmm_connector_hdmi_cec_register(&vc4_hdmi->connector, 2837 + &vc4_hdmi_cec_funcs, 2838 + vc4_hdmi->variant->card_name, 2839 + 1, 2840 + &pdev->dev); 2822 2841 } 2823 2842 #else 2824 - static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) 2843 + static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi) 2825 2844 { 2826 2845 return 0; 2827 2846 } ··· 3221 3250 if (ret) 3222 3251 goto err_put_runtime_pm; 3223 3252 3224 - ret = vc4_hdmi_cec_init(vc4_hdmi); 3253 + ret = vc4_hdmi_cec_register(vc4_hdmi); 3225 3254 if (ret) 3226 3255 goto err_put_runtime_pm; 3227 3256
-1
drivers/gpu/drm/vc4/vc4_hdmi.h
··· 147 147 */ 148 148 bool disable_wifi_frequencies; 149 149 150 - struct cec_adapter *cec_adap; 151 150 struct cec_msg cec_rx_msg; 152 151 bool cec_tx_ok; 153 152 bool cec_irq_was_rx;
+7 -76
drivers/gpu/drm/vkms/vkms_crtc.c
··· 7 7 #include <drm/drm_managed.h> 8 8 #include <drm/drm_probe_helper.h> 9 9 #include <drm/drm_vblank.h> 10 + #include <drm/drm_vblank_helper.h> 10 11 11 12 #include "vkms_drv.h" 12 13 13 - static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) 14 + static bool vkms_crtc_handle_vblank_timeout(struct drm_crtc *crtc) 14 15 { 15 - struct vkms_output *output = container_of(timer, struct vkms_output, 16 - vblank_hrtimer); 17 - struct drm_crtc *crtc = &output->crtc; 16 + struct vkms_output *output = drm_crtc_to_vkms_output(crtc); 18 17 struct vkms_crtc_state *state; 19 - u64 ret_overrun; 20 18 bool ret, fence_cookie; 21 19 22 20 fence_cookie = dma_fence_begin_signalling(); 23 - 24 - ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, 25 - output->period_ns); 26 - if (ret_overrun != 1) 27 - pr_warn("%s: vblank timer overrun\n", __func__); 28 21 29 22 spin_lock(&output->lock); 30 23 ret = drm_crtc_handle_vblank(crtc); ··· 49 56 } 50 57 51 58 dma_fence_end_signalling(fence_cookie); 52 - 53 - return HRTIMER_RESTART; 54 - } 55 - 56 - static int vkms_enable_vblank(struct drm_crtc *crtc) 57 - { 58 - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); 59 - struct vkms_output *out = drm_crtc_to_vkms_output(crtc); 60 - 61 - hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC, 62 - HRTIMER_MODE_REL); 63 - out->period_ns = ktime_set(0, vblank->framedur_ns); 64 - hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); 65 - 66 - return 0; 67 - } 68 - 69 - static void vkms_disable_vblank(struct drm_crtc *crtc) 70 - { 71 - struct vkms_output *out = drm_crtc_to_vkms_output(crtc); 72 - 73 - hrtimer_cancel(&out->vblank_hrtimer); 74 - } 75 - 76 - static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc, 77 - int *max_error, ktime_t *vblank_time, 78 - bool in_vblank_irq) 79 - { 80 - struct vkms_output *output = drm_crtc_to_vkms_output(crtc); 81 - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); 82 - 83 - 
if (!READ_ONCE(vblank->enabled)) { 84 - *vblank_time = ktime_get(); 85 - return true; 86 - } 87 - 88 - *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); 89 - 90 - if (WARN_ON(*vblank_time == vblank->time)) 91 - return true; 92 - 93 - /* 94 - * To prevent races we roll the hrtimer forward before we do any 95 - * interrupt processing - this is how real hw works (the interrupt is 96 - * only generated after all the vblank registers are updated) and what 97 - * the vblank core expects. Therefore we need to always correct the 98 - * timestampe by one frame. 99 - */ 100 - *vblank_time -= output->period_ns; 101 59 102 60 return true; 103 61 } ··· 103 159 .reset = vkms_atomic_crtc_reset, 104 160 .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state, 105 161 .atomic_destroy_state = vkms_atomic_crtc_destroy_state, 106 - .enable_vblank = vkms_enable_vblank, 107 - .disable_vblank = vkms_disable_vblank, 108 - .get_vblank_timestamp = vkms_get_vblank_timestamp, 162 + DRM_CRTC_VBLANK_TIMER_FUNCS, 109 163 .get_crc_sources = vkms_get_crc_sources, 110 164 .set_crc_source = vkms_set_crc_source, 111 165 .verify_crc_source = vkms_verify_crc_source, ··· 155 213 return 0; 156 214 } 157 215 158 - static void vkms_crtc_atomic_enable(struct drm_crtc *crtc, 159 - struct drm_atomic_state *state) 160 - { 161 - drm_crtc_vblank_on(crtc); 162 - } 163 - 164 - static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, 165 - struct drm_atomic_state *state) 166 - { 167 - drm_crtc_vblank_off(crtc); 168 - } 169 - 170 216 static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, 171 217 struct drm_atomic_state *state) 172 218 __acquires(&vkms_output->lock) ··· 195 265 .atomic_check = vkms_crtc_atomic_check, 196 266 .atomic_begin = vkms_crtc_atomic_begin, 197 267 .atomic_flush = vkms_crtc_atomic_flush, 198 - .atomic_enable = vkms_crtc_atomic_enable, 199 - .atomic_disable = vkms_crtc_atomic_disable, 268 + .atomic_enable = drm_crtc_vblank_atomic_enable, 269 + .atomic_disable = 
drm_crtc_vblank_atomic_disable, 270 + .handle_vblank_timeout = vkms_crtc_handle_vblank_timeout, 200 271 }; 201 272 202 273 struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary,
-2
drivers/gpu/drm/vkms/vkms_drv.h
··· 215 215 struct drm_crtc crtc; 216 216 struct drm_writeback_connector wb_connector; 217 217 struct drm_encoder wb_encoder; 218 - struct hrtimer vblank_hrtimer; 219 - ktime_t period_ns; 220 218 struct workqueue_struct *composer_workq; 221 219 spinlock_t lock; 222 220
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
··· 37 37 { 38 38 struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); 39 39 if (bo) 40 - ttm_bo_put(bo); 40 + ttm_bo_fini(bo); 41 41 } 42 42 43 43 static int vmw_gem_object_open(struct drm_gem_object *obj,
+4 -17
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 15 15 #include "vmw_surface_cache.h" 16 16 #include "device_include/svga3d_surfacedefs.h" 17 17 18 + #include <drm/drm_dumb_buffers.h> 18 19 #include <drm/ttm/ttm_placement.h> 19 20 20 21 #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32) ··· 2268 2267 * contents is going to be rendered guest side. 2269 2268 */ 2270 2269 if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) { 2271 - int cpp = DIV_ROUND_UP(args->bpp, 8); 2272 - 2273 - switch (cpp) { 2274 - case 1: /* DRM_FORMAT_C8 */ 2275 - case 2: /* DRM_FORMAT_RGB565 */ 2276 - case 4: /* DRM_FORMAT_XRGB8888 */ 2277 - break; 2278 - default: 2279 - /* 2280 - * Dumb buffers don't allow anything else. 2281 - * This is tested via IGT's dumb_buffers 2282 - */ 2283 - return -EINVAL; 2284 - } 2285 - 2286 - args->pitch = args->width * cpp; 2287 - args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); 2270 + ret = drm_mode_size_dumb(dev, args, 0, 0); 2271 + if (ret) 2272 + return ret; 2288 2273 2289 2274 ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, 2290 2275 args->size, &args->handle,
+5 -5
drivers/gpu/drm/xe/xe_bo.c
··· 9 9 #include <linux/nospec.h> 10 10 11 11 #include <drm/drm_drv.h> 12 + #include <drm/drm_dumb_buffers.h> 12 13 #include <drm/drm_gem_ttm_helper.h> 13 14 #include <drm/drm_managed.h> 14 15 #include <drm/ttm/ttm_backup.h> ··· 1738 1737 * refcount directly if needed. 1739 1738 */ 1740 1739 __xe_bo_vunmap(gem_to_xe_bo(obj)); 1741 - ttm_bo_put(container_of(obj, struct ttm_buffer_object, base)); 1740 + ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base)); 1742 1741 } 1743 1742 1744 1743 static void xe_gem_object_close(struct drm_gem_object *obj, ··· 3604 3603 struct xe_device *xe = to_xe_device(dev); 3605 3604 struct xe_bo *bo; 3606 3605 uint32_t handle; 3607 - int cpp = DIV_ROUND_UP(args->bpp, 8); 3608 3606 int err; 3609 3607 u32 page_size = max_t(u32, PAGE_SIZE, 3610 3608 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); 3611 3609 3612 - args->pitch = ALIGN(args->width * cpp, 64); 3613 - args->size = ALIGN(mul_u32_u32(args->pitch, args->height), 3614 - page_size); 3610 + err = drm_mode_size_dumb(dev, args, SZ_64, page_size); 3611 + if (err) 3612 + return err; 3615 3613 3616 3614 bo = xe_bo_create_user(xe, NULL, args->size, 3617 3615 DRM_XE_GEM_CPU_CACHING_WC,
+5 -2
drivers/gpu/drm/xlnx/zynqmp_kms.c
··· 19 19 #include <drm/drm_crtc.h> 20 20 #include <drm/drm_device.h> 21 21 #include <drm/drm_drv.h> 22 + #include <drm/drm_dumb_buffers.h> 22 23 #include <drm/drm_encoder.h> 23 24 #include <drm/drm_fbdev_dma.h> 24 25 #include <drm/drm_fourcc.h> ··· 364 363 struct drm_mode_create_dumb *args) 365 364 { 366 365 struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm); 367 - unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 366 + int ret; 368 367 369 368 /* Enforce the alignment constraints of the DMA engine. */ 370 - args->pitch = ALIGN(pitch, dpsub->dma_align); 369 + ret = drm_mode_size_dumb(drm, args, dpsub->dma_align, 0); 370 + if (ret) 371 + return ret; 371 372 372 373 return drm_gem_dma_dumb_create_internal(file_priv, drm, args); 373 374 }
+12
drivers/gpu/host1x/bus.c
··· 471 471 472 472 mutex_unlock(&clients_lock); 473 473 474 + /* 475 + * Add device even if there are no subdevs to ensure syncpoint functionality 476 + * is available regardless of whether any engine subdevices are present 477 + */ 478 + if (list_empty(&device->subdevs)) { 479 + err = device_add(&device->dev); 480 + if (err < 0) 481 + dev_err(&device->dev, "failed to add device: %d\n", err); 482 + else 483 + device->registered = true; 484 + } 485 + 474 486 return 0; 475 487 } 476 488
+3 -8
drivers/gpu/host1x/dev.c
··· 585 585 } 586 586 587 587 host->clk = devm_clk_get(&pdev->dev, NULL); 588 - if (IS_ERR(host->clk)) { 589 - err = PTR_ERR(host->clk); 590 - 591 - if (err != -EPROBE_DEFER) 592 - dev_err(&pdev->dev, "failed to get clock: %d\n", err); 593 - 594 - return err; 595 - } 588 + if (IS_ERR(host->clk)) 589 + return dev_err_probe(&pdev->dev, PTR_ERR(host->clk), "failed to get clock\n"); 596 590 597 591 err = host1x_get_resets(host); 598 592 if (err) ··· 815 821 } 816 822 EXPORT_SYMBOL(host1x_get_dma_mask); 817 823 824 + MODULE_SOFTDEP("post: tegra-drm"); 818 825 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); 819 826 MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>"); 820 827 MODULE_DESCRIPTION("Host1x driver for Tegra products");
+69 -43
drivers/gpu/host1x/hw/channel_hw.c
··· 47 47 } 48 48 } 49 49 50 - static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, 51 - u32 next_class) 50 + static void submit_wait(struct host1x_job *job, u32 id, u32 threshold) 51 + { 52 + struct host1x_cdma *cdma = &job->channel->cdma; 53 + 54 + #if HOST1X_HW >= 2 55 + host1x_cdma_push_wide(cdma, 56 + host1x_opcode_setclass( 57 + HOST1X_CLASS_HOST1X, 58 + HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32, 59 + /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */ 60 + BIT(0) | BIT(2) 61 + ), 62 + threshold, 63 + id, 64 + HOST1X_OPCODE_NOP 65 + ); 66 + #else 67 + /* TODO add waitchk or use waitbases or other mitigation */ 68 + host1x_cdma_push(cdma, 69 + host1x_opcode_setclass( 70 + HOST1X_CLASS_HOST1X, 71 + host1x_uclass_wait_syncpt_r(), 72 + BIT(0) 73 + ), 74 + host1x_class_host_wait_syncpt(id, threshold) 75 + ); 76 + #endif 77 + } 78 + 79 + static void submit_setclass(struct host1x_job *job, u32 next_class) 52 80 { 53 81 struct host1x_cdma *cdma = &job->channel->cdma; 54 82 ··· 94 66 stream_id = job->engine_fallback_streamid; 95 67 96 68 host1x_cdma_push_wide(cdma, 97 - host1x_opcode_setclass( 98 - HOST1X_CLASS_HOST1X, 99 - HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32, 100 - /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */ 101 - BIT(0) | BIT(2) 102 - ), 103 - threshold, 104 - id, 105 - HOST1X_OPCODE_NOP 106 - ); 107 - host1x_cdma_push_wide(&job->channel->cdma, 108 - host1x_opcode_setclass(job->class, 0, 0), 69 + host1x_opcode_setclass(next_class, 0, 0), 109 70 host1x_opcode_setpayload(stream_id), 110 71 host1x_opcode_setstreamid(job->engine_streamid_offset / 4), 111 72 HOST1X_OPCODE_NOP); 112 - #elif HOST1X_HW >= 2 113 - host1x_cdma_push_wide(cdma, 114 - host1x_opcode_setclass( 115 - HOST1X_CLASS_HOST1X, 116 - HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32, 117 - /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */ 118 - BIT(0) | BIT(2) 119 - ), 120 - threshold, 121 - id, 122 - host1x_opcode_setclass(next_class, 0, 0) 123 - ); 124 73 #else 125 - /* TODO add waitchk or use waitbases or 
other mitigation */ 126 - host1x_cdma_push(cdma, 127 - host1x_opcode_setclass( 128 - HOST1X_CLASS_HOST1X, 129 - host1x_uclass_wait_syncpt_r(), 130 - BIT(0) 131 - ), 132 - host1x_class_host_wait_syncpt(id, threshold) 133 - ); 134 74 host1x_cdma_push(cdma, 135 75 host1x_opcode_setclass(next_class, 0, 0), 136 76 HOST1X_OPCODE_NOP ··· 106 110 #endif 107 111 } 108 112 109 - static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base) 113 + static void submit_gathers(struct host1x_job *job, struct host1x_job_cmd *cmds, u32 num_cmds, 114 + u32 job_syncpt_base) 110 115 { 111 116 struct host1x_cdma *cdma = &job->channel->cdma; 112 117 #if HOST1X_HW < 6 ··· 116 119 unsigned int i; 117 120 u32 threshold; 118 121 119 - for (i = 0; i < job->num_cmds; i++) { 120 - struct host1x_job_cmd *cmd = &job->cmds[i]; 122 + for (i = 0; i < num_cmds; i++) { 123 + struct host1x_job_cmd *cmd = &cmds[i]; 121 124 122 125 if (cmd->is_wait) { 123 126 if (cmd->wait.relative) ··· 125 128 else 126 129 threshold = cmd->wait.threshold; 127 130 128 - submit_wait(job, cmd->wait.id, threshold, cmd->wait.next_class); 131 + submit_wait(job, cmd->wait.id, threshold); 132 + submit_setclass(job, cmd->wait.next_class); 129 133 } else { 130 134 struct host1x_job_gather *g = &cmd->gather; 131 135 ··· 214 216 215 217 #if HOST1X_HW >= 6 216 218 u32 fence; 219 + int i = 0; 217 220 221 + if (job->num_cmds == 0) 222 + goto prefences_done; 223 + if (!job->cmds[0].is_wait || job->cmds[0].wait.relative) 224 + goto prefences_done; 225 + 226 + /* Enter host1x class with invalid stream ID for prefence waits. 
*/ 227 + host1x_cdma_push_wide(cdma, 228 + host1x_opcode_acquire_mlock(1), 229 + host1x_opcode_setclass(1, 0, 0), 230 + host1x_opcode_setpayload(0), 231 + host1x_opcode_setstreamid(0x1fffff)); 232 + 233 + for (i = 0; i < job->num_cmds; i++) { 234 + struct host1x_job_cmd *cmd = &job->cmds[i]; 235 + 236 + if (!cmd->is_wait || cmd->wait.relative) 237 + break; 238 + 239 + submit_wait(job, cmd->wait.id, cmd->wait.threshold); 240 + } 241 + 242 + host1x_cdma_push(cdma, 243 + HOST1X_OPCODE_NOP, 244 + host1x_opcode_release_mlock(1)); 245 + 246 + prefences_done: 218 247 /* Enter engine class with invalid stream ID. */ 219 248 host1x_cdma_push_wide(cdma, 220 249 host1x_opcode_acquire_mlock(job->class), ··· 255 230 host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1), 256 231 HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) | 257 232 HOST1X_UCLASS_INCR_SYNCPT_COND_F(4)); 258 - submit_wait(job, job->syncpt->id, fence, job->class); 233 + submit_wait(job, job->syncpt->id, fence); 234 + submit_setclass(job, job->class); 259 235 260 236 /* Submit work. */ 261 237 job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs); 262 - submit_gathers(job, job->syncpt_end - job->syncpt_incrs); 238 + submit_gathers(job, job->cmds + i, job->num_cmds - i, job->syncpt_end - job->syncpt_incrs); 263 239 264 240 /* Before releasing MLOCK, ensure engine is idle again. */ 265 241 fence = host1x_syncpt_incr_max(sp, 1); ··· 268 242 host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1), 269 243 HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) | 270 244 HOST1X_UCLASS_INCR_SYNCPT_COND_F(4)); 271 - submit_wait(job, job->syncpt->id, fence, job->class); 245 + submit_wait(job, job->syncpt->id, fence); 272 246 273 247 /* Release MLOCK. 
*/ 274 248 host1x_cdma_push(cdma, ··· 298 272 299 273 job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs); 300 274 301 - submit_gathers(job, job->syncpt_end - job->syncpt_incrs); 275 + submit_gathers(job, job->cmds, job->num_cmds, job->syncpt_end - job->syncpt_incrs); 302 276 #endif 303 277 } 304 278
+1 -3
drivers/gpu/host1x/syncpt.c
··· 345 345 346 346 sp->locked = false; 347 347 348 - mutex_lock(&sp->host->syncpt_mutex); 349 - 350 348 host1x_syncpt_base_free(sp->base); 351 349 kfree(sp->name); 352 350 sp->base = NULL; ··· 367 369 if (!sp) 368 370 return; 369 371 370 - kref_put(&sp->ref, syncpt_release); 372 + kref_put_mutex(&sp->ref, syncpt_release, &sp->host->syncpt_mutex); 371 373 } 372 374 EXPORT_SYMBOL(host1x_syncpt_put); 373 375
+4 -4
drivers/video/fbdev/Kconfig
··· 816 816 config FB_MATROX 817 817 tristate "Matrox acceleration" 818 818 depends on FB && PCI 819 + depends on FB_TILEBLITTING 819 820 select FB_CFB_FILLRECT 820 821 select FB_CFB_COPYAREA 821 822 select FB_CFB_IMAGEBLIT 822 823 select FB_IOMEM_FOPS 823 - select FB_TILEBLITTING 824 824 select FB_MACMODES if PPC_PMAC 825 825 help 826 826 Say Y here if you have a Matrox Millennium, Matrox Millennium II, ··· 1050 1050 config FB_S3 1051 1051 tristate "S3 Trio/Virge support" 1052 1052 depends on FB && PCI && HAS_IOPORT 1053 + depends on FB_TILEBLITTING 1053 1054 select FB_CFB_FILLRECT 1054 1055 select FB_CFB_COPYAREA 1055 1056 select FB_CFB_IMAGEBLIT 1056 1057 select FB_IOMEM_FOPS 1057 - select FB_TILEBLITTING 1058 1058 select FB_SVGALIB 1059 1059 select VGASTATE 1060 1060 select FB_CFB_REV_PIXELS_IN_BYTE ··· 1256 1256 config FB_VT8623 1257 1257 tristate "VIA VT8623 support" 1258 1258 depends on FB && PCI && HAS_IOPORT 1259 + depends on FB_TILEBLITTING 1259 1260 select FB_CFB_FILLRECT 1260 1261 select FB_CFB_COPYAREA 1261 1262 select FB_CFB_IMAGEBLIT 1262 1263 select FB_IOMEM_FOPS 1263 - select FB_TILEBLITTING 1264 1264 select FB_SVGALIB 1265 1265 select VGASTATE 1266 1266 select FONT_8x16 if FRAMEBUFFER_CONSOLE ··· 1294 1294 config FB_ARK 1295 1295 tristate "ARK 2000PV support" 1296 1296 depends on FB && PCI && HAS_IOPORT 1297 + depends on FB_TILEBLITTING 1297 1298 select FB_CFB_FILLRECT 1298 1299 select FB_CFB_COPYAREA 1299 1300 select FB_CFB_IMAGEBLIT 1300 1301 select FB_IOMEM_FOPS 1301 - select FB_TILEBLITTING 1302 1302 select FB_SVGALIB 1303 1303 select VGASTATE 1304 1304 select FONT_8x16 if FRAMEBUFFER_CONSOLE
+1 -1
drivers/video/fbdev/core/Kconfig
··· 180 180 depends on FB 181 181 182 182 config FB_MODE_HELPERS 183 - bool "Enable Video Mode Handling Helpers" 183 + bool 184 184 depends on FB 185 185 help 186 186 This enables functions for handling video modes using the
+61 -61
drivers/video/fbdev/core/bitblit.c
··· 253 253 int fg, int bg) 254 254 { 255 255 struct fb_cursor cursor; 256 - struct fbcon_ops *ops = info->fbcon_par; 256 + struct fbcon_par *par = info->fbcon_par; 257 257 unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; 258 258 int w = DIV_ROUND_UP(vc->vc_font.width, 8), c; 259 - int y = real_y(ops->p, vc->state.y); 259 + int y = real_y(par->p, vc->state.y); 260 260 int attribute, use_sw = vc->vc_cursor_type & CUR_SW; 261 261 int err = 1; 262 262 char *src; ··· 270 270 attribute = get_attribute(info, c); 271 271 src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); 272 272 273 - if (ops->cursor_state.image.data != src || 274 - ops->cursor_reset) { 275 - ops->cursor_state.image.data = src; 276 - cursor.set |= FB_CUR_SETIMAGE; 273 + if (par->cursor_state.image.data != src || 274 + par->cursor_reset) { 275 + par->cursor_state.image.data = src; 276 + cursor.set |= FB_CUR_SETIMAGE; 277 277 } 278 278 279 279 if (attribute) { ··· 282 282 dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC); 283 283 if (!dst) 284 284 return; 285 - kfree(ops->cursor_data); 286 - ops->cursor_data = dst; 285 + kfree(par->cursor_data); 286 + par->cursor_data = dst; 287 287 update_attr(dst, src, attribute, vc); 288 288 src = dst; 289 289 } 290 290 291 - if (ops->cursor_state.image.fg_color != fg || 292 - ops->cursor_state.image.bg_color != bg || 293 - ops->cursor_reset) { 294 - ops->cursor_state.image.fg_color = fg; 295 - ops->cursor_state.image.bg_color = bg; 291 + if (par->cursor_state.image.fg_color != fg || 292 + par->cursor_state.image.bg_color != bg || 293 + par->cursor_reset) { 294 + par->cursor_state.image.fg_color = fg; 295 + par->cursor_state.image.bg_color = bg; 296 296 cursor.set |= FB_CUR_SETCMAP; 297 297 } 298 298 299 - if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) || 300 - (ops->cursor_state.image.dy != (vc->vc_font.height * y)) || 301 - ops->cursor_reset) { 302 - ops->cursor_state.image.dx = vc->vc_font.width * 
vc->state.x; 303 - ops->cursor_state.image.dy = vc->vc_font.height * y; 299 + if ((par->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) || 300 + (par->cursor_state.image.dy != (vc->vc_font.height * y)) || 301 + par->cursor_reset) { 302 + par->cursor_state.image.dx = vc->vc_font.width * vc->state.x; 303 + par->cursor_state.image.dy = vc->vc_font.height * y; 304 304 cursor.set |= FB_CUR_SETPOS; 305 305 } 306 306 307 - if (ops->cursor_state.image.height != vc->vc_font.height || 308 - ops->cursor_state.image.width != vc->vc_font.width || 309 - ops->cursor_reset) { 310 - ops->cursor_state.image.height = vc->vc_font.height; 311 - ops->cursor_state.image.width = vc->vc_font.width; 307 + if (par->cursor_state.image.height != vc->vc_font.height || 308 + par->cursor_state.image.width != vc->vc_font.width || 309 + par->cursor_reset) { 310 + par->cursor_state.image.height = vc->vc_font.height; 311 + par->cursor_state.image.width = vc->vc_font.width; 312 312 cursor.set |= FB_CUR_SETSIZE; 313 313 } 314 314 315 - if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || 316 - ops->cursor_reset) { 317 - ops->cursor_state.hot.x = cursor.hot.y = 0; 315 + if (par->cursor_state.hot.x || par->cursor_state.hot.y || 316 + par->cursor_reset) { 317 + par->cursor_state.hot.x = cursor.hot.y = 0; 318 318 cursor.set |= FB_CUR_SETHOT; 319 319 } 320 320 321 321 if (cursor.set & FB_CUR_SETSIZE || 322 - vc->vc_cursor_type != ops->p->cursor_shape || 323 - ops->cursor_state.mask == NULL || 324 - ops->cursor_reset) { 322 + vc->vc_cursor_type != par->p->cursor_shape || 323 + par->cursor_state.mask == NULL || 324 + par->cursor_reset) { 325 325 char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC); 326 326 int cur_height, size, i = 0; 327 327 u8 msk = 0xff; ··· 329 329 if (!mask) 330 330 return; 331 331 332 - kfree(ops->cursor_state.mask); 333 - ops->cursor_state.mask = mask; 332 + kfree(par->cursor_state.mask); 333 + par->cursor_state.mask = mask; 334 334 335 - ops->p->cursor_shape 
= vc->vc_cursor_type; 335 + par->p->cursor_shape = vc->vc_cursor_type; 336 336 cursor.set |= FB_CUR_SETSHAPE; 337 337 338 - switch (CUR_SIZE(ops->p->cursor_shape)) { 338 + switch (CUR_SIZE(par->p->cursor_shape)) { 339 339 case CUR_NONE: 340 340 cur_height = 0; 341 341 break; ··· 364 364 mask[i++] = msk; 365 365 } 366 366 367 - ops->cursor_state.enable = enable && !use_sw; 367 + par->cursor_state.enable = enable && !use_sw; 368 368 369 369 cursor.image.data = src; 370 - cursor.image.fg_color = ops->cursor_state.image.fg_color; 371 - cursor.image.bg_color = ops->cursor_state.image.bg_color; 372 - cursor.image.dx = ops->cursor_state.image.dx; 373 - cursor.image.dy = ops->cursor_state.image.dy; 374 - cursor.image.height = ops->cursor_state.image.height; 375 - cursor.image.width = ops->cursor_state.image.width; 376 - cursor.hot.x = ops->cursor_state.hot.x; 377 - cursor.hot.y = ops->cursor_state.hot.y; 378 - cursor.mask = ops->cursor_state.mask; 379 - cursor.enable = ops->cursor_state.enable; 370 + cursor.image.fg_color = par->cursor_state.image.fg_color; 371 + cursor.image.bg_color = par->cursor_state.image.bg_color; 372 + cursor.image.dx = par->cursor_state.image.dx; 373 + cursor.image.dy = par->cursor_state.image.dy; 374 + cursor.image.height = par->cursor_state.image.height; 375 + cursor.image.width = par->cursor_state.image.width; 376 + cursor.hot.x = par->cursor_state.hot.x; 377 + cursor.hot.y = par->cursor_state.hot.y; 378 + cursor.mask = par->cursor_state.mask; 379 + cursor.enable = par->cursor_state.enable; 380 380 cursor.image.depth = 1; 381 381 cursor.rop = ROP_XOR; 382 382 ··· 386 386 if (err) 387 387 soft_cursor(info, &cursor); 388 388 389 - ops->cursor_reset = 0; 389 + par->cursor_reset = 0; 390 390 } 391 391 392 392 static int bit_update_start(struct fb_info *info) 393 393 { 394 - struct fbcon_ops *ops = info->fbcon_par; 394 + struct fbcon_par *par = info->fbcon_par; 395 395 int err; 396 396 397 - err = fb_pan_display(info, &ops->var); 398 - 
ops->var.xoffset = info->var.xoffset; 399 - ops->var.yoffset = info->var.yoffset; 400 - ops->var.vmode = info->var.vmode; 397 + err = fb_pan_display(info, &par->var); 398 + par->var.xoffset = info->var.xoffset; 399 + par->var.yoffset = info->var.yoffset; 400 + par->var.vmode = info->var.vmode; 401 401 return err; 402 402 } 403 403 404 - void fbcon_set_bitops(struct fbcon_ops *ops) 405 - { 406 - ops->bmove = bit_bmove; 407 - ops->clear = bit_clear; 408 - ops->putcs = bit_putcs; 409 - ops->clear_margins = bit_clear_margins; 410 - ops->cursor = bit_cursor; 411 - ops->update_start = bit_update_start; 412 - ops->rotate_font = NULL; 404 + static const struct fbcon_bitops bit_fbcon_bitops = { 405 + .bmove = bit_bmove, 406 + .clear = bit_clear, 407 + .putcs = bit_putcs, 408 + .clear_margins = bit_clear_margins, 409 + .cursor = bit_cursor, 410 + .update_start = bit_update_start, 411 + }; 413 412 414 - if (ops->rotate) 415 - fbcon_set_rotate(ops); 413 + void fbcon_set_bitops_ur(struct fbcon_par *par) 414 + { 415 + par->bitops = &bit_fbcon_bitops; 416 416 }
+236 -223
drivers/video/fbdev/core/fbcon.c
··· 81 81 #include <asm/irq.h> 82 82 83 83 #include "fbcon.h" 84 + #include "fbcon_rotate.h" 84 85 #include "fb_internal.h" 85 86 86 87 /* ··· 199 198 #ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION 200 199 static inline void fbcon_set_rotation(struct fb_info *info) 201 200 { 202 - struct fbcon_ops *ops = info->fbcon_par; 201 + struct fbcon_par *par = info->fbcon_par; 203 202 204 203 if (!(info->flags & FBINFO_MISC_TILEBLITTING) && 205 - ops->p->con_rotate < 4) 206 - ops->rotate = ops->p->con_rotate; 204 + par->p->con_rotate < 4) 205 + par->rotate = par->p->con_rotate; 207 206 else 208 - ops->rotate = 0; 207 + par->rotate = 0; 209 208 } 210 209 211 210 static void fbcon_rotate(struct fb_info *info, u32 rotate) 212 211 { 213 - struct fbcon_ops *ops= info->fbcon_par; 212 + struct fbcon_par *par = info->fbcon_par; 214 213 struct fb_info *fb_info; 215 214 216 - if (!ops || ops->currcon == -1) 215 + if (!par || par->currcon == -1) 217 216 return; 218 217 219 - fb_info = fbcon_info_from_console(ops->currcon); 218 + fb_info = fbcon_info_from_console(par->currcon); 220 219 221 220 if (info == fb_info) { 222 - struct fbcon_display *p = &fb_display[ops->currcon]; 221 + struct fbcon_display *p = &fb_display[par->currcon]; 223 222 224 223 if (rotate < 4) 225 224 p->con_rotate = rotate; ··· 232 231 233 232 static void fbcon_rotate_all(struct fb_info *info, u32 rotate) 234 233 { 235 - struct fbcon_ops *ops = info->fbcon_par; 234 + struct fbcon_par *par = info->fbcon_par; 236 235 struct vc_data *vc; 237 236 struct fbcon_display *p; 238 237 int i; 239 238 240 - if (!ops || ops->currcon < 0 || rotate > 3) 239 + if (!par || par->currcon < 0 || rotate > 3) 241 240 return; 242 241 243 242 for (i = first_fb_vc; i <= last_fb_vc; i++) { ··· 255 254 #else 256 255 static inline void fbcon_set_rotation(struct fb_info *info) 257 256 { 258 - struct fbcon_ops *ops = info->fbcon_par; 257 + struct fbcon_par *par = info->fbcon_par; 259 258 260 - ops->rotate = FB_ROTATE_UR; 259 + par->rotate = 
FB_ROTATE_UR; 261 260 } 262 261 263 262 static void fbcon_rotate(struct fb_info *info, u32 rotate) ··· 271 270 } 272 271 #endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ 273 272 273 + static void fbcon_set_bitops(struct fbcon_par *par) 274 + { 275 + switch (par->rotate) { 276 + default: 277 + fallthrough; 278 + case FB_ROTATE_UR: 279 + fbcon_set_bitops_ur(par); 280 + break; 281 + case FB_ROTATE_CW: 282 + fbcon_set_bitops_cw(par); 283 + break; 284 + case FB_ROTATE_UD: 285 + fbcon_set_bitops_ud(par); 286 + break; 287 + case FB_ROTATE_CCW: 288 + fbcon_set_bitops_ccw(par); 289 + break; 290 + } 291 + } 292 + 274 293 static int fbcon_get_rotate(struct fb_info *info) 275 294 { 276 - struct fbcon_ops *ops = info->fbcon_par; 295 + struct fbcon_par *par = info->fbcon_par; 277 296 278 - return (ops) ? ops->rotate : 0; 297 + return (par) ? par->rotate : 0; 279 298 } 280 299 281 300 static bool fbcon_skip_panic(struct fb_info *info) ··· 305 284 306 285 static inline bool fbcon_is_active(struct vc_data *vc, struct fb_info *info) 307 286 { 308 - struct fbcon_ops *ops = info->fbcon_par; 287 + struct fbcon_par *par = info->fbcon_par; 309 288 310 289 return info->state == FBINFO_STATE_RUNNING && 311 - vc->vc_mode == KD_TEXT && !ops->graphics && !fbcon_skip_panic(info); 290 + vc->vc_mode == KD_TEXT && !par->graphics && !fbcon_skip_panic(info); 312 291 } 313 292 314 293 static int get_color(struct vc_data *vc, struct fb_info *info, ··· 390 369 391 370 static void fb_flashcursor(struct work_struct *work) 392 371 { 393 - struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work); 372 + struct fbcon_par *par = container_of(work, struct fbcon_par, cursor_work.work); 394 373 struct fb_info *info; 395 374 struct vc_data *vc = NULL; 396 375 int c; ··· 405 384 return; 406 385 407 386 /* protected by console_lock */ 408 - info = ops->info; 387 + info = par->info; 409 388 410 - if (ops->currcon != -1) 411 - vc = vc_cons[ops->currcon].d; 389 + if (par->currcon != -1) 390 + vc 
= vc_cons[par->currcon].d; 412 391 413 392 if (!vc || !con_is_visible(vc) || 414 393 fbcon_info_from_console(vc->vc_num) != info || ··· 418 397 } 419 398 420 399 c = scr_readw((u16 *) vc->vc_pos); 421 - enable = ops->cursor_flash && !ops->cursor_state.enable; 422 - ops->cursor(vc, info, enable, 423 - get_fg_color(vc, info, c), 424 - get_bg_color(vc, info, c)); 400 + enable = par->cursor_flash && !par->cursor_state.enable; 401 + par->bitops->cursor(vc, info, enable, 402 + get_fg_color(vc, info, c), 403 + get_bg_color(vc, info, c)); 425 404 console_unlock(); 426 405 427 - queue_delayed_work(system_power_efficient_wq, &ops->cursor_work, 428 - ops->cur_blink_jiffies); 406 + queue_delayed_work(system_power_efficient_wq, &par->cursor_work, 407 + par->cur_blink_jiffies); 429 408 } 430 409 431 410 static void fbcon_add_cursor_work(struct fb_info *info) 432 411 { 433 - struct fbcon_ops *ops = info->fbcon_par; 412 + struct fbcon_par *par = info->fbcon_par; 434 413 435 414 if (fbcon_cursor_blink) 436 - queue_delayed_work(system_power_efficient_wq, &ops->cursor_work, 437 - ops->cur_blink_jiffies); 415 + queue_delayed_work(system_power_efficient_wq, &par->cursor_work, 416 + par->cur_blink_jiffies); 438 417 } 439 418 440 419 static void fbcon_del_cursor_work(struct fb_info *info) 441 420 { 442 - struct fbcon_ops *ops = info->fbcon_par; 421 + struct fbcon_par *par = info->fbcon_par; 443 422 444 - cancel_delayed_work_sync(&ops->cursor_work); 423 + cancel_delayed_work_sync(&par->cursor_work); 445 424 } 446 425 447 426 #ifndef MODULE ··· 601 580 int cols, int rows, int new_cols, int new_rows) 602 581 { 603 582 /* Need to make room for the logo */ 604 - struct fbcon_ops *ops = info->fbcon_par; 583 + struct fbcon_par *par = info->fbcon_par; 605 584 int cnt, erase = vc->vc_video_erase_char, step; 606 585 unsigned short *save = NULL, *r, *q; 607 586 int logo_height; ··· 617 596 */ 618 597 if (fb_get_color_depth(&info->var, &info->fix) == 1) 619 598 erase &= ~0x400; 620 - logo_height = 
fb_prepare_logo(info, ops->rotate); 599 + logo_height = fb_prepare_logo(info, par->rotate); 621 600 logo_lines = DIV_ROUND_UP(logo_height, vc->vc_font.height); 622 601 q = (unsigned short *) (vc->vc_origin + 623 602 vc->vc_size_row * rows); ··· 689 668 #ifdef CONFIG_FB_TILEBLITTING 690 669 static void set_blitting_type(struct vc_data *vc, struct fb_info *info) 691 670 { 692 - struct fbcon_ops *ops = info->fbcon_par; 671 + struct fbcon_par *par = info->fbcon_par; 693 672 694 - ops->p = &fb_display[vc->vc_num]; 673 + par->p = &fb_display[vc->vc_num]; 695 674 696 675 if ((info->flags & FBINFO_MISC_TILEBLITTING)) 697 676 fbcon_set_tileops(vc, info); 698 677 else { 699 678 fbcon_set_rotation(info); 700 - fbcon_set_bitops(ops); 679 + fbcon_set_bitops(par); 701 680 } 702 681 } 703 682 ··· 714 693 #else 715 694 static void set_blitting_type(struct vc_data *vc, struct fb_info *info) 716 695 { 717 - struct fbcon_ops *ops = info->fbcon_par; 696 + struct fbcon_par *par = info->fbcon_par; 718 697 719 698 info->flags &= ~FBINFO_MISC_TILEBLITTING; 720 - ops->p = &fb_display[vc->vc_num]; 699 + par->p = &fb_display[vc->vc_num]; 721 700 fbcon_set_rotation(info); 722 - fbcon_set_bitops(ops); 701 + fbcon_set_bitops(par); 723 702 } 724 703 725 704 static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) ··· 739 718 module_put(info->fbops->owner); 740 719 741 720 if (info->fbcon_par) { 742 - struct fbcon_ops *ops = info->fbcon_par; 721 + struct fbcon_par *par = info->fbcon_par; 743 722 744 723 fbcon_del_cursor_work(info); 745 - kfree(ops->cursor_state.mask); 746 - kfree(ops->cursor_data); 747 - kfree(ops->cursor_src); 748 - kfree(ops->fontbuffer); 724 + kfree(par->cursor_state.mask); 725 + kfree(par->cursor_data); 726 + kfree(par->cursor_src); 727 + kfree(par->fontbuffer); 749 728 kfree(info->fbcon_par); 750 729 info->fbcon_par = NULL; 751 730 } ··· 753 732 754 733 static int fbcon_open(struct fb_info *info) 755 734 { 756 - struct fbcon_ops *ops; 735 + struct 
fbcon_par *par; 757 736 758 737 if (!try_module_get(info->fbops->owner)) 759 738 return -ENODEV; ··· 767 746 } 768 747 unlock_fb_info(info); 769 748 770 - ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); 771 - if (!ops) { 749 + par = kzalloc(sizeof(*par), GFP_KERNEL); 750 + if (!par) { 772 751 fbcon_release(info); 773 752 return -ENOMEM; 774 753 } 775 754 776 - INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor); 777 - ops->info = info; 778 - info->fbcon_par = ops; 779 - ops->cur_blink_jiffies = HZ / 5; 755 + INIT_DELAYED_WORK(&par->cursor_work, fb_flashcursor); 756 + par->info = info; 757 + info->fbcon_par = par; 758 + par->cur_blink_jiffies = HZ / 5; 780 759 781 760 return 0; 782 761 } ··· 823 802 static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, 824 803 int unit, int show_logo) 825 804 { 826 - struct fbcon_ops *ops = info->fbcon_par; 805 + struct fbcon_par *par = info->fbcon_par; 827 806 int ret; 828 807 829 - ops->currcon = fg_console; 808 + par->currcon = fg_console; 830 809 831 - if (info->fbops->fb_set_par && !ops->initialized) { 810 + if (info->fbops->fb_set_par && !par->initialized) { 832 811 ret = info->fbops->fb_set_par(info); 833 812 834 813 if (ret) ··· 837 816 "error code %d\n", ret); 838 817 } 839 818 840 - ops->initialized = true; 841 - ops->graphics = 0; 819 + par->initialized = true; 820 + par->graphics = 0; 842 821 fbcon_set_disp(info, &info->var, unit); 843 822 844 823 if (show_logo) { ··· 975 954 struct vc_data *vc = vc_cons[fg_console].d; 976 955 const struct font_desc *font = NULL; 977 956 struct fb_info *info = NULL; 978 - struct fbcon_ops *ops; 957 + struct fbcon_par *par; 979 958 int rows, cols; 980 959 981 960 /* ··· 995 974 if (fbcon_open(info)) 996 975 return NULL; 997 976 998 - ops = info->fbcon_par; 999 - ops->currcon = -1; 1000 - ops->graphics = 1; 1001 - ops->cur_rotate = -1; 977 + par = info->fbcon_par; 978 + par->currcon = -1; 979 + par->graphics = 1; 980 + par->cur_rotate = -1; 1002 981 1003 982 
p->con_rotate = initial_rotation; 1004 983 if (p->con_rotate == -1) ··· 1021 1000 vc->vc_font.charcount = font->charcount; 1022 1001 } 1023 1002 1024 - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); 1025 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 1003 + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); 1004 + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); 1026 1005 cols /= vc->vc_font.width; 1027 1006 rows /= vc->vc_font.height; 1028 1007 vc_resize(vc, cols, rows); ··· 1040 1019 static void fbcon_init(struct vc_data *vc, bool init) 1041 1020 { 1042 1021 struct fb_info *info; 1043 - struct fbcon_ops *ops; 1022 + struct fbcon_par *par; 1044 1023 struct vc_data **default_mode = vc->vc_display_fg; 1045 1024 struct vc_data *svc = *default_mode; 1046 1025 struct fbcon_display *t, *p = &fb_display[vc->vc_num]; ··· 1114 1093 if (!*vc->uni_pagedict_loc) 1115 1094 con_copy_unimap(vc, svc); 1116 1095 1117 - ops = info->fbcon_par; 1118 - ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); 1096 + par = info->fbcon_par; 1097 + par->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); 1119 1098 1120 1099 p->con_rotate = initial_rotation; 1121 1100 if (p->con_rotate == -1) ··· 1127 1106 1128 1107 cols = vc->vc_cols; 1129 1108 rows = vc->vc_rows; 1130 - new_cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); 1131 - new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 1109 + new_cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); 1110 + new_rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); 1132 1111 new_cols /= vc->vc_font.width; 1133 1112 new_rows /= vc->vc_font.height; 1134 1113 ··· 1140 1119 * We need to do it in fbcon_init() to prevent screen corruption. 
1141 1120 */ 1142 1121 if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { 1143 - if (info->fbops->fb_set_par && !ops->initialized) { 1122 + if (info->fbops->fb_set_par && !par->initialized) { 1144 1123 ret = info->fbops->fb_set_par(info); 1145 1124 1146 1125 if (ret) ··· 1149 1128 "error code %d\n", ret); 1150 1129 } 1151 1130 1152 - ops->initialized = true; 1131 + par->initialized = true; 1153 1132 } 1154 1133 1155 - ops->graphics = 0; 1134 + par->graphics = 0; 1156 1135 1157 1136 #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION 1158 1137 if ((info->flags & FBINFO_HWACCEL_COPYAREA) && ··· 1176 1155 if (logo) 1177 1156 fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); 1178 1157 1179 - if (ops->rotate_font && ops->rotate_font(info, vc)) { 1180 - ops->rotate = FB_ROTATE_UR; 1158 + if (par->bitops->rotate_font && par->bitops->rotate_font(info, vc)) { 1159 + par->rotate = FB_ROTATE_UR; 1181 1160 set_blitting_type(vc, info); 1182 1161 } 1183 1162 1184 - ops->p = &fb_display[fg_console]; 1163 + par->p = &fb_display[fg_console]; 1185 1164 } 1186 1165 1187 1166 static void fbcon_free_font(struct fbcon_display *p) ··· 1219 1198 { 1220 1199 struct fbcon_display *p = &fb_display[vc->vc_num]; 1221 1200 struct fb_info *info; 1222 - struct fbcon_ops *ops; 1201 + struct fbcon_par *par; 1223 1202 int idx; 1224 1203 1225 1204 fbcon_free_font(p); ··· 1233 1212 if (!info) 1234 1213 goto finished; 1235 1214 1236 - ops = info->fbcon_par; 1215 + par = info->fbcon_par; 1237 1216 1238 - if (!ops) 1217 + if (!par) 1239 1218 goto finished; 1240 1219 1241 1220 if (con_is_visible(vc)) 1242 1221 fbcon_del_cursor_work(info); 1243 1222 1244 - ops->initialized = false; 1223 + par->initialized = false; 1245 1224 finished: 1246 1225 1247 1226 fbcon_free_font(p); ··· 1288 1267 unsigned int height, unsigned int width) 1289 1268 { 1290 1269 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1291 - struct fbcon_ops *ops = info->fbcon_par; 1270 + struct fbcon_par *par = 
info->fbcon_par; 1292 1271 int fg, bg; 1293 1272 struct fbcon_display *p = &fb_display[vc->vc_num]; 1294 1273 u_int y_break; ··· 1303 1282 vc->vc_top = 0; 1304 1283 /* 1305 1284 * If the font dimensions are not an integral of the display 1306 - * dimensions then the ops->clear below won't end up clearing 1285 + * dimensions then the par->clear below won't end up clearing 1307 1286 * the margins. Call clear_margins here in case the logo 1308 1287 * bitmap stretched into the margin area. 1309 1288 */ ··· 1317 1296 y_break = p->vrows - p->yscroll; 1318 1297 if (sy < y_break && sy + height - 1 >= y_break) { 1319 1298 u_int b = y_break - sy; 1320 - ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg); 1321 - ops->clear(vc, info, real_y(p, sy + b), sx, height - b, 1322 - width, fg, bg); 1299 + par->bitops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg); 1300 + par->bitops->clear(vc, info, real_y(p, sy + b), sx, height - b, 1301 + width, fg, bg); 1323 1302 } else 1324 - ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg); 1303 + par->bitops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg); 1325 1304 } 1326 1305 1327 1306 static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, ··· 1335 1314 { 1336 1315 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1337 1316 struct fbcon_display *p = &fb_display[vc->vc_num]; 1338 - struct fbcon_ops *ops = info->fbcon_par; 1317 + struct fbcon_par *par = info->fbcon_par; 1339 1318 1340 1319 if (fbcon_is_active(vc, info)) 1341 - ops->putcs(vc, info, s, count, real_y(p, ypos), xpos, 1342 - get_fg_color(vc, info, scr_readw(s)), 1343 - get_bg_color(vc, info, scr_readw(s))); 1320 + par->bitops->putcs(vc, info, s, count, real_y(p, ypos), xpos, 1321 + get_fg_color(vc, info, scr_readw(s)), 1322 + get_bg_color(vc, info, scr_readw(s))); 1344 1323 } 1345 1324 1346 1325 static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) 1347 1326 { 1348 1327 struct fb_info *info = 
fbcon_info_from_console(vc->vc_num); 1349 - struct fbcon_ops *ops = info->fbcon_par; 1328 + struct fbcon_par *par = info->fbcon_par; 1350 1329 1351 1330 if (fbcon_is_active(vc, info)) 1352 - ops->clear_margins(vc, info, margin_color, bottom_only); 1331 + par->bitops->clear_margins(vc, info, margin_color, bottom_only); 1353 1332 } 1354 1333 1355 1334 static void fbcon_cursor(struct vc_data *vc, bool enable) 1356 1335 { 1357 1336 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1358 - struct fbcon_ops *ops = info->fbcon_par; 1337 + struct fbcon_par *par = info->fbcon_par; 1359 1338 int c = scr_readw((u16 *) vc->vc_pos); 1360 1339 1361 - ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); 1340 + par->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); 1362 1341 1363 1342 if (!fbcon_is_active(vc, info) || vc->vc_deccm != 1) 1364 1343 return; ··· 1368 1347 else 1369 1348 fbcon_add_cursor_work(info); 1370 1349 1371 - ops->cursor_flash = enable; 1350 + par->cursor_flash = enable; 1372 1351 1373 - if (!ops->cursor) 1352 + if (!par->bitops->cursor) 1374 1353 return; 1375 1354 1376 - ops->cursor(vc, info, enable, 1377 - get_fg_color(vc, info, c), 1378 - get_bg_color(vc, info, c)); 1355 + par->bitops->cursor(vc, info, enable, 1356 + get_fg_color(vc, info, c), 1357 + get_bg_color(vc, info, c)); 1379 1358 } 1380 1359 1381 1360 static int scrollback_phys_max = 0; ··· 1388 1367 struct fbcon_display *p, *t; 1389 1368 struct vc_data **default_mode, *vc; 1390 1369 struct vc_data *svc; 1391 - struct fbcon_ops *ops = info->fbcon_par; 1370 + struct fbcon_par *par = info->fbcon_par; 1392 1371 int rows, cols; 1393 1372 unsigned long ret = 0; 1394 1373 ··· 1421 1400 var->yoffset = info->var.yoffset; 1422 1401 var->xoffset = info->var.xoffset; 1423 1402 fb_set_var(info, var); 1424 - ops->var = info->var; 1403 + par->var = info->var; 1425 1404 vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); 1426 1405 vc->vc_complement_mask = 
vc->vc_can_do_color ? 0x7700 : 0x0800; 1427 1406 if (vc->vc_font.charcount == 256) { ··· 1437 1416 if (!*vc->uni_pagedict_loc) 1438 1417 con_copy_unimap(vc, svc); 1439 1418 1440 - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); 1441 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 1419 + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); 1420 + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); 1442 1421 cols /= vc->vc_font.width; 1443 1422 rows /= vc->vc_font.height; 1444 1423 ret = vc_resize(vc, cols, rows); ··· 1450 1429 static __inline__ void ywrap_up(struct vc_data *vc, int count) 1451 1430 { 1452 1431 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1453 - struct fbcon_ops *ops = info->fbcon_par; 1432 + struct fbcon_par *par = info->fbcon_par; 1454 1433 struct fbcon_display *p = &fb_display[vc->vc_num]; 1455 1434 1456 1435 p->yscroll += count; 1457 1436 if (p->yscroll >= p->vrows) /* Deal with wrap */ 1458 1437 p->yscroll -= p->vrows; 1459 - ops->var.xoffset = 0; 1460 - ops->var.yoffset = p->yscroll * vc->vc_font.height; 1461 - ops->var.vmode |= FB_VMODE_YWRAP; 1462 - ops->update_start(info); 1438 + par->var.xoffset = 0; 1439 + par->var.yoffset = p->yscroll * vc->vc_font.height; 1440 + par->var.vmode |= FB_VMODE_YWRAP; 1441 + par->bitops->update_start(info); 1463 1442 scrollback_max += count; 1464 1443 if (scrollback_max > scrollback_phys_max) 1465 1444 scrollback_max = scrollback_phys_max; ··· 1469 1448 static __inline__ void ywrap_down(struct vc_data *vc, int count) 1470 1449 { 1471 1450 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1472 - struct fbcon_ops *ops = info->fbcon_par; 1451 + struct fbcon_par *par = info->fbcon_par; 1473 1452 struct fbcon_display *p = &fb_display[vc->vc_num]; 1474 1453 1475 1454 p->yscroll -= count; 1476 1455 if (p->yscroll < 0) /* Deal with wrap */ 1477 1456 p->yscroll += p->vrows; 1478 - ops->var.xoffset = 0; 1479 - ops->var.yoffset = p->yscroll 
* vc->vc_font.height; 1480 - ops->var.vmode |= FB_VMODE_YWRAP; 1481 - ops->update_start(info); 1457 + par->var.xoffset = 0; 1458 + par->var.yoffset = p->yscroll * vc->vc_font.height; 1459 + par->var.vmode |= FB_VMODE_YWRAP; 1460 + par->bitops->update_start(info); 1482 1461 scrollback_max -= count; 1483 1462 if (scrollback_max < 0) 1484 1463 scrollback_max = 0; ··· 1489 1468 { 1490 1469 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1491 1470 struct fbcon_display *p = &fb_display[vc->vc_num]; 1492 - struct fbcon_ops *ops = info->fbcon_par; 1471 + struct fbcon_par *par = info->fbcon_par; 1493 1472 1494 1473 p->yscroll += count; 1495 1474 if (p->yscroll > p->vrows - vc->vc_rows) { 1496 - ops->bmove(vc, info, p->vrows - vc->vc_rows, 1497 - 0, 0, 0, vc->vc_rows, vc->vc_cols); 1475 + par->bitops->bmove(vc, info, p->vrows - vc->vc_rows, 1476 + 0, 0, 0, vc->vc_rows, vc->vc_cols); 1498 1477 p->yscroll -= p->vrows - vc->vc_rows; 1499 1478 } 1500 1479 1501 - ops->var.xoffset = 0; 1502 - ops->var.yoffset = p->yscroll * vc->vc_font.height; 1503 - ops->var.vmode &= ~FB_VMODE_YWRAP; 1504 - ops->update_start(info); 1480 + par->var.xoffset = 0; 1481 + par->var.yoffset = p->yscroll * vc->vc_font.height; 1482 + par->var.vmode &= ~FB_VMODE_YWRAP; 1483 + par->bitops->update_start(info); 1505 1484 fbcon_clear_margins(vc, 1); 1506 1485 scrollback_max += count; 1507 1486 if (scrollback_max > scrollback_phys_max) ··· 1512 1491 static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) 1513 1492 { 1514 1493 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1515 - struct fbcon_ops *ops = info->fbcon_par; 1494 + struct fbcon_par *par = info->fbcon_par; 1516 1495 struct fbcon_display *p = &fb_display[vc->vc_num]; 1517 1496 1518 1497 p->yscroll += count; ··· 1522 1501 fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t); 1523 1502 } 1524 1503 1525 - ops->var.xoffset = 0; 1526 - ops->var.yoffset = p->yscroll * vc->vc_font.height; 1527 - 
ops->var.vmode &= ~FB_VMODE_YWRAP; 1528 - ops->update_start(info); 1504 + par->var.xoffset = 0; 1505 + par->var.yoffset = p->yscroll * vc->vc_font.height; 1506 + par->var.vmode &= ~FB_VMODE_YWRAP; 1507 + par->bitops->update_start(info); 1529 1508 fbcon_clear_margins(vc, 1); 1530 1509 scrollback_max += count; 1531 1510 if (scrollback_max > scrollback_phys_max) ··· 1537 1516 { 1538 1517 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1539 1518 struct fbcon_display *p = &fb_display[vc->vc_num]; 1540 - struct fbcon_ops *ops = info->fbcon_par; 1519 + struct fbcon_par *par = info->fbcon_par; 1541 1520 1542 1521 p->yscroll -= count; 1543 1522 if (p->yscroll < 0) { 1544 - ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, 1545 - 0, vc->vc_rows, vc->vc_cols); 1523 + par->bitops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, 1524 + 0, vc->vc_rows, vc->vc_cols); 1546 1525 p->yscroll += p->vrows - vc->vc_rows; 1547 1526 } 1548 1527 1549 - ops->var.xoffset = 0; 1550 - ops->var.yoffset = p->yscroll * vc->vc_font.height; 1551 - ops->var.vmode &= ~FB_VMODE_YWRAP; 1552 - ops->update_start(info); 1528 + par->var.xoffset = 0; 1529 + par->var.yoffset = p->yscroll * vc->vc_font.height; 1530 + par->var.vmode &= ~FB_VMODE_YWRAP; 1531 + par->bitops->update_start(info); 1553 1532 fbcon_clear_margins(vc, 1); 1554 1533 scrollback_max -= count; 1555 1534 if (scrollback_max < 0) ··· 1560 1539 static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) 1561 1540 { 1562 1541 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1563 - struct fbcon_ops *ops = info->fbcon_par; 1542 + struct fbcon_par *par = info->fbcon_par; 1564 1543 struct fbcon_display *p = &fb_display[vc->vc_num]; 1565 1544 1566 1545 p->yscroll -= count; ··· 1570 1549 fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count); 1571 1550 } 1572 1551 1573 - ops->var.xoffset = 0; 1574 - ops->var.yoffset = p->yscroll * vc->vc_font.height; 1575 - ops->var.vmode &= ~FB_VMODE_YWRAP; 1576 - 
ops->update_start(info); 1552 + par->var.xoffset = 0; 1553 + par->var.yoffset = p->yscroll * vc->vc_font.height; 1554 + par->var.vmode &= ~FB_VMODE_YWRAP; 1555 + par->bitops->update_start(info); 1577 1556 fbcon_clear_margins(vc, 1); 1578 1557 scrollback_max -= count; 1579 1558 if (scrollback_max < 0) ··· 1622 1601 unsigned short *d = (unsigned short *) 1623 1602 (vc->vc_origin + vc->vc_size_row * line); 1624 1603 unsigned short *s = d + offset; 1625 - struct fbcon_ops *ops = info->fbcon_par; 1604 + struct fbcon_par *par = info->fbcon_par; 1626 1605 1627 1606 while (count--) { 1628 1607 unsigned short *start = s; ··· 1635 1614 1636 1615 if (c == scr_readw(d)) { 1637 1616 if (s > start) { 1638 - ops->bmove(vc, info, line + ycount, x, 1639 - line, x, 1, s-start); 1617 + par->bitops->bmove(vc, info, line + ycount, x, 1618 + line, x, 1, s - start); 1640 1619 x += s - start + 1; 1641 1620 start = s + 1; 1642 1621 } else { ··· 1651 1630 d++; 1652 1631 } while (s < le); 1653 1632 if (s > start) 1654 - ops->bmove(vc, info, line + ycount, x, line, x, 1, 1655 - s-start); 1633 + par->bitops->bmove(vc, info, line + ycount, x, line, x, 1, 1634 + s - start); 1656 1635 console_conditional_schedule(); 1657 1636 if (ycount > 0) 1658 1637 line++; ··· 1723 1702 int dy, int dx, int height, int width, u_int y_break) 1724 1703 { 1725 1704 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 1726 - struct fbcon_ops *ops = info->fbcon_par; 1705 + struct fbcon_par *par = info->fbcon_par; 1727 1706 u_int b; 1728 1707 1729 1708 if (sy < y_break && sy + height > y_break) { ··· 1757 1736 } 1758 1737 return; 1759 1738 } 1760 - ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, 1761 - height, width); 1739 + par->bitops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, 1740 + height, width); 1762 1741 } 1763 1742 1764 1743 static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, ··· 1985 1964 struct vc_data *vc) 1986 1965 { 1987 1966 #ifdef 
CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION 1988 - struct fbcon_ops *ops = info->fbcon_par; 1967 + struct fbcon_par *par = info->fbcon_par; 1989 1968 int cap = info->flags; 1990 1969 u16 t = 0; 1991 - int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, 1992 - info->fix.xpanstep); 1993 - int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t); 1994 - int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 1995 - int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, 1996 - info->var.xres_virtual); 1970 + int ypan = FBCON_SWAP(par->rotate, info->fix.ypanstep, info->fix.xpanstep); 1971 + int ywrap = FBCON_SWAP(par->rotate, info->fix.ywrapstep, t); 1972 + int yres = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); 1973 + int vyres = FBCON_SWAP(par->rotate, info->var.yres_virtual, info->var.xres_virtual); 1997 1974 int good_pan = (cap & FBINFO_HWACCEL_YPAN) && 1998 1975 divides(ypan, vc->vc_font.height) && vyres > yres; 1999 1976 int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) && ··· 2024 2005 struct fb_info *info, 2025 2006 struct vc_data *vc) 2026 2007 { 2027 - struct fbcon_ops *ops = info->fbcon_par; 2008 + struct fbcon_par *par = info->fbcon_par; 2028 2009 int fh = vc->vc_font.height; 2029 - int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 2030 - int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, 2031 - info->var.xres_virtual); 2010 + int yres = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); 2011 + int vyres = FBCON_SWAP(par->rotate, info->var.yres_virtual, info->var.xres_virtual); 2032 2012 2033 2013 p->vrows = vyres/fh; 2034 2014 if (yres > (fh * (vc->vc_rows + 1))) ··· 2046 2028 unsigned int height, bool from_user) 2047 2029 { 2048 2030 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 2049 - struct fbcon_ops *ops = info->fbcon_par; 2031 + struct fbcon_par *par = info->fbcon_par; 2050 2032 struct fbcon_display *p = &fb_display[vc->vc_num]; 2051 2033 struct fb_var_screeninfo var = 
info->var; 2052 2034 int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; ··· 2069 2051 return -EINVAL; 2070 2052 } 2071 2053 2072 - virt_w = FBCON_SWAP(ops->rotate, width, height); 2073 - virt_h = FBCON_SWAP(ops->rotate, height, width); 2074 - virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, 2075 - vc->vc_font.height); 2076 - virt_fh = FBCON_SWAP(ops->rotate, vc->vc_font.height, 2077 - vc->vc_font.width); 2054 + virt_w = FBCON_SWAP(par->rotate, width, height); 2055 + virt_h = FBCON_SWAP(par->rotate, height, width); 2056 + virt_fw = FBCON_SWAP(par->rotate, vc->vc_font.width, vc->vc_font.height); 2057 + virt_fh = FBCON_SWAP(par->rotate, vc->vc_font.height, vc->vc_font.width); 2078 2058 var.xres = virt_w * virt_fw; 2079 2059 var.yres = virt_h * virt_fh; 2080 2060 x_diff = info->var.xres - var.xres; ··· 2098 2082 fb_set_var(info, &var); 2099 2083 } 2100 2084 var_to_display(p, &info->var, info); 2101 - ops->var = info->var; 2085 + par->var = info->var; 2102 2086 } 2103 2087 updatescrollmode(p, info, vc); 2104 2088 return 0; ··· 2107 2091 static bool fbcon_switch(struct vc_data *vc) 2108 2092 { 2109 2093 struct fb_info *info, *old_info = NULL; 2110 - struct fbcon_ops *ops; 2094 + struct fbcon_par *par; 2111 2095 struct fbcon_display *p = &fb_display[vc->vc_num]; 2112 2096 struct fb_var_screeninfo var; 2113 2097 int i, ret, prev_console; 2114 2098 2115 2099 info = fbcon_info_from_console(vc->vc_num); 2116 - ops = info->fbcon_par; 2100 + par = info->fbcon_par; 2117 2101 2118 2102 if (logo_shown >= 0) { 2119 2103 struct vc_data *conp2 = vc_cons[logo_shown].d; ··· 2124 2108 logo_shown = FBCON_LOGO_CANSHOW; 2125 2109 } 2126 2110 2127 - prev_console = ops->currcon; 2111 + prev_console = par->currcon; 2128 2112 if (prev_console != -1) 2129 2113 old_info = fbcon_info_from_console(prev_console); 2130 2114 /* ··· 2137 2121 */ 2138 2122 fbcon_for_each_registered_fb(i) { 2139 2123 if (fbcon_registered_fb[i]->fbcon_par) { 2140 - struct fbcon_ops *o = 
fbcon_registered_fb[i]->fbcon_par; 2124 + struct fbcon_par *par = fbcon_registered_fb[i]->fbcon_par; 2141 2125 2142 - o->currcon = vc->vc_num; 2126 + par->currcon = vc->vc_num; 2143 2127 } 2144 2128 } 2145 2129 memset(&var, 0, sizeof(struct fb_var_screeninfo)); ··· 2153 2137 info->var.activate = var.activate; 2154 2138 var.vmode |= info->var.vmode & ~FB_VMODE_MASK; 2155 2139 fb_set_var(info, &var); 2156 - ops->var = info->var; 2140 + par->var = info->var; 2157 2141 2158 2142 if (old_info != NULL && (old_info != info || 2159 2143 info->flags & FBINFO_MISC_ALWAYS_SETPAR)) { ··· 2170 2154 fbcon_del_cursor_work(old_info); 2171 2155 } 2172 2156 2173 - if (!fbcon_is_active(vc, info) || 2174 - ops->blank_state != FB_BLANK_UNBLANK) 2157 + if (!fbcon_is_active(vc, info) || par->blank_state != FB_BLANK_UNBLANK) 2175 2158 fbcon_del_cursor_work(info); 2176 2159 else 2177 2160 fbcon_add_cursor_work(info); 2178 2161 2179 2162 set_blitting_type(vc, info); 2180 - ops->cursor_reset = 1; 2163 + par->cursor_reset = 1; 2181 2164 2182 - if (ops->rotate_font && ops->rotate_font(info, vc)) { 2183 - ops->rotate = FB_ROTATE_UR; 2165 + if (par->bitops->rotate_font && par->bitops->rotate_font(info, vc)) { 2166 + par->rotate = FB_ROTATE_UR; 2184 2167 set_blitting_type(vc, info); 2185 2168 } 2186 2169 ··· 2210 2195 scrollback_current = 0; 2211 2196 2212 2197 if (fbcon_is_active(vc, info)) { 2213 - ops->var.xoffset = ops->var.yoffset = p->yscroll = 0; 2214 - ops->update_start(info); 2198 + par->var.xoffset = par->var.yoffset = p->yscroll = 0; 2199 + par->bitops->update_start(info); 2215 2200 } 2216 2201 2217 2202 fbcon_set_palette(vc, color_table); ··· 2220 2205 if (logo_shown == FBCON_LOGO_DRAW) { 2221 2206 2222 2207 logo_shown = fg_console; 2223 - fb_show_logo(info, ops->rotate); 2208 + fb_show_logo(info, par->rotate); 2224 2209 update_region(vc, 2225 2210 vc->vc_origin + vc->vc_size_row * vc->vc_top, 2226 2211 vc->vc_size_row * (vc->vc_bottom - ··· 2249 2234 bool mode_switch) 2250 2235 { 
2251 2236 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 2252 - struct fbcon_ops *ops = info->fbcon_par; 2237 + struct fbcon_par *par = info->fbcon_par; 2253 2238 2254 2239 if (mode_switch) { 2255 2240 struct fb_var_screeninfo var = info->var; 2256 2241 2257 - ops->graphics = 1; 2242 + par->graphics = 1; 2258 2243 2259 2244 if (!blank) { 2260 2245 var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE | 2261 2246 FB_ACTIVATE_KD_TEXT; 2262 2247 fb_set_var(info, &var); 2263 - ops->graphics = 0; 2264 - ops->var = info->var; 2248 + par->graphics = 0; 2249 + par->var = info->var; 2265 2250 } 2266 2251 } 2267 2252 2268 2253 if (fbcon_is_active(vc, info)) { 2269 - if (ops->blank_state != blank) { 2270 - ops->blank_state = blank; 2254 + if (par->blank_state != blank) { 2255 + par->blank_state = blank; 2271 2256 fbcon_cursor(vc, !blank); 2272 - ops->cursor_flash = (!blank); 2257 + par->cursor_flash = (!blank); 2273 2258 2274 2259 if (fb_blank(info, blank)) 2275 2260 fbcon_generic_blank(vc, info, blank); ··· 2279 2264 update_screen(vc); 2280 2265 } 2281 2266 2282 - if (mode_switch || !fbcon_is_active(vc, info) || 2283 - ops->blank_state != FB_BLANK_UNBLANK) 2267 + if (mode_switch || !fbcon_is_active(vc, info) || par->blank_state != FB_BLANK_UNBLANK) 2284 2268 fbcon_del_cursor_work(info); 2285 2269 else 2286 2270 fbcon_add_cursor_work(info); ··· 2290 2276 static void fbcon_debug_enter(struct vc_data *vc) 2291 2277 { 2292 2278 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 2293 - struct fbcon_ops *ops = info->fbcon_par; 2279 + struct fbcon_par *par = info->fbcon_par; 2294 2280 2295 - ops->save_graphics = ops->graphics; 2296 - ops->graphics = 0; 2281 + par->save_graphics = par->graphics; 2282 + par->graphics = 0; 2297 2283 if (info->fbops->fb_debug_enter) 2298 2284 info->fbops->fb_debug_enter(info); 2299 2285 fbcon_set_palette(vc, color_table); ··· 2302 2288 static void fbcon_debug_leave(struct vc_data *vc) 2303 2289 { 2304 2290 struct fb_info *info = 
fbcon_info_from_console(vc->vc_num); 2305 - struct fbcon_ops *ops = info->fbcon_par; 2291 + struct fbcon_par *par = info->fbcon_par; 2306 2292 2307 - ops->graphics = ops->save_graphics; 2293 + par->graphics = par->save_graphics; 2308 2294 if (info->fbops->fb_debug_leave) 2309 2295 info->fbops->fb_debug_leave(info); 2310 2296 } ··· 2439 2425 const u8 * data, int userfont) 2440 2426 { 2441 2427 struct fb_info *info = fbcon_info_from_console(vc->vc_num); 2442 - struct fbcon_ops *ops = info->fbcon_par; 2428 + struct fbcon_par *par = info->fbcon_par; 2443 2429 struct fbcon_display *p = &fb_display[vc->vc_num]; 2444 2430 int resize, ret, old_userfont, old_width, old_height, old_charcount; 2445 2431 u8 *old_data = vc->vc_font.data; ··· 2465 2451 if (resize) { 2466 2452 int cols, rows; 2467 2453 2468 - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); 2469 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 2454 + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); 2455 + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); 2470 2456 cols /= w; 2471 2457 rows /= h; 2472 2458 ret = vc_resize(vc, cols, rows); ··· 2665 2651 void fbcon_suspended(struct fb_info *info) 2666 2652 { 2667 2653 struct vc_data *vc = NULL; 2668 - struct fbcon_ops *ops = info->fbcon_par; 2654 + struct fbcon_par *par = info->fbcon_par; 2669 2655 2670 - if (!ops || ops->currcon < 0) 2656 + if (!par || par->currcon < 0) 2671 2657 return; 2672 - vc = vc_cons[ops->currcon].d; 2658 + vc = vc_cons[par->currcon].d; 2673 2659 2674 2660 /* Clear cursor, restore saved data */ 2675 2661 fbcon_cursor(vc, false); ··· 2678 2664 void fbcon_resumed(struct fb_info *info) 2679 2665 { 2680 2666 struct vc_data *vc; 2681 - struct fbcon_ops *ops = info->fbcon_par; 2667 + struct fbcon_par *par = info->fbcon_par; 2682 2668 2683 - if (!ops || ops->currcon < 0) 2669 + if (!par || par->currcon < 0) 2684 2670 return; 2685 - vc = vc_cons[ops->currcon].d; 2671 + vc = 
vc_cons[par->currcon].d; 2686 2672 2687 2673 update_screen(vc); 2688 2674 } 2689 2675 2690 2676 static void fbcon_modechanged(struct fb_info *info) 2691 2677 { 2692 - struct fbcon_ops *ops = info->fbcon_par; 2678 + struct fbcon_par *par = info->fbcon_par; 2693 2679 struct vc_data *vc; 2694 2680 struct fbcon_display *p; 2695 2681 int rows, cols; 2696 2682 2697 - if (!ops || ops->currcon < 0) 2683 + if (!par || par->currcon < 0) 2698 2684 return; 2699 - vc = vc_cons[ops->currcon].d; 2685 + vc = vc_cons[par->currcon].d; 2700 2686 if (vc->vc_mode != KD_TEXT || 2701 - fbcon_info_from_console(ops->currcon) != info) 2687 + fbcon_info_from_console(par->currcon) != info) 2702 2688 return; 2703 2689 2704 2690 p = &fb_display[vc->vc_num]; ··· 2706 2692 2707 2693 if (con_is_visible(vc)) { 2708 2694 var_to_display(p, &info->var, info); 2709 - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); 2710 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 2695 + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); 2696 + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); 2711 2697 cols /= vc->vc_font.width; 2712 2698 rows /= vc->vc_font.height; 2713 2699 vc_resize(vc, cols, rows); ··· 2716 2702 scrollback_current = 0; 2717 2703 2718 2704 if (fbcon_is_active(vc, info)) { 2719 - ops->var.xoffset = ops->var.yoffset = p->yscroll = 0; 2720 - ops->update_start(info); 2705 + par->var.xoffset = par->var.yoffset = p->yscroll = 0; 2706 + par->bitops->update_start(info); 2721 2707 } 2722 2708 2723 2709 fbcon_set_palette(vc, color_table); ··· 2727 2713 2728 2714 static void fbcon_set_all_vcs(struct fb_info *info) 2729 2715 { 2730 - struct fbcon_ops *ops = info->fbcon_par; 2716 + struct fbcon_par *par = info->fbcon_par; 2731 2717 struct vc_data *vc; 2732 2718 struct fbcon_display *p; 2733 2719 int i, rows, cols, fg = -1; 2734 2720 2735 - if (!ops || ops->currcon < 0) 2721 + if (!par || par->currcon < 0) 2736 2722 return; 2737 2723 2738 2724 for (i 
= first_fb_vc; i <= last_fb_vc; i++) { ··· 2749 2735 p = &fb_display[vc->vc_num]; 2750 2736 set_blitting_type(vc, info); 2751 2737 var_to_display(p, &info->var, info); 2752 - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); 2753 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 2738 + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); 2739 + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); 2754 2740 cols /= vc->vc_font.width; 2755 2741 rows /= vc->vc_font.height; 2756 2742 vc_resize(vc, cols, rows); ··· 2773 2759 /* let fbcon check if it supports a new screen resolution */ 2774 2760 int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var) 2775 2761 { 2776 - struct fbcon_ops *ops = info->fbcon_par; 2762 + struct fbcon_par *par = info->fbcon_par; 2777 2763 struct vc_data *vc; 2778 2764 unsigned int i; 2779 2765 2780 2766 WARN_CONSOLE_UNLOCKED(); 2781 2767 2782 - if (!ops) 2768 + if (!par) 2783 2769 return 0; 2784 2770 2785 2771 /* prevent setting a screen size which is smaller than font size */ ··· 3051 3037 3052 3038 void fbcon_fb_blanked(struct fb_info *info, int blank) 3053 3039 { 3054 - struct fbcon_ops *ops = info->fbcon_par; 3040 + struct fbcon_par *par = info->fbcon_par; 3055 3041 struct vc_data *vc; 3056 3042 3057 - if (!ops || ops->currcon < 0) 3043 + if (!par || par->currcon < 0) 3058 3044 return; 3059 3045 3060 - vc = vc_cons[ops->currcon].d; 3061 - if (vc->vc_mode != KD_TEXT || 3062 - fbcon_info_from_console(ops->currcon) != info) 3046 + vc = vc_cons[par->currcon].d; 3047 + if (vc->vc_mode != KD_TEXT || fbcon_info_from_console(par->currcon) != info) 3063 3048 return; 3064 3049 3065 3050 if (con_is_visible(vc)) { ··· 3067 3054 else 3068 3055 do_unblank_screen(0); 3069 3056 } 3070 - ops->blank_state = blank; 3057 + par->blank_state = blank; 3071 3058 } 3072 3059 3073 3060 void fbcon_new_modelist(struct fb_info *info) ··· 3257 3244 struct device_attribute *attr, char *buf) 
3258 3245 { 3259 3246 struct fb_info *info; 3260 - struct fbcon_ops *ops; 3247 + struct fbcon_par *par; 3261 3248 int idx, blink = -1; 3262 3249 3263 3250 console_lock(); ··· 3267 3254 goto err; 3268 3255 3269 3256 info = fbcon_registered_fb[idx]; 3270 - ops = info->fbcon_par; 3257 + par = info->fbcon_par; 3271 3258 3272 - if (!ops) 3259 + if (!par) 3273 3260 goto err; 3274 3261 3275 - blink = delayed_work_pending(&ops->cursor_work); 3262 + blink = delayed_work_pending(&par->cursor_work); 3276 3263 err: 3277 3264 console_unlock(); 3278 3265 return sysfs_emit(buf, "%d\n", blink);
+8 -9
drivers/video/fbdev/core/fbcon.h
··· 51 51 const struct fb_videomode *mode; 52 52 }; 53 53 54 - struct fbcon_ops { 54 + struct fbcon_bitops { 55 55 void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, 56 56 int sx, int dy, int dx, int height, int width); 57 57 void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, ··· 65 65 bool enable, int fg, int bg); 66 66 int (*update_start)(struct fb_info *info); 67 67 int (*rotate_font)(struct fb_info *info, struct vc_data *vc); 68 + }; 69 + 70 + struct fbcon_par { 68 71 struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ 69 72 struct delayed_work cursor_work; /* Cursor timer */ 70 73 struct fb_cursor cursor_state; ··· 89 86 u8 *cursor_src; 90 87 u32 cursor_size; 91 88 u32 fd_size; 89 + 90 + const struct fbcon_bitops *bitops; 92 91 }; 92 + 93 93 /* 94 94 * Attribute Decoding 95 95 */ ··· 112 106 ((s) & 0x400) 113 107 #define attr_blink(s) \ 114 108 ((s) & 0x8000) 115 - 116 109 117 110 static inline int mono_col(const struct fb_info *info) 118 111 { ··· 191 186 #ifdef CONFIG_FB_TILEBLITTING 192 187 extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); 193 188 #endif 194 - extern void fbcon_set_bitops(struct fbcon_ops *ops); 189 + extern void fbcon_set_bitops_ur(struct fbcon_par *par); 195 190 extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor); 196 191 197 192 #define FBCON_ATTRIBUTE_UNDERLINE 1 ··· 228 223 typeof(v) _v = (v); \ 229 224 (void) (&_r == &_v); \ 230 225 (i == FB_ROTATE_UR || i == FB_ROTATE_UD) ? _r : _v; }) 231 - 232 - #ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION 233 - extern void fbcon_set_rotate(struct fbcon_ops *ops); 234 - #else 235 - #define fbcon_set_rotate(x) do {} while(0) 236 - #endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ 237 226 238 227 #endif /* _VIDEO_FBCON_H */
+78 -73
drivers/video/fbdev/core/fbcon_ccw.c
··· 63 63 static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, 64 64 int sx, int dy, int dx, int height, int width) 65 65 { 66 - struct fbcon_ops *ops = info->fbcon_par; 66 + struct fbcon_par *par = info->fbcon_par; 67 67 struct fb_copyarea area; 68 - u32 vyres = GETVYRES(ops->p, info); 68 + u32 vyres = GETVYRES(par->p, info); 69 69 70 70 area.sx = sy * vc->vc_font.height; 71 71 area.sy = vyres - ((sx + width) * vc->vc_font.width); ··· 80 80 static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, 81 81 int sx, int height, int width, int fg, int bg) 82 82 { 83 - struct fbcon_ops *ops = info->fbcon_par; 83 + struct fbcon_par *par = info->fbcon_par; 84 84 struct fb_fillrect region; 85 - u32 vyres = GETVYRES(ops->p, info); 85 + u32 vyres = GETVYRES(par->p, info); 86 86 87 87 region.color = bg; 88 88 region.dx = sy * vc->vc_font.height; ··· 99 99 u32 d_pitch, u32 s_pitch, u32 cellsize, 100 100 struct fb_image *image, u8 *buf, u8 *dst) 101 101 { 102 - struct fbcon_ops *ops = info->fbcon_par; 102 + struct fbcon_par *par = info->fbcon_par; 103 103 u16 charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; 104 104 u32 idx = (vc->vc_font.height + 7) >> 3; 105 105 u8 *src; 106 106 107 107 while (cnt--) { 108 - src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize; 108 + src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize; 109 109 110 110 if (attr) { 111 111 ccw_update_attr(buf, src, attr, vc); ··· 130 130 int fg, int bg) 131 131 { 132 132 struct fb_image image; 133 - struct fbcon_ops *ops = info->fbcon_par; 133 + struct fbcon_par *par = info->fbcon_par; 134 134 u32 width = (vc->vc_font.height + 7)/8; 135 135 u32 cellsize = width * vc->vc_font.width; 136 136 u32 maxcnt = info->pixmap.size/cellsize; ··· 139 139 u32 cnt, pitch, size; 140 140 u32 attribute = get_attribute(info, scr_readw(s)); 141 141 u8 *dst, *buf = NULL; 142 - u32 vyres = GETVYRES(ops->p, info); 142 + u32 vyres = GETVYRES(par->p, info); 143 143 144 - if (!ops->fontbuffer) 144 + if (!par->fontbuffer) 145 145 return; 146 146 147 147 image.fg_color = fg; ··· 221 221 int fg, int bg) 222 222 { 223 223 struct fb_cursor cursor; 224 - struct fbcon_ops *ops = info->fbcon_par; 224 + struct fbcon_par *par = info->fbcon_par; 225 225 unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; 226 226 int w = (vc->vc_font.height + 7) >> 3, c; 227 - int y = real_y(ops->p, vc->state.y); 227 + int y = real_y(par->p, vc->state.y); 228 228 int attribute, use_sw = vc->vc_cursor_type & CUR_SW; 229 229 int err = 1, dx, dy; 230 230 char *src; 231 - u32 vyres = GETVYRES(ops->p, info); 231 + u32 vyres = GETVYRES(par->p, info); 232 232 233 - if (!ops->fontbuffer) 233 + if (!par->fontbuffer) 234 234 return; 235 235 236 236 cursor.set = 0; 237 237 238 238 c = scr_readw((u16 *) vc->vc_pos); 239 239 attribute = get_attribute(info, c); 240 - src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); 240 + src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); 241 241 242 - if (ops->cursor_state.image.data != src || 243 - ops->cursor_reset) { 244 - ops->cursor_state.image.data = src; 245 - cursor.set |= FB_CUR_SETIMAGE; 242 + if (par->cursor_state.image.data != src || 243 + par->cursor_reset) { 244 + par->cursor_state.image.data = src; 245 + cursor.set |= FB_CUR_SETIMAGE; 246 246 } 247 247 248 248 if (attribute) { ··· 251 251 dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC); 252 252 if (!dst) 253 253 return; 254 - kfree(ops->cursor_data); 255 - ops->cursor_data = dst; 254 + kfree(par->cursor_data); 255 + par->cursor_data = dst; 256 256 ccw_update_attr(dst, src, attribute, vc); 257 257 src = dst; 258 258 } 259 259 260 - if (ops->cursor_state.image.fg_color != fg || 261 - ops->cursor_state.image.bg_color != bg || 262 - ops->cursor_reset) { 263 - ops->cursor_state.image.fg_color = fg; 264 - ops->cursor_state.image.bg_color = bg; 260 + if (par->cursor_state.image.fg_color != fg || 261 + par->cursor_state.image.bg_color != bg || 262 + par->cursor_reset) { 263 + par->cursor_state.image.fg_color = fg; 264 + par->cursor_state.image.bg_color = bg; 265 265 cursor.set |= FB_CUR_SETCMAP; 266 266 } 267 267 268 - if (ops->cursor_state.image.height != vc->vc_font.width || 269 - ops->cursor_state.image.width != vc->vc_font.height || 270 - 
ops->cursor_reset) { 271 - ops->cursor_state.image.height = vc->vc_font.width; 272 - ops->cursor_state.image.width = vc->vc_font.height; 268 + if (par->cursor_state.image.height != vc->vc_font.width || 269 + par->cursor_state.image.width != vc->vc_font.height || 270 + par->cursor_reset) { 271 + par->cursor_state.image.height = vc->vc_font.width; 272 + par->cursor_state.image.width = vc->vc_font.height; 273 273 cursor.set |= FB_CUR_SETSIZE; 274 274 } 275 275 276 276 dx = y * vc->vc_font.height; 277 277 dy = vyres - ((vc->state.x + 1) * vc->vc_font.width); 278 278 279 - if (ops->cursor_state.image.dx != dx || 280 - ops->cursor_state.image.dy != dy || 281 - ops->cursor_reset) { 282 - ops->cursor_state.image.dx = dx; 283 - ops->cursor_state.image.dy = dy; 279 + if (par->cursor_state.image.dx != dx || 280 + par->cursor_state.image.dy != dy || 281 + par->cursor_reset) { 282 + par->cursor_state.image.dx = dx; 283 + par->cursor_state.image.dy = dy; 284 284 cursor.set |= FB_CUR_SETPOS; 285 285 } 286 286 287 - if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || 288 - ops->cursor_reset) { 289 - ops->cursor_state.hot.x = cursor.hot.y = 0; 287 + if (par->cursor_state.hot.x || par->cursor_state.hot.y || 288 + par->cursor_reset) { 289 + par->cursor_state.hot.x = cursor.hot.y = 0; 290 290 cursor.set |= FB_CUR_SETHOT; 291 291 } 292 292 293 293 if (cursor.set & FB_CUR_SETSIZE || 294 - vc->vc_cursor_type != ops->p->cursor_shape || 295 - ops->cursor_state.mask == NULL || 296 - ops->cursor_reset) { 294 + vc->vc_cursor_type != par->p->cursor_shape || 295 + par->cursor_state.mask == NULL || 296 + par->cursor_reset) { 297 297 char *tmp, *mask = kmalloc_array(w, vc->vc_font.width, 298 298 GFP_ATOMIC); 299 299 int cur_height, size, i = 0; ··· 309 309 return; 310 310 } 311 311 312 - kfree(ops->cursor_state.mask); 313 - ops->cursor_state.mask = mask; 312 + kfree(par->cursor_state.mask); 313 + par->cursor_state.mask = mask; 314 314 315 - ops->p->cursor_shape = vc->vc_cursor_type; 315 + 
par->p->cursor_shape = vc->vc_cursor_type; 316 316 cursor.set |= FB_CUR_SETSHAPE; 317 317 318 - switch (CUR_SIZE(ops->p->cursor_shape)) { 318 + switch (CUR_SIZE(par->p->cursor_shape)) { 319 319 case CUR_NONE: 320 320 cur_height = 0; 321 321 break; ··· 348 348 kfree(tmp); 349 349 } 350 350 351 - ops->cursor_state.enable = enable && !use_sw; 351 + par->cursor_state.enable = enable && !use_sw; 352 352 353 353 cursor.image.data = src; 354 - cursor.image.fg_color = ops->cursor_state.image.fg_color; 355 - cursor.image.bg_color = ops->cursor_state.image.bg_color; 356 - cursor.image.dx = ops->cursor_state.image.dx; 357 - cursor.image.dy = ops->cursor_state.image.dy; 358 - cursor.image.height = ops->cursor_state.image.height; 359 - cursor.image.width = ops->cursor_state.image.width; 360 - cursor.hot.x = ops->cursor_state.hot.x; 361 - cursor.hot.y = ops->cursor_state.hot.y; 362 - cursor.mask = ops->cursor_state.mask; 363 - cursor.enable = ops->cursor_state.enable; 354 + cursor.image.fg_color = par->cursor_state.image.fg_color; 355 + cursor.image.bg_color = par->cursor_state.image.bg_color; 356 + cursor.image.dx = par->cursor_state.image.dx; 357 + cursor.image.dy = par->cursor_state.image.dy; 358 + cursor.image.height = par->cursor_state.image.height; 359 + cursor.image.width = par->cursor_state.image.width; 360 + cursor.hot.x = par->cursor_state.hot.x; 361 + cursor.hot.y = par->cursor_state.hot.y; 362 + cursor.mask = par->cursor_state.mask; 363 + cursor.enable = par->cursor_state.enable; 364 364 cursor.image.depth = 1; 365 365 cursor.rop = ROP_XOR; 366 366 ··· 370 370 if (err) 371 371 soft_cursor(info, &cursor); 372 372 373 - ops->cursor_reset = 0; 373 + par->cursor_reset = 0; 374 374 } 375 375 376 376 static int ccw_update_start(struct fb_info *info) 377 377 { 378 - struct fbcon_ops *ops = info->fbcon_par; 378 + struct fbcon_par *par = info->fbcon_par; 379 379 u32 yoffset; 380 - u32 vyres = GETVYRES(ops->p, info); 380 + u32 vyres = GETVYRES(par->p, info); 381 381 int err; 
382 382 383 - yoffset = (vyres - info->var.yres) - ops->var.xoffset; 384 - ops->var.xoffset = ops->var.yoffset; 385 - ops->var.yoffset = yoffset; 386 - err = fb_pan_display(info, &ops->var); 387 - ops->var.xoffset = info->var.xoffset; 388 - ops->var.yoffset = info->var.yoffset; 389 - ops->var.vmode = info->var.vmode; 383 + yoffset = (vyres - info->var.yres) - par->var.xoffset; 384 + par->var.xoffset = par->var.yoffset; 385 + par->var.yoffset = yoffset; 386 + err = fb_pan_display(info, &par->var); 387 + par->var.xoffset = info->var.xoffset; 388 + par->var.yoffset = info->var.yoffset; 389 + par->var.vmode = info->var.vmode; 390 390 return err; 391 391 } 392 392 393 - void fbcon_rotate_ccw(struct fbcon_ops *ops) 393 + static const struct fbcon_bitops ccw_fbcon_bitops = { 394 + .bmove = ccw_bmove, 395 + .clear = ccw_clear, 396 + .putcs = ccw_putcs, 397 + .clear_margins = ccw_clear_margins, 398 + .cursor = ccw_cursor, 399 + .update_start = ccw_update_start, 400 + .rotate_font = fbcon_rotate_font, 401 + }; 402 + 403 + void fbcon_set_bitops_ccw(struct fbcon_par *par) 394 404 { 395 - ops->bmove = ccw_bmove; 396 - ops->clear = ccw_clear; 397 - ops->putcs = ccw_putcs; 398 - ops->clear_margins = ccw_clear_margins; 399 - ops->cursor = ccw_cursor; 400 - ops->update_start = ccw_update_start; 405 + par->bitops = &ccw_fbcon_bitops; 401 406 }
+78 -73
drivers/video/fbdev/core/fbcon_cw.c
··· 48 48 static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, 49 49 int sx, int dy, int dx, int height, int width) 50 50 { 51 - struct fbcon_ops *ops = info->fbcon_par; 51 + struct fbcon_par *par = info->fbcon_par; 52 52 struct fb_copyarea area; 53 - u32 vxres = GETVXRES(ops->p, info); 53 + u32 vxres = GETVXRES(par->p, info); 54 54 55 55 area.sx = vxres - ((sy + height) * vc->vc_font.height); 56 56 area.sy = sx * vc->vc_font.width; ··· 65 65 static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, 66 66 int sx, int height, int width, int fg, int bg) 67 67 { 68 - struct fbcon_ops *ops = info->fbcon_par; 68 + struct fbcon_par *par = info->fbcon_par; 69 69 struct fb_fillrect region; 70 - u32 vxres = GETVXRES(ops->p, info); 70 + u32 vxres = GETVXRES(par->p, info); 71 71 72 72 region.color = bg; 73 73 region.dx = vxres - ((sy + height) * vc->vc_font.height); ··· 84 84 u32 d_pitch, u32 s_pitch, u32 cellsize, 85 85 struct fb_image *image, u8 *buf, u8 *dst) 86 86 { 87 - struct fbcon_ops *ops = info->fbcon_par; 87 + struct fbcon_par *par = info->fbcon_par; 88 88 u16 charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; 89 89 u32 idx = (vc->vc_font.height + 7) >> 3; 90 90 u8 *src; 91 91 92 92 while (cnt--) { 93 - src = ops->fontbuffer + (scr_readw(s++) & charmask)*cellsize; 93 + src = par->fontbuffer + (scr_readw(s++) & charmask) * cellsize; 94 94 95 95 if (attr) { 96 96 cw_update_attr(buf, src, attr, vc); ··· 115 115 int fg, int bg) 116 116 { 117 117 struct fb_image image; 118 - struct fbcon_ops *ops = info->fbcon_par; 118 + struct fbcon_par *par = info->fbcon_par; 119 119 u32 width = (vc->vc_font.height + 7)/8; 120 120 u32 cellsize = width * vc->vc_font.width; 121 121 u32 maxcnt = info->pixmap.size/cellsize; ··· 124 124 u32 cnt, pitch, size; 125 125 u32 attribute = get_attribute(info, scr_readw(s)); 126 126 u8 *dst, *buf = NULL; 127 - u32 vxres = GETVXRES(ops->p, info); 127 + u32 vxres = GETVXRES(par->p, info); 128 128 129 - if (!ops->fontbuffer) 129 + if (!par->fontbuffer) 130 130 return; 131 131 132 132 image.fg_color = fg; ··· 204 204 int fg, int bg) 205 205 { 206 206 struct fb_cursor cursor; 207 - struct fbcon_ops *ops = info->fbcon_par; 207 + struct fbcon_par *par = info->fbcon_par; 208 208 unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; 209 209 int w = (vc->vc_font.height + 7) >> 3, c; 210 - int y = real_y(ops->p, vc->state.y); 210 + int y = real_y(par->p, vc->state.y); 211 211 int attribute, use_sw = vc->vc_cursor_type & CUR_SW; 212 212 int err = 1, dx, dy; 213 213 char *src; 214 - u32 vxres = GETVXRES(ops->p, info); 214 + u32 vxres = GETVXRES(par->p, info); 215 215 216 - if (!ops->fontbuffer) 216 + if (!par->fontbuffer) 217 217 return; 218 218 219 219 cursor.set = 0; 220 220 221 221 c = scr_readw((u16 *) vc->vc_pos); 222 222 attribute = get_attribute(info, c); 223 - src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); 223 + src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); 224 224 225 - if (ops->cursor_state.image.data != src || 226 - ops->cursor_reset) { 227 - ops->cursor_state.image.data = src; 228 - cursor.set |= FB_CUR_SETIMAGE; 225 + if (par->cursor_state.image.data != src || 226 + par->cursor_reset) { 227 + par->cursor_state.image.data = src; 228 + cursor.set |= FB_CUR_SETIMAGE; 229 229 } 230 230 231 231 if (attribute) { ··· 234 234 dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC); 235 235 if (!dst) 236 236 return; 237 - kfree(ops->cursor_data); 238 - ops->cursor_data = dst; 237 + kfree(par->cursor_data); 238 + par->cursor_data = dst; 239 239 cw_update_attr(dst, src, attribute, vc); 240 240 src = dst; 241 241 } 242 242 243 - if (ops->cursor_state.image.fg_color != fg || 244 - ops->cursor_state.image.bg_color != bg || 245 - ops->cursor_reset) { 246 - ops->cursor_state.image.fg_color = fg; 247 - ops->cursor_state.image.bg_color = bg; 243 + if (par->cursor_state.image.fg_color != fg || 244 + par->cursor_state.image.bg_color != bg || 245 + par->cursor_reset) { 246 + par->cursor_state.image.fg_color = fg; 247 + par->cursor_state.image.bg_color = bg; 248 248 cursor.set |= FB_CUR_SETCMAP; 249 249 } 250 250 251 - if (ops->cursor_state.image.height != vc->vc_font.width || 252 - ops->cursor_state.image.width != vc->vc_font.height || 253 - 
ops->cursor_reset) { 254 - ops->cursor_state.image.height = vc->vc_font.width; 255 - ops->cursor_state.image.width = vc->vc_font.height; 251 + if (par->cursor_state.image.height != vc->vc_font.width || 252 + par->cursor_state.image.width != vc->vc_font.height || 253 + par->cursor_reset) { 254 + par->cursor_state.image.height = vc->vc_font.width; 255 + par->cursor_state.image.width = vc->vc_font.height; 256 256 cursor.set |= FB_CUR_SETSIZE; 257 257 } 258 258 259 259 dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height); 260 260 dy = vc->state.x * vc->vc_font.width; 261 261 262 - if (ops->cursor_state.image.dx != dx || 263 - ops->cursor_state.image.dy != dy || 264 - ops->cursor_reset) { 265 - ops->cursor_state.image.dx = dx; 266 - ops->cursor_state.image.dy = dy; 262 + if (par->cursor_state.image.dx != dx || 263 + par->cursor_state.image.dy != dy || 264 + par->cursor_reset) { 265 + par->cursor_state.image.dx = dx; 266 + par->cursor_state.image.dy = dy; 267 267 cursor.set |= FB_CUR_SETPOS; 268 268 } 269 269 270 - if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || 271 - ops->cursor_reset) { 272 - ops->cursor_state.hot.x = cursor.hot.y = 0; 270 + if (par->cursor_state.hot.x || par->cursor_state.hot.y || 271 + par->cursor_reset) { 272 + par->cursor_state.hot.x = cursor.hot.y = 0; 273 273 cursor.set |= FB_CUR_SETHOT; 274 274 } 275 275 276 276 if (cursor.set & FB_CUR_SETSIZE || 277 - vc->vc_cursor_type != ops->p->cursor_shape || 278 - ops->cursor_state.mask == NULL || 279 - ops->cursor_reset) { 277 + vc->vc_cursor_type != par->p->cursor_shape || 278 + par->cursor_state.mask == NULL || 279 + par->cursor_reset) { 280 280 char *tmp, *mask = kmalloc_array(w, vc->vc_font.width, 281 281 GFP_ATOMIC); 282 282 int cur_height, size, i = 0; ··· 292 292 return; 293 293 } 294 294 295 - kfree(ops->cursor_state.mask); 296 - ops->cursor_state.mask = mask; 295 + kfree(par->cursor_state.mask); 296 + par->cursor_state.mask = mask; 297 297 298 - ops->p->cursor_shape = 
vc->vc_cursor_type; 298 + par->p->cursor_shape = vc->vc_cursor_type; 299 299 cursor.set |= FB_CUR_SETSHAPE; 300 300 301 - switch (CUR_SIZE(ops->p->cursor_shape)) { 301 + switch (CUR_SIZE(par->p->cursor_shape)) { 302 302 case CUR_NONE: 303 303 cur_height = 0; 304 304 break; ··· 331 331 kfree(tmp); 332 332 } 333 333 334 - ops->cursor_state.enable = enable && !use_sw; 334 + par->cursor_state.enable = enable && !use_sw; 335 335 336 336 cursor.image.data = src; 337 - cursor.image.fg_color = ops->cursor_state.image.fg_color; 338 - cursor.image.bg_color = ops->cursor_state.image.bg_color; 339 - cursor.image.dx = ops->cursor_state.image.dx; 340 - cursor.image.dy = ops->cursor_state.image.dy; 341 - cursor.image.height = ops->cursor_state.image.height; 342 - cursor.image.width = ops->cursor_state.image.width; 343 - cursor.hot.x = ops->cursor_state.hot.x; 344 - cursor.hot.y = ops->cursor_state.hot.y; 345 - cursor.mask = ops->cursor_state.mask; 346 - cursor.enable = ops->cursor_state.enable; 337 + cursor.image.fg_color = par->cursor_state.image.fg_color; 338 + cursor.image.bg_color = par->cursor_state.image.bg_color; 339 + cursor.image.dx = par->cursor_state.image.dx; 340 + cursor.image.dy = par->cursor_state.image.dy; 341 + cursor.image.height = par->cursor_state.image.height; 342 + cursor.image.width = par->cursor_state.image.width; 343 + cursor.hot.x = par->cursor_state.hot.x; 344 + cursor.hot.y = par->cursor_state.hot.y; 345 + cursor.mask = par->cursor_state.mask; 346 + cursor.enable = par->cursor_state.enable; 347 347 cursor.image.depth = 1; 348 348 cursor.rop = ROP_XOR; 349 349 ··· 353 353 if (err) 354 354 soft_cursor(info, &cursor); 355 355 356 - ops->cursor_reset = 0; 356 + par->cursor_reset = 0; 357 357 } 358 358 359 359 static int cw_update_start(struct fb_info *info) 360 360 { 361 - struct fbcon_ops *ops = info->fbcon_par; 362 - u32 vxres = GETVXRES(ops->p, info); 361 + struct fbcon_par *par = info->fbcon_par; 362 + u32 vxres = GETVXRES(par->p, info); 363 363 u32 
xoffset; 364 364 int err; 365 365 366 - xoffset = vxres - (info->var.xres + ops->var.yoffset); 367 - ops->var.yoffset = ops->var.xoffset; 368 - ops->var.xoffset = xoffset; 369 - err = fb_pan_display(info, &ops->var); 370 - ops->var.xoffset = info->var.xoffset; 371 - ops->var.yoffset = info->var.yoffset; 372 - ops->var.vmode = info->var.vmode; 366 + xoffset = vxres - (info->var.xres + par->var.yoffset); 367 + par->var.yoffset = par->var.xoffset; 368 + par->var.xoffset = xoffset; 369 + err = fb_pan_display(info, &par->var); 370 + par->var.xoffset = info->var.xoffset; 371 + par->var.yoffset = info->var.yoffset; 372 + par->var.vmode = info->var.vmode; 373 373 return err; 374 374 } 375 375 376 - void fbcon_rotate_cw(struct fbcon_ops *ops) 376 + static const struct fbcon_bitops cw_fbcon_bitops = { 377 + .bmove = cw_bmove, 378 + .clear = cw_clear, 379 + .putcs = cw_putcs, 380 + .clear_margins = cw_clear_margins, 381 + .cursor = cw_cursor, 382 + .update_start = cw_update_start, 383 + .rotate_font = fbcon_rotate_font, 384 + }; 385 + 386 + void fbcon_set_bitops_cw(struct fbcon_par *par) 377 387 { 378 - ops->bmove = cw_bmove; 379 - ops->clear = cw_clear; 380 - ops->putcs = cw_putcs; 381 - ops->clear_margins = cw_clear_margins; 382 - ops->cursor = cw_cursor; 383 - ops->update_start = cw_update_start; 388 + par->bitops = &cw_fbcon_bitops; 384 389 }
+15 -32
drivers/video/fbdev/core/fbcon_rotate.c
··· 18 18 #include "fbcon.h" 19 19 #include "fbcon_rotate.h" 20 20 21 - static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc) 21 + int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc) 22 22 { 23 - struct fbcon_ops *ops = info->fbcon_par; 23 + struct fbcon_par *par = info->fbcon_par; 24 24 int len, err = 0; 25 25 int s_cellsize, d_cellsize, i; 26 26 const u8 *src; 27 27 u8 *dst; 28 28 29 - if (vc->vc_font.data == ops->fontdata && 30 - ops->p->con_rotate == ops->cur_rotate) 29 + if (vc->vc_font.data == par->fontdata && 30 + par->p->con_rotate == par->cur_rotate) 31 31 goto finished; 32 32 33 - src = ops->fontdata = vc->vc_font.data; 34 - ops->cur_rotate = ops->p->con_rotate; 33 + src = par->fontdata = vc->vc_font.data; 34 + par->cur_rotate = par->p->con_rotate; 35 35 len = vc->vc_font.charcount; 36 36 s_cellsize = ((vc->vc_font.width + 7)/8) * 37 37 vc->vc_font.height; 38 38 d_cellsize = s_cellsize; 39 39 40 - if (ops->rotate == FB_ROTATE_CW || 41 - ops->rotate == FB_ROTATE_CCW) 40 + if (par->rotate == FB_ROTATE_CW || 41 + par->rotate == FB_ROTATE_CCW) 42 42 d_cellsize = ((vc->vc_font.height + 7)/8) * 43 43 vc->vc_font.width; 44 44 45 45 if (info->fbops->fb_sync) 46 46 info->fbops->fb_sync(info); 47 47 48 - if (ops->fd_size < d_cellsize * len) { 48 + if (par->fd_size < d_cellsize * len) { 49 49 dst = kmalloc_array(len, d_cellsize, GFP_KERNEL); 50 50 51 51 if (dst == NULL) { ··· 53 53 goto finished; 54 54 } 55 55 56 - ops->fd_size = d_cellsize * len; 57 - kfree(ops->fontbuffer); 58 - ops->fontbuffer = dst; 56 + par->fd_size = d_cellsize * len; 57 + kfree(par->fontbuffer); 58 + par->fontbuffer = dst; 59 59 } 60 60 61 - dst = ops->fontbuffer; 62 - memset(dst, 0, ops->fd_size); 61 + dst = par->fontbuffer; 62 + memset(dst, 0, par->fd_size); 63 63 64 - switch (ops->rotate) { 64 + switch (par->rotate) { 65 65 case FB_ROTATE_UD: 66 66 for (i = len; i--; ) { 67 67 rotate_ud(src, dst, vc->vc_font.width, ··· 91 91 92 92 finished: 93 93 return err; 94 
- } 95 - 96 - void fbcon_set_rotate(struct fbcon_ops *ops) 97 - { 98 - ops->rotate_font = fbcon_rotate_font; 99 - 100 - switch(ops->rotate) { 101 - case FB_ROTATE_CW: 102 - fbcon_rotate_cw(ops); 103 - break; 104 - case FB_ROTATE_UD: 105 - fbcon_rotate_ud(ops); 106 - break; 107 - case FB_ROTATE_CCW: 108 - fbcon_rotate_ccw(ops); 109 - break; 110 - } 111 94 }
+15 -3
drivers/video/fbdev/core/fbcon_rotate.h
··· 90 90 } 91 91 } 92 92 93 - extern void fbcon_rotate_cw(struct fbcon_ops *ops); 94 - extern void fbcon_rotate_ud(struct fbcon_ops *ops); 95 - extern void fbcon_rotate_ccw(struct fbcon_ops *ops); 93 + int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc); 94 + 95 + #if defined(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION) 96 + void fbcon_set_bitops_cw(struct fbcon_par *par); 97 + void fbcon_set_bitops_ud(struct fbcon_par *par); 98 + void fbcon_set_bitops_ccw(struct fbcon_par *par); 99 + #else 100 + static inline void fbcon_set_bitops_cw(struct fbcon_par *par) 101 + { } 102 + static inline void fbcon_set_bitops_ud(struct fbcon_par *par) 103 + { } 104 + static inline void fbcon_set_bitops_ccw(struct fbcon_par *par) 105 + { } 106 + #endif 107 + 96 108 #endif
+86 -81
drivers/video/fbdev/core/fbcon_ud.c
··· 48 48 static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, 49 49 int sx, int dy, int dx, int height, int width) 50 50 { 51 - struct fbcon_ops *ops = info->fbcon_par; 51 + struct fbcon_par *par = info->fbcon_par; 52 52 struct fb_copyarea area; 53 - u32 vyres = GETVYRES(ops->p, info); 54 - u32 vxres = GETVXRES(ops->p, info); 53 + u32 vyres = GETVYRES(par->p, info); 54 + u32 vxres = GETVXRES(par->p, info); 55 55 56 56 area.sy = vyres - ((sy + height) * vc->vc_font.height); 57 57 area.sx = vxres - ((sx + width) * vc->vc_font.width); ··· 66 66 static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, 67 67 int sx, int height, int width, int fg, int bg) 68 68 { 69 - struct fbcon_ops *ops = info->fbcon_par; 69 + struct fbcon_par *par = info->fbcon_par; 70 70 struct fb_fillrect region; 71 - u32 vyres = GETVYRES(ops->p, info); 72 - u32 vxres = GETVXRES(ops->p, info); 71 + u32 vyres = GETVYRES(par->p, info); 72 + u32 vxres = GETVXRES(par->p, info); 73 73 74 74 region.color = bg; 75 75 region.dy = vyres - ((sy + height) * vc->vc_font.height); ··· 86 86 u32 d_pitch, u32 s_pitch, u32 cellsize, 87 87 struct fb_image *image, u8 *buf, u8 *dst) 88 88 { 89 - struct fbcon_ops *ops = info->fbcon_par; 89 + struct fbcon_par *par = info->fbcon_par; 90 90 u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; 91 91 u32 idx = vc->vc_font.width >> 3; 92 92 u8 *src; 93 93 94 94 while (cnt--) { 95 - src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize; 95 + src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize; 96 96 97 97 if (attr) { 98 98 ud_update_attr(buf, src, attr, vc); ··· 119 119 struct fb_image *image, u8 *buf, 120 120 u8 *dst) 121 121 { 122 - struct fbcon_ops *ops = info->fbcon_par; 122 + struct fbcon_par *par = info->fbcon_par; 123 123 u16 charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; 124 124 u32 shift_low = 0, mod = vc->vc_font.width % 8; 125 125 u32 shift_high = 8; ··· 127 127 u8 *src; 128 128 129 129 while (cnt--) { 130 - src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize; 130 + src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize; 131 131 132 132 if (attr) { 133 133 ud_update_attr(buf, src, attr, vc); ··· 152 152 int fg, int bg) 153 153 { 154 154 struct fb_image image; 155 - struct fbcon_ops *ops = info->fbcon_par; 155 + struct fbcon_par *par = info->fbcon_par; 156 156 u32 width = (vc->vc_font.width + 7)/8; 157 157 u32 cellsize = width * vc->vc_font.height; 158 158 u32 maxcnt = info->pixmap.size/cellsize; ··· 161 161 u32 mod = vc->vc_font.width % 8, cnt, pitch, size; 162 162 u32 attribute = get_attribute(info, scr_readw(s)); 163 163 u8 *dst, *buf = NULL; 164 - u32 vyres = GETVYRES(ops->p, info); 165 - u32 vxres = GETVXRES(ops->p, info); 164 + u32 vyres = GETVYRES(par->p, info); 165 + u32 vxres = GETVXRES(par->p, info); 166 166 167 - if (!ops->fontbuffer) 167 + if (!par->fontbuffer) 168 168 return; 169 169 170 170 image.fg_color = fg; ··· 251 251 int fg, int bg) 252 252 { 253 253 struct fb_cursor cursor; 254 - struct fbcon_ops *ops = info->fbcon_par; 254 + struct fbcon_par *par = info->fbcon_par; 255 255 unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; 256 256 int w = (vc->vc_font.width + 7) >> 3, c; 257 - int y = real_y(ops->p, vc->state.y); 257 + int y = real_y(par->p, vc->state.y); 258 258 int attribute, use_sw = vc->vc_cursor_type & CUR_SW; 259 259 int err = 1, dx, dy; 260 260 char *src; 261 - u32 vyres = GETVYRES(ops->p, info); 262 - u32 vxres = GETVXRES(ops->p, info); 261 + u32 vyres = GETVYRES(par->p, info); 262 + u32 vxres = GETVXRES(par->p, info); 263 263 264 - if (!ops->fontbuffer) 264 + if (!par->fontbuffer) 265 265 return; 266 266 267 267 cursor.set = 0; 268 268 269 269 c = scr_readw((u16 *) vc->vc_pos); 270 270 attribute = get_attribute(info, c); 271 - src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); 271 + src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); 272 272 273 - if (ops->cursor_state.image.data != src || 274 - ops->cursor_reset) { 275 - ops->cursor_state.image.data = src; 276 - cursor.set |= FB_CUR_SETIMAGE; 273 + if (par->cursor_state.image.data != src || 274 + par->cursor_reset) { 275 + par->cursor_state.image.data = src; 276 + cursor.set |= FB_CUR_SETIMAGE; 277 277 } 278 278 279 279 if (attribute) { ··· 282 282 dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC); 283 283 if (!dst) 284 284 return; 285 - kfree(ops->cursor_data); 286 - ops->cursor_data = dst; 285 + kfree(par->cursor_data); 286 + par->cursor_data = dst; 287 287 ud_update_attr(dst, src, attribute, vc); 288 288 src = dst; 289 289 } 290 290 291 - if (ops->cursor_state.image.fg_color != fg || 292 - ops->cursor_state.image.bg_color != bg || 293 - ops->cursor_reset) { 294 - ops->cursor_state.image.fg_color = fg; 295 - ops->cursor_state.image.bg_color = bg; 291 + if (par->cursor_state.image.fg_color != fg || 292 + par->cursor_state.image.bg_color != bg || 293 + par->cursor_reset) { 294 + par->cursor_state.image.fg_color = fg; 295 + par->cursor_state.image.bg_color = bg; 296 296 cursor.set |= FB_CUR_SETCMAP; 297 297 } 298 298 299 - if (ops->cursor_state.image.height != 
vc->vc_font.height || 300 - ops->cursor_state.image.width != vc->vc_font.width || 301 - ops->cursor_reset) { 302 - ops->cursor_state.image.height = vc->vc_font.height; 303 - ops->cursor_state.image.width = vc->vc_font.width; 299 + if (par->cursor_state.image.height != vc->vc_font.height || 300 + par->cursor_state.image.width != vc->vc_font.width || 301 + par->cursor_reset) { 302 + par->cursor_state.image.height = vc->vc_font.height; 303 + par->cursor_state.image.width = vc->vc_font.width; 304 304 cursor.set |= FB_CUR_SETSIZE; 305 305 } 306 306 307 307 dy = vyres - ((y * vc->vc_font.height) + vc->vc_font.height); 308 308 dx = vxres - ((vc->state.x * vc->vc_font.width) + vc->vc_font.width); 309 309 310 - if (ops->cursor_state.image.dx != dx || 311 - ops->cursor_state.image.dy != dy || 312 - ops->cursor_reset) { 313 - ops->cursor_state.image.dx = dx; 314 - ops->cursor_state.image.dy = dy; 310 + if (par->cursor_state.image.dx != dx || 311 + par->cursor_state.image.dy != dy || 312 + par->cursor_reset) { 313 + par->cursor_state.image.dx = dx; 314 + par->cursor_state.image.dy = dy; 315 315 cursor.set |= FB_CUR_SETPOS; 316 316 } 317 317 318 - if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || 319 - ops->cursor_reset) { 320 - ops->cursor_state.hot.x = cursor.hot.y = 0; 318 + if (par->cursor_state.hot.x || par->cursor_state.hot.y || 319 + par->cursor_reset) { 320 + par->cursor_state.hot.x = cursor.hot.y = 0; 321 321 cursor.set |= FB_CUR_SETHOT; 322 322 } 323 323 324 324 if (cursor.set & FB_CUR_SETSIZE || 325 - vc->vc_cursor_type != ops->p->cursor_shape || 326 - ops->cursor_state.mask == NULL || 327 - ops->cursor_reset) { 325 + vc->vc_cursor_type != par->p->cursor_shape || 326 + par->cursor_state.mask == NULL || 327 + par->cursor_reset) { 328 328 char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC); 329 329 int cur_height, size, i = 0; 330 330 u8 msk = 0xff; ··· 332 332 if (!mask) 333 333 return; 334 334 335 - kfree(ops->cursor_state.mask); 336 - 
ops->cursor_state.mask = mask; 335 + kfree(par->cursor_state.mask); 336 + par->cursor_state.mask = mask; 337 337 338 - ops->p->cursor_shape = vc->vc_cursor_type; 338 + par->p->cursor_shape = vc->vc_cursor_type; 339 339 cursor.set |= FB_CUR_SETSHAPE; 340 340 341 - switch (CUR_SIZE(ops->p->cursor_shape)) { 341 + switch (CUR_SIZE(par->p->cursor_shape)) { 342 342 case CUR_NONE: 343 343 cur_height = 0; 344 344 break; ··· 371 371 mask[i++] = ~msk; 372 372 } 373 373 374 - ops->cursor_state.enable = enable && !use_sw; 374 + par->cursor_state.enable = enable && !use_sw; 375 375 376 376 cursor.image.data = src; 377 - cursor.image.fg_color = ops->cursor_state.image.fg_color; 378 - cursor.image.bg_color = ops->cursor_state.image.bg_color; 379 - cursor.image.dx = ops->cursor_state.image.dx; 380 - cursor.image.dy = ops->cursor_state.image.dy; 381 - cursor.image.height = ops->cursor_state.image.height; 382 - cursor.image.width = ops->cursor_state.image.width; 383 - cursor.hot.x = ops->cursor_state.hot.x; 384 - cursor.hot.y = ops->cursor_state.hot.y; 385 - cursor.mask = ops->cursor_state.mask; 386 - cursor.enable = ops->cursor_state.enable; 377 + cursor.image.fg_color = par->cursor_state.image.fg_color; 378 + cursor.image.bg_color = par->cursor_state.image.bg_color; 379 + cursor.image.dx = par->cursor_state.image.dx; 380 + cursor.image.dy = par->cursor_state.image.dy; 381 + cursor.image.height = par->cursor_state.image.height; 382 + cursor.image.width = par->cursor_state.image.width; 383 + cursor.hot.x = par->cursor_state.hot.x; 384 + cursor.hot.y = par->cursor_state.hot.y; 385 + cursor.mask = par->cursor_state.mask; 386 + cursor.enable = par->cursor_state.enable; 387 387 cursor.image.depth = 1; 388 388 cursor.rop = ROP_XOR; 389 389 ··· 393 393 if (err) 394 394 soft_cursor(info, &cursor); 395 395 396 - ops->cursor_reset = 0; 396 + par->cursor_reset = 0; 397 397 } 398 398 399 399 static int ud_update_start(struct fb_info *info) 400 400 { 401 - struct fbcon_ops *ops = 
info->fbcon_par; 401 + struct fbcon_par *par = info->fbcon_par; 402 402 int xoffset, yoffset; 403 - u32 vyres = GETVYRES(ops->p, info); 404 - u32 vxres = GETVXRES(ops->p, info); 403 + u32 vyres = GETVYRES(par->p, info); 404 + u32 vxres = GETVXRES(par->p, info); 405 405 int err; 406 406 407 - xoffset = vxres - info->var.xres - ops->var.xoffset; 408 - yoffset = vyres - info->var.yres - ops->var.yoffset; 407 + xoffset = vxres - info->var.xres - par->var.xoffset; 408 + yoffset = vyres - info->var.yres - par->var.yoffset; 409 409 if (yoffset < 0) 410 410 yoffset += vyres; 411 - ops->var.xoffset = xoffset; 412 - ops->var.yoffset = yoffset; 413 - err = fb_pan_display(info, &ops->var); 414 - ops->var.xoffset = info->var.xoffset; 415 - ops->var.yoffset = info->var.yoffset; 416 - ops->var.vmode = info->var.vmode; 411 + par->var.xoffset = xoffset; 412 + par->var.yoffset = yoffset; 413 + err = fb_pan_display(info, &par->var); 414 + par->var.xoffset = info->var.xoffset; 415 + par->var.yoffset = info->var.yoffset; 416 + par->var.vmode = info->var.vmode; 417 417 return err; 418 418 } 419 419 420 - void fbcon_rotate_ud(struct fbcon_ops *ops) 420 + static const struct fbcon_bitops ud_fbcon_bitops = { 421 + .bmove = ud_bmove, 422 + .clear = ud_clear, 423 + .putcs = ud_putcs, 424 + .clear_margins = ud_clear_margins, 425 + .cursor = ud_cursor, 426 + .update_start = ud_update_start, 427 + .rotate_font = fbcon_rotate_font, 428 + }; 429 + 430 + void fbcon_set_bitops_ud(struct fbcon_par *par) 421 431 { 422 - ops->bmove = ud_bmove; 423 - ops->clear = ud_clear; 424 - ops->putcs = ud_putcs; 425 - ops->clear_margins = ud_clear_margins; 426 - ops->cursor = ud_cursor; 427 - ops->update_start = ud_update_start; 432 + par->bitops = &ud_fbcon_bitops; 428 433 }
+9 -9
drivers/video/fbdev/core/softcursor.c
··· 21 21 22 22 int soft_cursor(struct fb_info *info, struct fb_cursor *cursor) 23 23 { 24 - struct fbcon_ops *ops = info->fbcon_par; 24 + struct fbcon_par *par = info->fbcon_par; 25 25 unsigned int scan_align = info->pixmap.scan_align - 1; 26 26 unsigned int buf_align = info->pixmap.buf_align - 1; 27 27 unsigned int i, size, dsize, s_pitch, d_pitch; ··· 34 34 s_pitch = (cursor->image.width + 7) >> 3; 35 35 dsize = s_pitch * cursor->image.height; 36 36 37 - if (dsize + sizeof(struct fb_image) != ops->cursor_size) { 38 - kfree(ops->cursor_src); 39 - ops->cursor_size = dsize + sizeof(struct fb_image); 37 + if (dsize + sizeof(struct fb_image) != par->cursor_size) { 38 + kfree(par->cursor_src); 39 + par->cursor_size = dsize + sizeof(struct fb_image); 40 40 41 - ops->cursor_src = kmalloc(ops->cursor_size, GFP_ATOMIC); 42 - if (!ops->cursor_src) { 43 - ops->cursor_size = 0; 41 + par->cursor_src = kmalloc(par->cursor_size, GFP_ATOMIC); 42 + if (!par->cursor_src) { 43 + par->cursor_size = 0; 44 44 return -ENOMEM; 45 45 } 46 46 } 47 47 48 - src = ops->cursor_src + sizeof(struct fb_image); 49 - image = (struct fb_image *)ops->cursor_src; 48 + src = par->cursor_src + sizeof(struct fb_image); 49 + image = (struct fb_image *)par->cursor_src; 50 50 *image = cursor->image; 51 51 d_pitch = (s_pitch + scan_align) & ~scan_align; 52 52
+18 -14
drivers/video/fbdev/core/tileblit.c
··· 151 151 152 152 static int tile_update_start(struct fb_info *info) 153 153 { 154 - struct fbcon_ops *ops = info->fbcon_par; 154 + struct fbcon_par *par = info->fbcon_par; 155 155 int err; 156 156 157 - err = fb_pan_display(info, &ops->var); 158 - ops->var.xoffset = info->var.xoffset; 159 - ops->var.yoffset = info->var.yoffset; 160 - ops->var.vmode = info->var.vmode; 157 + err = fb_pan_display(info, &par->var); 158 + par->var.xoffset = info->var.xoffset; 159 + par->var.yoffset = info->var.yoffset; 160 + par->var.vmode = info->var.vmode; 161 161 return err; 162 162 } 163 + 164 + static const struct fbcon_bitops tile_fbcon_bitops = { 165 + .bmove = tile_bmove, 166 + .clear = tile_clear, 167 + .putcs = tile_putcs, 168 + .clear_margins = tile_clear_margins, 169 + .cursor = tile_cursor, 170 + .update_start = tile_update_start, 171 + }; 163 172 164 173 void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) 165 174 { 166 175 struct fb_tilemap map; 167 - struct fbcon_ops *ops = info->fbcon_par; 176 + struct fbcon_par *par = info->fbcon_par; 168 177 169 - ops->bmove = tile_bmove; 170 - ops->clear = tile_clear; 171 - ops->putcs = tile_putcs; 172 - ops->clear_margins = tile_clear_margins; 173 - ops->cursor = tile_cursor; 174 - ops->update_start = tile_update_start; 178 + par->bitops = &tile_fbcon_bitops; 175 179 176 - if (ops->p) { 180 + if (par->p) { 177 181 map.width = vc->vc_font.width; 178 182 map.height = vc->vc_font.height; 179 183 map.depth = 1; 180 184 map.length = vc->vc_font.charcount; 181 - map.data = ops->p->fontdata; 185 + map.data = par->p->fontdata; 182 186 info->tileops->fb_settile(info, &map); 183 187 } 184 188 }
+3 -3
drivers/video/fbdev/simplefb.c
··· 13 13 */ 14 14 15 15 #include <linux/aperture.h> 16 + #include <linux/clk.h> 16 17 #include <linux/errno.h> 17 18 #include <linux/fb.h> 18 19 #include <linux/io.h> 19 20 #include <linux/module.h> 20 - #include <linux/platform_data/simplefb.h> 21 - #include <linux/platform_device.h> 22 - #include <linux/clk.h> 23 21 #include <linux/of.h> 24 22 #include <linux/of_clk.h> 25 23 #include <linux/of_platform.h> 26 24 #include <linux/of_reserved_mem.h> 27 25 #include <linux/parser.h> 26 + #include <linux/platform_data/simplefb.h> 27 + #include <linux/platform_device.h> 28 28 #include <linux/pm_domain.h> 29 29 #include <linux/regulator/consumer.h> 30 30
+10 -1
include/drm/bridge/dw_hdmi.h
··· 143 143 const struct drm_display_info *info, 144 144 const struct drm_display_mode *mode); 145 145 146 + /* 147 + * priv_audio is specially used for additional audio device to get 148 + * driver data through this dw_hdmi_plat_data. 149 + */ 150 + void *priv_audio; 151 + 146 152 /* Platform-specific audio enable/disable (optional) */ 147 153 void (*enable_audio)(struct dw_hdmi *hdmi, int channel, 148 - int width, int rate, int non_pcm); 154 + int width, int rate, int non_pcm, int iec958); 149 155 void (*disable_audio)(struct dw_hdmi *hdmi); 150 156 151 157 /* Vendor PHY support */ ··· 185 179 int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, 186 180 struct device *codec_dev); 187 181 void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm); 182 + void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958); 188 183 void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width); 189 184 void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); 190 185 void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); ··· 214 207 void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data); 215 208 216 209 bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi); 210 + 211 + const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi); 217 212 218 213 #endif /* __IMX_HDMI_H__ */
+3
include/drm/display/drm_dp.h
··· 115 115 116 116 #define DP_MAX_LANE_COUNT 0x002 117 117 # define DP_MAX_LANE_COUNT_MASK 0x1f 118 + # define DP_POST_LT_ADJ_REQ_SUPPORTED (1 << 5) /* 1.3 */ 118 119 # define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ 119 120 # define DP_ENHANCED_FRAME_CAP (1 << 7) 120 121 ··· 584 583 585 584 #define DP_LANE_COUNT_SET 0x101 586 585 # define DP_LANE_COUNT_MASK 0x0f 586 + # define DP_POST_LT_ADJ_REQ_GRANTED (1 << 5) /* 1.3 */ 587 587 # define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) 588 588 589 589 #define DP_TRAINING_PATTERN_SET 0x102 ··· 802 800 803 801 #define DP_LANE_ALIGN_STATUS_UPDATED 0x204 804 802 #define DP_INTERLANE_ALIGN_DONE (1 << 0) 803 + #define DP_POST_LT_ADJ_REQ_IN_PROGRESS (1 << 1) /* 1.3 */ 805 804 #define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */ 806 805 #define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */ 807 806 #define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
+8
include/drm/display/drm_dp_helper.h
··· 37 37 int lane_count); 38 38 bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], 39 39 int lane_count); 40 + bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]); 40 41 u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], 41 42 int lane); 42 43 u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], ··· 154 153 { 155 154 return dpcd[DP_DPCD_REV] >= 0x11 && 156 155 (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); 156 + } 157 + 158 + static inline bool 159 + drm_dp_post_lt_adj_req_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) 160 + { 161 + return dpcd[DP_DPCD_REV] >= 0x13 && 162 + (dpcd[DP_MAX_LANE_COUNT] & DP_POST_LT_ADJ_REQ_SUPPORTED); 157 163 } 158 164 159 165 static inline bool
+57 -4
include/drm/drm_bridge.h
··· 1362 1362 * drm_bridge_get_next_bridge() - Get the next bridge in the chain 1363 1363 * @bridge: bridge object 1364 1364 * 1365 + * The caller is responsible of having a reference to @bridge via 1366 + * drm_bridge_get() or equivalent. This function leaves the refcount of 1367 + * @bridge unmodified. 1368 + * 1369 + * The refcount of the returned bridge is incremented. Use drm_bridge_put() 1370 + * when done with it. 1371 + * 1365 1372 * RETURNS: 1366 1373 * the next bridge in the chain after @bridge, or NULL if @bridge is the last. 1367 1374 */ ··· 1378 1371 if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain)) 1379 1372 return NULL; 1380 1373 1381 - return list_next_entry(bridge, chain_node); 1374 + return drm_bridge_get(list_next_entry(bridge, chain_node)); 1382 1375 } 1383 1376 1384 1377 /** ··· 1441 1434 } 1442 1435 1443 1436 /** 1444 - * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain 1437 + * drm_bridge_get_next_bridge_and_put - Get the next bridge in the chain 1438 + * and put the previous 1439 + * @bridge: bridge object 1440 + * 1441 + * Same as drm_bridge_get_next_bridge() but additionally puts the @bridge. 1442 + * 1443 + * RETURNS: 1444 + * the next bridge in the chain after @bridge, or NULL if @bridge is the last. 1445 + */ 1446 + static inline struct drm_bridge * 1447 + drm_bridge_get_next_bridge_and_put(struct drm_bridge *bridge) 1448 + { 1449 + struct drm_bridge *next = drm_bridge_get_next_bridge(bridge); 1450 + 1451 + drm_bridge_put(bridge); 1452 + 1453 + return next; 1454 + } 1455 + 1456 + /** 1457 + * drm_for_each_bridge_in_chain_scoped - iterate over all bridges attached 1458 + * to an encoder 1445 1459 * @encoder: the encoder to iterate bridges on 1446 1460 * @bridge: a bridge pointer updated to point to the current bridge at each 1447 1461 * iteration 1448 1462 * 1449 1463 * Iterate over all bridges present in the bridge chain attached to @encoder. 
1464 + * 1465 + * Automatically gets/puts the bridge reference while iterating, and puts 1466 + * the reference even if returning or breaking in the middle of the loop. 1450 1467 */ 1451 - #define drm_for_each_bridge_in_chain(encoder, bridge) \ 1452 - list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node) 1468 + #define drm_for_each_bridge_in_chain_scoped(encoder, bridge) \ 1469 + for (struct drm_bridge *bridge __free(drm_bridge_put) = \ 1470 + drm_bridge_chain_get_first_bridge(encoder); \ 1471 + bridge; \ 1472 + bridge = drm_bridge_get_next_bridge_and_put(bridge)) 1473 + 1474 + /** 1475 + * drm_for_each_bridge_in_chain_from - iterate over all bridges starting 1476 + * from the given bridge 1477 + * @first_bridge: the bridge to start from 1478 + * @bridge: a bridge pointer updated to point to the current bridge at each 1479 + * iteration 1480 + * 1481 + * Iterate over all bridges in the encoder chain starting from 1482 + * @first_bridge, included. 1483 + * 1484 + * Automatically gets/puts the bridge reference while iterating, and puts 1485 + * the reference even if returning or breaking in the middle of the loop. 1486 + */ 1487 + #define drm_for_each_bridge_in_chain_from(first_bridge, bridge) \ 1488 + for (struct drm_bridge *bridge __free(drm_bridge_put) = \ 1489 + drm_bridge_get(first_bridge); \ 1490 + bridge; \ 1491 + bridge = drm_bridge_get_next_bridge_and_put(bridge)) 1453 1492 1454 1493 enum drm_mode_status 1455 1494 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+1
include/drm/drm_client.h
··· 220 220 int drm_client_modeset_commit_locked(struct drm_client_dev *client); 221 221 int drm_client_modeset_commit(struct drm_client_dev *client); 222 222 int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); 223 + int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index); 223 224 224 225 /** 225 226 * drm_client_for_each_modeset() - Iterate over client modesets
+1 -1
include/drm/drm_crtc.h
··· 186 186 * this case the driver will send the VBLANK event on its own when the 187 187 * writeback job is complete. 188 188 */ 189 - bool no_vblank : 1; 189 + bool no_vblank; 190 190 191 191 /** 192 192 * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
+14
include/drm/drm_dumb_buffers.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + 3 + #ifndef __DRM_DUMB_BUFFERS_H__ 4 + #define __DRM_DUMB_BUFFERS_H__ 5 + 6 + struct drm_device; 7 + struct drm_mode_create_dumb; 8 + 9 + int drm_mode_size_dumb(struct drm_device *dev, 10 + struct drm_mode_create_dumb *args, 11 + unsigned long hw_pitch_align, 12 + unsigned long hw_size_align); 13 + 14 + #endif
-4
include/drm/drm_format_helper.h
··· 128 128 const struct iosys_map *src, const struct drm_framebuffer *fb, 129 129 const struct drm_rect *clip, struct drm_format_conv_state *state); 130 130 131 - int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, 132 - const struct iosys_map *src, const struct drm_framebuffer *fb, 133 - const struct drm_rect *clip, struct drm_format_conv_state *state); 134 - 135 131 void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, 136 132 const struct iosys_map *src, const struct drm_framebuffer *fb, 137 133 const struct drm_rect *clip, struct drm_format_conv_state *state);
+2
include/drm/drm_gem_shmem_helper.h
··· 107 107 #define to_drm_gem_shmem_obj(obj) \ 108 108 container_of(obj, struct drm_gem_shmem_object, base) 109 109 110 + int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size); 110 111 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); 111 112 struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev, 112 113 size_t size, 113 114 struct vfsmount *gemfs); 115 + void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem); 114 116 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); 115 117 116 118 void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
+12
include/drm/drm_modeset_helper_vtables.h
··· 490 490 bool in_vblank_irq, int *vpos, int *hpos, 491 491 ktime_t *stime, ktime_t *etime, 492 492 const struct drm_display_mode *mode); 493 + 494 + /** 495 + * @handle_vblank_timeout: Handles timeouts of the vblank timer. 496 + * 497 + * Called by CRTC's the vblank timer on each timeout. Semantics is 498 + * equivalient to drm_crtc_handle_vblank(). Implementations should 499 + * invoke drm_crtc_handle_vblank() as part of processing the timeout. 500 + * 501 + * This callback is optional. If unset, the vblank timer invokes 502 + * drm_crtc_handle_vblank() directly. 503 + */ 504 + bool (*handle_vblank_timeout)(struct drm_crtc *crtc); 493 505 }; 494 506 495 507 /**
+32
include/drm/drm_vblank.h
··· 25 25 #define _DRM_VBLANK_H_ 26 26 27 27 #include <linux/seqlock.h> 28 + #include <linux/hrtimer.h> 28 29 #include <linux/idr.h> 29 30 #include <linux/poll.h> 30 31 #include <linux/kthread.h> ··· 102 101 * drm_crtc_vblank_on(). 103 102 */ 104 103 bool disable_immediate; 104 + }; 105 + 106 + /** 107 + * struct drm_vblank_crtc_timer - vblank timer for a CRTC 108 + */ 109 + struct drm_vblank_crtc_timer { 110 + /** 111 + * @timer: The vblank's high-resolution timer 112 + */ 113 + struct hrtimer timer; 114 + /** 115 + * @interval_lock: Protects @interval 116 + */ 117 + spinlock_t interval_lock; 118 + /** 119 + * @interval: Duration between two vblanks 120 + */ 121 + ktime_t interval; 122 + /** 123 + * @crtc: The timer's CRTC 124 + */ 125 + struct drm_crtc *crtc; 105 126 }; 106 127 107 128 /** ··· 277 254 * cancelled. 278 255 */ 279 256 wait_queue_head_t work_wait_queue; 257 + 258 + /** 259 + * @vblank_timer: Holds the state of the vblank timer 260 + */ 261 + struct drm_vblank_crtc_timer vblank_timer; 280 262 }; 281 263 282 264 struct drm_vblank_crtc *drm_crtc_vblank_crtc(struct drm_crtc *crtc); ··· 317 289 wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc); 318 290 void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, 319 291 u32 max_vblank_count); 292 + 293 + int drm_crtc_vblank_start_timer(struct drm_crtc *crtc); 294 + void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc); 295 + void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time); 320 296 321 297 /* 322 298 * Helpers for struct drm_crtc_funcs
+56
include/drm/drm_vblank_helper.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + 3 + #ifndef _DRM_VBLANK_HELPER_H_ 4 + #define _DRM_VBLANK_HELPER_H_ 5 + 6 + #include <linux/hrtimer_types.h> 7 + #include <linux/types.h> 8 + 9 + struct drm_atomic_state; 10 + struct drm_crtc; 11 + 12 + /* 13 + * VBLANK helpers 14 + */ 15 + 16 + void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc, 17 + struct drm_atomic_state *state); 18 + void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc, 19 + struct drm_atomic_state *state); 20 + void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc, 21 + struct drm_atomic_state *crtc_state); 22 + 23 + /** 24 + * DRM_CRTC_HELPER_VBLANK_FUNCS - Default implementation for VBLANK helpers 25 + * 26 + * This macro initializes struct &drm_crtc_helper_funcs to default helpers 27 + * for VBLANK handling. 28 + */ 29 + #define DRM_CRTC_HELPER_VBLANK_FUNCS \ 30 + .atomic_flush = drm_crtc_vblank_atomic_flush, \ 31 + .atomic_enable = drm_crtc_vblank_atomic_enable, \ 32 + .atomic_disable = drm_crtc_vblank_atomic_disable 33 + 34 + /* 35 + * VBLANK timer 36 + */ 37 + 38 + int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc); 39 + void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc); 40 + bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc, 41 + int *max_error, 42 + ktime_t *vblank_time, 43 + bool in_vblank_irq); 44 + 45 + /** 46 + * DRM_CRTC_VBLANK_TIMER_FUNCS - Default implementation for VBLANK timers 47 + * 48 + * This macro initializes struct &drm_crtc_funcs to default helpers for 49 + * VBLANK timers. 50 + */ 51 + #define DRM_CRTC_VBLANK_TIMER_FUNCS \ 52 + .enable_vblank = drm_crtc_vblank_helper_enable_vblank_timer, \ 53 + .disable_vblank = drm_crtc_vblank_helper_disable_vblank_timer, \ 54 + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp_from_timer 55 + 56 + #endif
+1 -1
include/drm/gpu_scheduler.h
··· 546 546 * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT, 547 547 * as there's usually one run-queue per priority, but could be less. 548 548 * @sched_rq: An allocated array of run-queues of size @num_rqs; 549 - * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler 549 + * @job_scheduled: once drm_sched_entity_flush() is called the scheduler 550 550 * waits on this wait queue until all the scheduled jobs are 551 551 * finished. 552 552 * @job_id_count: used to assign unique id to the each job.
+1 -1
include/drm/ttm/ttm_bo.h
··· 391 391 int ttm_bo_validate(struct ttm_buffer_object *bo, 392 392 struct ttm_placement *placement, 393 393 struct ttm_operation_ctx *ctx); 394 - void ttm_bo_put(struct ttm_buffer_object *bo); 394 + void ttm_bo_fini(struct ttm_buffer_object *bo); 395 395 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, 396 396 struct ttm_lru_bulk_move *bulk); 397 397 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+9
include/sound/asoundef.h
··· 12 12 * Digital audio interface * 13 13 * * 14 14 ****************************************************************************/ 15 + /* IEC958 subframe format */ 16 + #define IEC958_SUBFRAME_PREAMBLE_MASK (0xfU) 17 + #define IEC958_SUBFRAME_AUXILIARY_MASK (0xfU << 4) 18 + #define IEC958_SUBFRAME_SAMPLE_24_MASK (0xffffffU << 4) 19 + #define IEC958_SUBFRAME_SAMPLE_20_MASK (0xfffffU << 8) 20 + #define IEC958_SUBFRAME_VALIDITY (0x1U << 28) 21 + #define IEC958_SUBFRAME_USER_DATA (0x1U << 29) 22 + #define IEC958_SUBFRAME_CHANNEL_STATUS (0x1U << 30) 23 + #define IEC958_SUBFRAME_PARITY (0x1U << 31) 15 24 16 25 /* AES/IEC958 channel status bits */ 17 26 #define IEC958_AES0_PROFESSIONAL (1<<0) /* 0 = consumer, 1 = professional */
+49 -1
include/uapi/drm/drm_mode.h
··· 1066 1066 * struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout. 1067 1067 * @height: buffer height in pixels 1068 1068 * @width: buffer width in pixels 1069 - * @bpp: bits per pixel 1069 + * @bpp: color mode 1070 1070 * @flags: must be zero 1071 1071 * @handle: buffer object handle 1072 1072 * @pitch: number of bytes between two consecutive lines ··· 1074 1074 * 1075 1075 * User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds, 1076 1076 * the kernel fills @handle, @pitch and @size. 1077 + * 1078 + * The value of @bpp is a color-mode number describing a specific format 1079 + * or a variant thereof. The value often corresponds to the number of bits 1080 + * per pixel for most modes, although there are exceptions. Each color mode 1081 + * maps to a DRM format plus a number of modes with similar pixel layout. 1082 + * Framebuffer layout is always linear. 1083 + * 1084 + * Support for all modes and formats is optional. Even if dumb-buffer 1085 + * creation with a certain color mode succeeds, it is not guaranteed that 1086 + * the DRM driver supports any of the related formats. Most drivers support 1087 + * a color mode of 32 with a format of DRM_FORMAT_XRGB8888 on their primary 1088 + * plane. 
1089 + * 1090 + * +------------+------------------------+------------------------+ 1091 + * | Color mode | Framebuffer format | Compatible formats | 1092 + * +============+========================+========================+ 1093 + * | 32 | * DRM_FORMAT_XRGB8888 | * DRM_FORMAT_BGRX8888 | 1094 + * | | | * DRM_FORMAT_RGBX8888 | 1095 + * | | | * DRM_FORMAT_XBGR8888 | 1096 + * +------------+------------------------+------------------------+ 1097 + * | 24 | * DRM_FORMAT_RGB888 | * DRM_FORMAT_BGR888 | 1098 + * +------------+------------------------+------------------------+ 1099 + * | 16 | * DRM_FORMAT_RGB565 | * DRM_FORMAT_BGR565 | 1100 + * +------------+------------------------+------------------------+ 1101 + * | 15 | * DRM_FORMAT_XRGB1555 | * DRM_FORMAT_BGRX1555 | 1102 + * | | | * DRM_FORMAT_RGBX1555 | 1103 + * | | | * DRM_FORMAT_XBGR1555 | 1104 + * +------------+------------------------+------------------------+ 1105 + * | 8 | * DRM_FORMAT_C8 | * DRM_FORMAT_D8 | 1106 + * | | | * DRM_FORMAT_R8 | 1107 + * +------------+------------------------+------------------------+ 1108 + * | 4 | * DRM_FORMAT_C4 | * DRM_FORMAT_D4 | 1109 + * | | | * DRM_FORMAT_R4 | 1110 + * +------------+------------------------+------------------------+ 1111 + * | 2 | * DRM_FORMAT_C2 | * DRM_FORMAT_D2 | 1112 + * | | | * DRM_FORMAT_R2 | 1113 + * +------------+------------------------+------------------------+ 1114 + * | 1 | * DRM_FORMAT_C1 | * DRM_FORMAT_D1 | 1115 + * | | | * DRM_FORMAT_R1 | 1116 + * +------------+------------------------+------------------------+ 1117 + * 1118 + * Color modes of 10, 12, 15, 30 and 64 are only supported for use by 1119 + * legacy user space. Please don't use them in new code. Other modes 1120 + * are not support. 1121 + * 1122 + * Do not attempt to allocate anything but linear framebuffer memory 1123 + * with single-plane RGB data. Allocation of other framebuffer 1124 + * layouts requires dedicated ioctls in the respective DRM driver. 
1077 1125 */ 1078 1126 struct drm_mode_create_dumb { 1079 1127 __u32 height;
+11
include/uapi/drm/ivpu_accel.h
··· 90 90 #define DRM_IVPU_PARAM_TILE_CONFIG 11 91 91 #define DRM_IVPU_PARAM_SKU 12 92 92 #define DRM_IVPU_PARAM_CAPABILITIES 13 93 + #define DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE 14 93 94 94 95 #define DRM_IVPU_PLATFORM_TYPE_SILICON 0 95 96 ··· 177 176 * 178 177 * %DRM_IVPU_PARAM_CAPABILITIES: 179 178 * Supported capabilities (read-only) 179 + * 180 + * %DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE: 181 + * Size of the preemption buffer (read-only) 180 182 */ 181 183 __u32 param; 182 184 ··· 375 371 * to be executed. The offset has to be 8-byte aligned. 376 372 */ 377 373 __u32 commands_offset; 374 + /** 375 + * @preempt_buffer_index: 376 + * 377 + * Index of the preemption buffer in the buffers_ptr array. 378 + */ 379 + __u32 preempt_buffer_index; 380 + __u32 reserved; 378 381 }; 379 382 380 383 /* drm_ivpu_bo_wait job status codes */
+50
include/uapi/drm/panfrost_drm.h
··· 22 22 #define DRM_PANFROST_PERFCNT_DUMP 0x07 23 23 #define DRM_PANFROST_MADVISE 0x08 24 24 #define DRM_PANFROST_SET_LABEL_BO 0x09 25 + #define DRM_PANFROST_JM_CTX_CREATE 0x0a 26 + #define DRM_PANFROST_JM_CTX_DESTROY 0x0b 25 27 26 28 #define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit) 27 29 #define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo) ··· 33 31 #define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset) 34 32 #define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise) 35 33 #define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo) 34 + #define DRM_IOCTL_PANFROST_JM_CTX_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_CREATE, struct drm_panfrost_jm_ctx_create) 35 + #define DRM_IOCTL_PANFROST_JM_CTX_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_DESTROY, struct drm_panfrost_jm_ctx_destroy) 36 36 37 37 /* 38 38 * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module ··· 75 71 76 72 /** A combination of PANFROST_JD_REQ_* */ 77 73 __u32 requirements; 74 + 75 + /** JM context handle. Zero if you want to use the default context. */ 76 + __u32 jm_ctx_handle; 77 + 78 + /** Padding field. MBZ. */ 79 + __u32 pad; 78 80 }; 79 81 80 82 /** ··· 187 177 DRM_PANFROST_PARAM_AFBC_FEATURES, 188 178 DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP, 189 179 DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY, 180 + DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES, 190 181 }; 191 182 192 183 struct drm_panfrost_get_param { ··· 308 297 struct panfrost_dump_registers { 309 298 __u32 reg; 310 299 __u32 value; 300 + }; 301 + 302 + enum drm_panfrost_jm_ctx_priority { 303 + /** 304 + * @PANFROST_JM_CTX_PRIORITY_LOW: Low priority context. 
305 + */ 306 + PANFROST_JM_CTX_PRIORITY_LOW = 0, 307 + 308 + /** 309 + * @PANFROST_JM_CTX_PRIORITY_MEDIUM: Medium priority context. 310 + */ 311 + PANFROST_JM_CTX_PRIORITY_MEDIUM, 312 + 313 + /** 314 + * @PANFROST_JM_CTX_PRIORITY_HIGH: High priority context. 315 + * 316 + * Requires CAP_SYS_NICE or DRM_MASTER. 317 + */ 318 + PANFROST_JM_CTX_PRIORITY_HIGH, 319 + }; 320 + 321 + struct drm_panfrost_jm_ctx_create { 322 + /** @handle: Handle of the created JM context */ 323 + __u32 handle; 324 + 325 + /** @priority: Context priority (see enum drm_panfrost_jm_ctx_priority). */ 326 + __u32 priority; 327 + }; 328 + 329 + struct drm_panfrost_jm_ctx_destroy { 330 + /** 331 + * @handle: Handle of the JM context to destroy. 332 + * 333 + * Must be a valid context handle returned by DRM_IOCTL_PANTHOR_JM_CTX_CREATE. 334 + */ 335 + __u32 handle; 336 + 337 + /** @pad: Padding field, MBZ. */ 338 + __u32 pad; 311 339 }; 312 340 313 341 #if defined(__cplusplus)