Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge drm/drm-next into drm-intel-next

Sync up with changes from drm-intel-gt-next.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>

+4821 -2051
+1
.mailmap
··· 327 327 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com> 328 328 Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@mellanox.com> 329 329 Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@nvidia.com> 330 + Maxime Ripard <mripard@kernel.org> <maxime@cerno.tech> 330 331 Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com> 331 332 Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com> 332 333 Mayuresh Janorkar <mayur@ti.com>
+118
Documentation/devicetree/bindings/display/amlogic,meson-g12a-dw-mipi-dsi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + # Copyright 2020 BayLibre, SAS 3 + %YAML 1.2 4 + --- 5 + $id: http://devicetree.org/schemas/display/amlogic,meson-g12a-dw-mipi-dsi.yaml# 6 + $schema: http://devicetree.org/meta-schemas/core.yaml# 7 + 8 + title: Amlogic specific extensions to the Synopsys Designware MIPI DSI Host Controller 9 + 10 + maintainers: 11 + - Neil Armstrong <neil.armstrong@linaro.org> 12 + 13 + description: | 14 + The Amlogic Meson Synopsys Designware Integration is composed of 15 + - A Synopsys DesignWare MIPI DSI Host Controller IP 16 + - A TOP control block controlling the Clocks & Resets of the IP 17 + 18 + allOf: 19 + - $ref: dsi-controller.yaml# 20 + 21 + properties: 22 + compatible: 23 + enum: 24 + - amlogic,meson-g12a-dw-mipi-dsi 25 + 26 + reg: 27 + maxItems: 1 28 + 29 + clocks: 30 + minItems: 3 31 + maxItems: 4 32 + 33 + clock-names: 34 + minItems: 3 35 + items: 36 + - const: pclk 37 + - const: bit 38 + - const: px 39 + - const: meas 40 + 41 + resets: 42 + maxItems: 1 43 + 44 + reset-names: 45 + items: 46 + - const: top 47 + 48 + phys: 49 + maxItems: 1 50 + 51 + phy-names: 52 + items: 53 + - const: dphy 54 + 55 + ports: 56 + $ref: /schemas/graph.yaml#/properties/ports 57 + 58 + properties: 59 + port@0: 60 + $ref: /schemas/graph.yaml#/properties/port 61 + description: Input node to receive pixel data. 62 + 63 + port@1: 64 + $ref: /schemas/graph.yaml#/properties/port 65 + description: DSI output node to panel. 
66 + 67 + required: 68 + - port@0 69 + - port@1 70 + 71 + required: 72 + - compatible 73 + - reg 74 + - clocks 75 + - clock-names 76 + - resets 77 + - reset-names 78 + - phys 79 + - phy-names 80 + - ports 81 + 82 + unevaluatedProperties: false 83 + 84 + examples: 85 + - | 86 + dsi@6000 { 87 + compatible = "amlogic,meson-g12a-dw-mipi-dsi"; 88 + reg = <0x6000 0x400>; 89 + resets = <&reset_top>; 90 + reset-names = "top"; 91 + clocks = <&clk_pclk>, <&bit_clk>, <&clk_px>; 92 + clock-names = "pclk", "bit", "px"; 93 + phys = <&mipi_dphy>; 94 + phy-names = "dphy"; 95 + 96 + ports { 97 + #address-cells = <1>; 98 + #size-cells = <0>; 99 + 100 + /* VPU VENC Input */ 101 + mipi_dsi_venc_port: port@0 { 102 + reg = <0>; 103 + 104 + mipi_dsi_in: endpoint { 105 + remote-endpoint = <&dpi_out>; 106 + }; 107 + }; 108 + 109 + /* DSI Output */ 110 + mipi_dsi_panel_port: port@1 { 111 + reg = <1>; 112 + 113 + mipi_out_panel: endpoint { 114 + remote-endpoint = <&mipi_in_panel>; 115 + }; 116 + }; 117 + }; 118 + };
+5
Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
··· 96 96 description: 97 97 A port node pointing to the HDMI-TX port node. 98 98 99 + port@2: 100 + $ref: /schemas/graph.yaml#/properties/port 101 + description: 102 + A port node pointing to the DPI port node (e.g. DSI or LVDS transceiver). 103 + 99 104 "#address-cells": 100 105 const: 1 101 106
+7 -5
Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml
··· 70 70 samsung,burst-clock-frequency: 71 71 $ref: /schemas/types.yaml#/definitions/uint32 72 72 description: 73 - DSIM high speed burst mode frequency. 73 + DSIM high speed burst mode frequency. If absent, 74 + the pixel clock from the attached device or bridge 75 + will be used instead. 74 76 75 77 samsung,esc-clock-frequency: 76 78 $ref: /schemas/types.yaml#/definitions/uint32 ··· 82 80 samsung,pll-clock-frequency: 83 81 $ref: /schemas/types.yaml#/definitions/uint32 84 82 description: 85 - DSIM oscillator clock frequency. 83 + DSIM oscillator clock frequency. If absent, the clock frequency 84 + of sclk_mipi will be used instead. 86 85 87 86 phys: 88 87 maxItems: 1 ··· 103 100 specified. 104 101 105 102 port@1: 106 - $ref: /schemas/graph.yaml#/properties/port 103 + $ref: /schemas/graph.yaml#/$defs/port-base 104 + unevaluatedProperties: false 107 105 description: 108 106 DSI output port node to the panel or the next bridge 109 107 in the chain. ··· 138 134 - compatible 139 135 - interrupts 140 136 - reg 141 - - samsung,burst-clock-frequency 142 137 - samsung,esc-clock-frequency 143 - - samsung,pll-clock-frequency 144 138 145 139 allOf: 146 140 - $ref: ../dsi-controller.yaml#
+3
Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
··· 21 21 maxItems: 1 22 22 description: virtual channel number of a DSI peripheral 23 23 24 + reset-gpios: 25 + maxItems: 1 26 + 24 27 vddc-supply: 25 28 description: Regulator for 1.2V internal core power. 26 29
+3
Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
··· 36 36 description: GPIO signal to enable DDC bus 37 37 maxItems: 1 38 38 39 + hdmi-pwr-supply: 40 + description: Power supply for the HDMI +5V Power pin 41 + 39 42 port: 40 43 $ref: /schemas/graph.yaml#/properties/port 41 44 description: Connection to controller providing HDMI signals
+6 -1
Documentation/devicetree/bindings/display/fsl,lcdif.yaml
··· 21 21 - fsl,imx28-lcdif 22 22 - fsl,imx6sx-lcdif 23 23 - fsl,imx8mp-lcdif 24 + - fsl,imx93-lcdif 24 25 - items: 25 26 - enum: 26 27 - fsl,imx6sl-lcdif ··· 89 88 properties: 90 89 compatible: 91 90 contains: 92 - const: fsl,imx8mp-lcdif 91 + enum: 92 + - fsl,imx8mp-lcdif 93 + - fsl,imx93-lcdif 93 94 then: 94 95 properties: 95 96 clocks: ··· 110 107 enum: 111 108 - fsl,imx6sx-lcdif 112 109 - fsl,imx8mp-lcdif 110 + - fsl,imx93-lcdif 113 111 then: 114 112 properties: 115 113 clocks: ··· 127 123 - fsl,imx8mm-lcdif 128 124 - fsl,imx8mn-lcdif 129 125 - fsl,imx8mp-lcdif 126 + - fsl,imx93-lcdif 130 127 then: 131 128 required: 132 129 - power-domains
+4
Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
··· 32 32 - innolux,hj110iz-01a 33 33 # STARRY 2081101QFH032011-53G 10.1" WUXGA TFT LCD panel 34 34 - starry,2081101qfh032011-53g 35 + # STARRY himax83102-j02 10.51" WUXGA TFT LCD panel 36 + - starry,himax83102-j02 37 + # STARRY ili9882t 10.51" WUXGA TFT LCD panel 38 + - starry,ili9882t 35 39 36 40 reg: 37 41 description: the virtual channel number of a DSI peripheral
+4
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
··· 33 33 - ampire,am-1280800n3tzqw-t00h 34 34 # Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel 35 35 - ampire,am-480272h3tmqw-t01h 36 + # Ampire AM-800480L1TMQW-T00H 5" WVGA TFT LCD panel 37 + - ampire,am-800480l1tmqw-t00h 36 38 # Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel 37 39 - ampire,am800480r3tmqwa1h 38 40 # Ampire AM-800600P5TMQW-TB8H 8.0" SVGA TFT LCD panel ··· 286 284 - rocktech,rk101ii01d-ct 287 285 # Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel 288 286 - rocktech,rk070er9427 287 + # Rocktech Display Ltd. RK043FN48H 4.3" 480x272 LCD-TFT panel 288 + - rocktech,rk043fn48h 289 289 # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel 290 290 - samsung,atna33xc20 291 291 # Samsung 12.2" (2560x1600 pixels) TFT LCD panel
+65 -26
Documentation/gpu/drm-usage-stats.rst
··· 24 24 - All keys shall be prefixed with `drm-`. 25 25 - Whitespace between the delimiter and first non-whitespace character shall be 26 26 ignored when parsing. 27 - - Neither keys or values are allowed to contain whitespace characters. 27 + - Keys are not allowed to contain whitespace characters. 28 28 - Numerical key value pairs can end with optional unit string. 29 29 - Data type of the value is fixed as defined in the specification. 30 30 ··· 39 39 ---------- 40 40 41 41 - <uint> - Unsigned integer without defining the maximum value. 42 - - <str> - String excluding any above defined reserved characters or whitespace. 42 + - <keystr> - String excluding any above defined reserved characters or whitespace. 43 + - <valstr> - String. 43 44 44 45 Mandatory fully standardised keys 45 46 --------------------------------- 46 47 47 - - drm-driver: <str> 48 + - drm-driver: <valstr> 48 49 49 50 String shall contain the name this driver registered as via the respective 50 51 `struct drm_driver` data structure. 51 52 52 53 Optional fully standardised keys 53 54 -------------------------------- 55 + 56 + Identification 57 + ^^^^^^^^^^^^^^ 54 58 55 59 - drm-pdev: <aaaa:bb.cc.d> 56 60 ··· 73 69 Userspace should make sure to not double account any usage statistics by using 74 70 the above described criteria in order to associate data to individual clients. 75 71 76 - - drm-engine-<str>: <uint> ns 72 + Utilization 73 + ^^^^^^^^^^^ 74 + 75 + - drm-engine-<keystr>: <uint> ns 77 76 78 77 GPUs usually contain multiple execution engines. Each shall be given a stable 79 - and unique name (str), with possible values documented in the driver specific 78 + and unique name (keystr), with possible values documented in the driver specific 80 79 documentation. 81 80 82 81 Value shall be in specified time units which the respective GPU engine spent ··· 91 84 was previously read, userspace is expected to stay with that larger previous 92 85 value until a monotonic update is seen. 
93 86 94 - - drm-engine-capacity-<str>: <uint> 87 + - drm-engine-capacity-<keystr>: <uint> 95 88 96 89 Engine identifier string must be the same as the one specified in the 97 - drm-engine-<str> tag and shall contain a greater than zero number in case the 90 + drm-engine-<keystr> tag and shall contain a greater than zero number in case the 98 91 exported engine corresponds to a group of identical hardware engines. 99 92 100 93 In the absence of this tag parser shall assume capacity of one. Zero capacity 101 94 is not allowed. 102 95 103 - - drm-memory-<str>: <uint> [KiB|MiB] 104 - 105 - Each possible memory type which can be used to store buffer objects by the 106 - GPU in question shall be given a stable and unique name to be returned as the 107 - string here. 108 - 109 - Value shall reflect the amount of storage currently consumed by the buffer 110 - object belong to this client, in the respective memory region. 111 - 112 - Default unit shall be bytes with optional unit specifiers of 'KiB' or 'MiB' 113 - indicating kibi- or mebi-bytes. 114 - 115 - - drm-cycles-<str> <uint> 96 + - drm-cycles-<keystr>: <uint> 116 97 117 98 Engine identifier string must be the same as the one specified in the 118 - drm-engine-<str> tag and shall contain the number of busy cycles for the given 99 + drm-engine-<keystr> tag and shall contain the number of busy cycles for the given 119 100 engine. 120 101 121 102 Values are not required to be constantly monotonic if it makes the driver ··· 112 117 was previously read, userspace is expected to stay with that larger previous 113 118 value until a monotonic update is seen. 114 119 115 - - drm-maxfreq-<str> <uint> [Hz|MHz|KHz] 120 + - drm-maxfreq-<keystr>: <uint> [Hz|MHz|KHz] 116 121 117 122 Engine identifier string must be the same as the one specified in the 118 - drm-engine-<str> tag and shall contain the maximum frequency for the given 119 - engine. 
Taken together with drm-cycles-<str>, this can be used to calculate 120 - percentage utilization of the engine, whereas drm-engine-<str> only reflects 123 + drm-engine-<keystr> tag and shall contain the maximum frequency for the given 124 + engine. Taken together with drm-cycles-<keystr>, this can be used to calculate 125 + percentage utilization of the engine, whereas drm-engine-<keystr> only reflects 121 126 time active without considering what frequency the engine is operating as a 122 127 percentage of it's maximum frequency. 123 128 129 + Memory 130 + ^^^^^^ 131 + 132 + - drm-memory-<region>: <uint> [KiB|MiB] 133 + 134 + Each possible memory type which can be used to store buffer objects by the 135 + GPU in question shall be given a stable and unique name to be returned as the 136 + string here. The name "memory" is reserved to refer to normal system memory. 137 + 138 + Value shall reflect the amount of storage currently consumed by the buffer 139 + objects belong to this client, in the respective memory region. 140 + 141 + Default unit shall be bytes with optional unit specifiers of 'KiB' or 'MiB' 142 + indicating kibi- or mebi-bytes. 143 + 144 + - drm-shared-<region>: <uint> [KiB|MiB] 145 + 146 + The total size of buffers that are shared with another file (ie. have more 147 + than a single handle). 148 + 149 + - drm-total-<region>: <uint> [KiB|MiB] 150 + 151 + The total size of buffers that including shared and private memory. 152 + 153 + - drm-resident-<region>: <uint> [KiB|MiB] 154 + 155 + The total size of buffers that are resident in the specified region. 156 + 157 + - drm-purgeable-<region>: <uint> [KiB|MiB] 158 + 159 + The total size of buffers that are purgeable. 160 + 161 + - drm-active-<region>: <uint> [KiB|MiB] 162 + 163 + The total size of buffers that are active on one or more engines. 
164 + 165 + Implementation Details 166 + ====================== 167 + 168 + Drivers should use drm_show_fdinfo() in their `struct file_operations`, and 169 + implement &drm_driver.show_fdinfo if they wish to provide any stats which 170 + are not provided by drm_show_fdinfo(). But even driver specific stats should 171 + be documented above and where possible, aligned with other drivers. 172 + 124 173 Driver specific implementations 125 - =============================== 174 + ------------------------------- 126 175 127 176 :ref:`i915-usage-stats`
+3 -2
MAINTAINERS
··· 6981 6981 F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml 6982 6982 F: Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml 6983 6983 F: Documentation/devicetree/bindings/display/renesas,du.yaml 6984 - F: drivers/gpu/drm/rcar-du/ 6985 - F: drivers/gpu/drm/shmobile/ 6984 + F: drivers/gpu/drm/renesas/ 6986 6985 F: include/linux/platform_data/shmob_drm.h 6987 6986 6988 6987 DRM DRIVERS FOR ROCKCHIP ··· 17387 17388 17388 17389 QUALCOMM CLOUD AI (QAIC) DRIVER 17389 17390 M: Jeffrey Hugo <quic_jhugo@quicinc.com> 17391 + R: Carl Vanderlip <quic_carlv@quicinc.com> 17392 + R: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com> 17390 17393 L: linux-arm-msm@vger.kernel.org 17391 17394 L: dri-devel@lists.freedesktop.org 17392 17395 S: Supported
-6
drivers/accel/habanalabs/common/command_buffer.c
··· 27 27 return -EINVAL; 28 28 } 29 29 30 - if (!hdev->mmu_enable) { 31 - dev_err_ratelimited(hdev->dev, 32 - "Cannot map CB because MMU is disabled\n"); 33 - return -EINVAL; 34 - } 35 - 36 30 if (cb->is_mmu_mapped) 37 31 return 0; 38 32
+31 -30
drivers/accel/habanalabs/common/command_submission.c
··· 280 280 281 281 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) 282 282 { 283 - /* 284 - * Patched CB is created for external queues jobs, and for H/W queues 285 - * jobs if the user CB was allocated by driver and MMU is disabled. 286 - */ 287 - return (job->queue_type == QUEUE_TYPE_EXT || 288 - (job->queue_type == QUEUE_TYPE_HW && 289 - job->is_kernel_allocated_cb && 290 - !hdev->mmu_enable)); 283 + /* Patched CB is created for external queues jobs */ 284 + return (job->queue_type == QUEUE_TYPE_EXT); 291 285 } 292 286 293 287 /* ··· 357 363 } 358 364 } 359 365 360 - /* For H/W queue jobs, if a user CB was allocated by driver and MMU is 361 - * enabled, the user CB isn't released in cs_parser() and thus should be 366 + /* For H/W queue jobs, if a user CB was allocated by driver, 367 + * the user CB isn't released in cs_parser() and thus should be 362 368 * released here. This is also true for INT queues jobs which were 363 369 * allocated by driver. 364 370 */ 365 - if ((job->is_kernel_allocated_cb && 366 - ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) || 367 - job->queue_type == QUEUE_TYPE_INT))) { 371 + if (job->is_kernel_allocated_cb && 372 + (job->queue_type == QUEUE_TYPE_HW || job->queue_type == QUEUE_TYPE_INT)) { 368 373 atomic_dec(&job->user_cb->cs_cnt); 369 374 hl_cb_put(job->user_cb); 370 375 } ··· 797 804 798 805 static void cs_timedout(struct work_struct *work) 799 806 { 807 + struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work); 808 + bool skip_reset_on_timeout, device_reset = false; 800 809 struct hl_device *hdev; 801 810 u64 event_mask = 0x0; 811 + uint timeout_sec; 802 812 int rc; 803 - struct hl_cs *cs = container_of(work, struct hl_cs, 804 - work_tdr.work); 805 - bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false; 813 + 814 + skip_reset_on_timeout = cs->skip_reset_on_timeout; 806 815 807 816 rc = cs_get_unless_zero(cs); 808 817 if (!rc) ··· 835 840 event_mask |= 
HL_NOTIFIER_EVENT_CS_TIMEOUT; 836 841 } 837 842 843 + timeout_sec = jiffies_to_msecs(hdev->timeout_jiffies) / 1000; 844 + 838 845 switch (cs->type) { 839 846 case CS_TYPE_SIGNAL: 840 847 dev_err(hdev->dev, 841 - "Signal command submission %llu has not finished in time!\n", 842 - cs->sequence); 848 + "Signal command submission %llu has not finished in %u seconds!\n", 849 + cs->sequence, timeout_sec); 843 850 break; 844 851 845 852 case CS_TYPE_WAIT: 846 853 dev_err(hdev->dev, 847 - "Wait command submission %llu has not finished in time!\n", 848 - cs->sequence); 854 + "Wait command submission %llu has not finished in %u seconds!\n", 855 + cs->sequence, timeout_sec); 849 856 break; 850 857 851 858 case CS_TYPE_COLLECTIVE_WAIT: 852 859 dev_err(hdev->dev, 853 - "Collective Wait command submission %llu has not finished in time!\n", 854 - cs->sequence); 860 + "Collective Wait command submission %llu has not finished in %u seconds!\n", 861 + cs->sequence, timeout_sec); 855 862 break; 856 863 857 864 default: 858 865 dev_err(hdev->dev, 859 - "Command submission %llu has not finished in time!\n", 860 - cs->sequence); 866 + "Command submission %llu has not finished in %u seconds!\n", 867 + cs->sequence, timeout_sec); 861 868 break; 862 869 } 863 870 ··· 1136 1139 spin_unlock(&hdev->cs_mirror_lock); 1137 1140 } 1138 1141 1139 - void hl_abort_waitings_for_completion(struct hl_device *hdev) 1142 + void hl_abort_waiting_for_cs_completions(struct hl_device *hdev) 1140 1143 { 1141 1144 force_complete_cs(hdev); 1142 1145 force_complete_multi_cs(hdev); 1143 - hl_release_pending_user_interrupts(hdev); 1144 1146 } 1145 1147 1146 1148 static void job_wq_completion(struct work_struct *work) ··· 1944 1948 else 1945 1949 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev); 1946 1950 1947 - cb = hl_cb_kernel_create(hdev, cb_size, 1948 - q_type == QUEUE_TYPE_HW && hdev->mmu_enable); 1951 + cb = hl_cb_kernel_create(hdev, cb_size, q_type == QUEUE_TYPE_HW); 1949 1952 if (!cb) { 1950 1953 
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1951 1954 atomic64_inc(&cntr->out_of_mem_drop_cnt); ··· 2147 2152 2148 2153 hdev->asic_funcs->hw_queues_unlock(hdev); 2149 2154 rc = -EINVAL; 2150 - goto out; 2155 + goto out_unlock; 2151 2156 } 2152 2157 2153 2158 /* ··· 2162 2167 2163 2168 /* Release the id and free allocated memory of the handle */ 2164 2169 idr_remove(&mgr->handles, handle_id); 2170 + 2171 + /* unlock before calling ctx_put, where we might sleep */ 2172 + spin_unlock(&mgr->lock); 2165 2173 hl_ctx_put(encaps_sig_hdl->ctx); 2166 2174 kfree(encaps_sig_hdl); 2175 + goto out; 2167 2176 } else { 2168 2177 rc = -EINVAL; 2169 2178 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n"); 2170 2179 } 2171 - out: 2180 + 2181 + out_unlock: 2172 2182 spin_unlock(&mgr->lock); 2173 2183 2184 + out: 2174 2185 return rc; 2175 2186 } 2176 2187
+24 -36
drivers/accel/habanalabs/common/debugfs.c
··· 255 255 u64 j; 256 256 int i; 257 257 258 - if (!dev_entry->hdev->mmu_enable) 259 - return 0; 260 - 261 258 mutex_lock(&dev_entry->ctx_mem_hash_mutex); 262 259 263 260 list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) { ··· 433 436 u64 virt_addr = dev_entry->mmu_addr, phys_addr; 434 437 int i; 435 438 436 - if (!hdev->mmu_enable) 437 - return 0; 438 - 439 439 if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID) 440 440 ctx = hdev->kernel_ctx; 441 441 else ··· 490 496 char *c; 491 497 ssize_t rc; 492 498 493 - if (!hdev->mmu_enable) 494 - return count; 495 - 496 499 if (count > sizeof(kbuf) - 1) 497 500 goto err; 498 501 if (copy_from_user(kbuf, buf, count)) ··· 526 535 struct hl_device *hdev = dev_entry->hdev; 527 536 int rc; 528 537 529 - if (!hdev->mmu_enable) 530 - return 0; 531 - 532 538 if (!dev_entry->mmu_cap_mask) { 533 539 dev_err(hdev->dev, "mmu_cap_mask is not set\n"); 534 540 goto err; ··· 550 562 struct hl_device *hdev = dev_entry->hdev; 551 563 char kbuf[MMU_KBUF_SIZE]; 552 564 ssize_t rc; 553 - 554 - if (!hdev->mmu_enable) 555 - return count; 556 565 557 566 if (count > sizeof(kbuf) - 1) 558 567 goto err; ··· 646 661 { 647 662 struct asic_fixed_properties *prop = &hdev->asic_prop; 648 663 649 - if (!hdev->mmu_enable) 650 - goto out; 651 - 652 664 if (prop->dram_supports_virtual_memory && 653 665 (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr)) 654 666 return true; ··· 657 675 if (addr >= prop->pmmu_huge.start_addr && 658 676 addr < prop->pmmu_huge.end_addr) 659 677 return true; 660 - out: 678 + 661 679 return false; 662 680 } 663 681 ··· 666 684 { 667 685 struct asic_fixed_properties *prop = &hdev->asic_prop; 668 686 u64 dram_start_addr, dram_end_addr; 669 - 670 - if (!hdev->mmu_enable) 671 - return false; 672 687 673 688 if (prop->dram_supports_virtual_memory) { 674 689 dram_start_addr = prop->dmmu.start_addr; ··· 1735 1756 } 1736 1757 } 1737 1758 1738 - void hl_debugfs_add_device(struct hl_device *hdev) 1759 + int 
hl_debugfs_device_init(struct hl_device *hdev) 1739 1760 { 1740 1761 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; 1741 1762 int count = ARRAY_SIZE(hl_debugfs_list); 1742 1763 1743 1764 dev_entry->hdev = hdev; 1744 - dev_entry->entry_arr = kmalloc_array(count, 1745 - sizeof(struct hl_debugfs_entry), 1746 - GFP_KERNEL); 1765 + dev_entry->entry_arr = kmalloc_array(count, sizeof(struct hl_debugfs_entry), GFP_KERNEL); 1747 1766 if (!dev_entry->entry_arr) 1748 - return; 1767 + return -ENOMEM; 1749 1768 1750 1769 dev_entry->data_dma_blob_desc.size = 0; 1751 1770 dev_entry->data_dma_blob_desc.data = NULL; ··· 1764 1787 spin_lock_init(&dev_entry->userptr_spinlock); 1765 1788 mutex_init(&dev_entry->ctx_mem_hash_mutex); 1766 1789 1767 - dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), 1768 - hl_debug_root); 1769 - 1770 - add_files_to_device(hdev, dev_entry, dev_entry->root); 1771 - if (!hdev->asic_prop.fw_security_enabled) 1772 - add_secured_nodes(dev_entry, dev_entry->root); 1790 + return 0; 1773 1791 } 1774 1792 1775 - void hl_debugfs_remove_device(struct hl_device *hdev) 1793 + void hl_debugfs_device_fini(struct hl_device *hdev) 1776 1794 { 1777 1795 struct hl_dbg_device_entry *entry = &hdev->hl_debugfs; 1778 1796 int i; 1779 - 1780 - debugfs_remove_recursive(entry->root); 1781 1797 1782 1798 mutex_destroy(&entry->ctx_mem_hash_mutex); 1783 1799 mutex_destroy(&entry->file_mutex); ··· 1782 1812 vfree(entry->state_dump[i]); 1783 1813 1784 1814 kfree(entry->entry_arr); 1815 + } 1816 + 1817 + void hl_debugfs_add_device(struct hl_device *hdev) 1818 + { 1819 + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; 1820 + 1821 + dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), hl_debug_root); 1822 + 1823 + add_files_to_device(hdev, dev_entry, dev_entry->root); 1824 + if (!hdev->asic_prop.fw_security_enabled) 1825 + add_secured_nodes(dev_entry, dev_entry->root); 1826 + } 1827 + 1828 + void hl_debugfs_remove_device(struct hl_device *hdev) 1829 + { 
1830 + struct hl_dbg_device_entry *entry = &hdev->hl_debugfs; 1831 + 1832 + debugfs_remove_recursive(entry->root); 1785 1833 } 1786 1834 1787 1835 void hl_debugfs_add_file(struct hl_fpriv *hpriv)
+71 -41
drivers/accel/habanalabs/common/device.c
··· 674 674 return 0; 675 675 } 676 676 677 - static int device_cdev_sysfs_add(struct hl_device *hdev) 677 + static int cdev_sysfs_debugfs_add(struct hl_device *hdev) 678 678 { 679 679 int rc; 680 680 ··· 699 699 goto delete_ctrl_cdev_device; 700 700 } 701 701 702 - hdev->cdev_sysfs_created = true; 702 + hl_debugfs_add_device(hdev); 703 + 704 + hdev->cdev_sysfs_debugfs_created = true; 703 705 704 706 return 0; 705 707 ··· 712 710 return rc; 713 711 } 714 712 715 - static void device_cdev_sysfs_del(struct hl_device *hdev) 713 + static void cdev_sysfs_debugfs_remove(struct hl_device *hdev) 716 714 { 717 - if (!hdev->cdev_sysfs_created) 715 + if (!hdev->cdev_sysfs_debugfs_created) 718 716 goto put_devices; 719 717 718 + hl_debugfs_remove_device(hdev); 720 719 hl_sysfs_fini(hdev); 721 720 cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); 722 721 cdev_device_del(&hdev->cdev, hdev->dev); ··· 984 981 hdev->asic_funcs->early_fini(hdev); 985 982 } 986 983 984 + static bool is_pci_link_healthy(struct hl_device *hdev) 985 + { 986 + u16 vendor_id; 987 + 988 + if (!hdev->pdev) 989 + return false; 990 + 991 + pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id); 992 + 993 + return (vendor_id == PCI_VENDOR_ID_HABANALABS); 994 + } 995 + 987 996 static void hl_device_heartbeat(struct work_struct *work) 988 997 { 989 998 struct hl_device *hdev = container_of(work, struct hl_device, ··· 1010 995 goto reschedule; 1011 996 1012 997 if (hl_device_operational(hdev, NULL)) 1013 - dev_err(hdev->dev, "Device heartbeat failed!\n"); 998 + dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n", 999 + is_pci_link_healthy(hdev) ? 
"healthy" : "broken"); 1014 1000 1015 1001 info.err_type = HL_INFO_FW_HEARTBEAT_ERR; 1016 1002 info.event_mask = &event_mask; ··· 1173 1157 mutex_unlock(&hdev->fpriv_ctrl_list_lock); 1174 1158 } 1175 1159 1160 + static void hl_abort_waiting_for_completions(struct hl_device *hdev) 1161 + { 1162 + hl_abort_waiting_for_cs_completions(hdev); 1163 + 1164 + /* Release all pending user interrupts, each pending user interrupt 1165 + * holds a reference to a user context. 1166 + */ 1167 + hl_release_pending_user_interrupts(hdev); 1168 + } 1169 + 1176 1170 static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset, 1177 1171 bool skip_wq_flush) 1178 1172 { ··· 1202 1176 /* flush the MMU prefetch workqueue */ 1203 1177 flush_workqueue(hdev->prefetch_wq); 1204 1178 1205 - /* Release all pending user interrupts, each pending user interrupt 1206 - * holds a reference to user context 1207 - */ 1208 - hl_release_pending_user_interrupts(hdev); 1179 + hl_abort_waiting_for_completions(hdev); 1209 1180 } 1210 1181 1211 1182 /* ··· 1944 1921 1945 1922 hl_ctx_put(ctx); 1946 1923 1947 - hl_abort_waitings_for_completion(hdev); 1924 + hl_abort_waiting_for_completions(hdev); 1948 1925 1949 1926 return 0; 1950 1927 ··· 2057 2034 int hl_device_init(struct hl_device *hdev) 2058 2035 { 2059 2036 int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt; 2060 - bool add_cdev_sysfs_on_err = false; 2037 + bool expose_interfaces_on_err = false; 2061 2038 2062 2039 rc = create_cdev(hdev); 2063 2040 if (rc) ··· 2173 2150 hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC; 2174 2151 2175 2152 hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL; 2176 - hl_debugfs_add_device(hdev); 2177 2153 2178 - /* debugfs nodes are created in hl_ctx_init so it must be called after 2179 - * hl_debugfs_add_device. 
2154 + rc = hl_debugfs_device_init(hdev); 2155 + if (rc) { 2156 + dev_err(hdev->dev, "failed to initialize debugfs entry structure\n"); 2157 + kfree(hdev->kernel_ctx); 2158 + goto mmu_fini; 2159 + } 2160 + 2161 + /* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after 2162 + * hl_debugfs_device_init(). 2180 2163 */ 2181 2164 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); 2182 2165 if (rc) { 2183 2166 dev_err(hdev->dev, "failed to initialize kernel context\n"); 2184 2167 kfree(hdev->kernel_ctx); 2185 - goto remove_device_from_debugfs; 2168 + goto debugfs_device_fini; 2186 2169 } 2187 2170 2188 2171 rc = hl_cb_pool_init(hdev); ··· 2204 2175 } 2205 2176 2206 2177 /* 2207 - * From this point, override rc (=0) in case of an error to allow 2208 - * debugging (by adding char devices and create sysfs nodes as part of 2209 - * the error flow). 2178 + * From this point, override rc (=0) in case of an error to allow debugging 2179 + * (by adding char devices and creating sysfs/debugfs files as part of the error flow). 2210 2180 */ 2211 - add_cdev_sysfs_on_err = true; 2181 + expose_interfaces_on_err = true; 2212 2182 2213 2183 /* Device is now enabled as part of the initialization requires 2214 2184 * communication with the device firmware to get information that ··· 2249 2221 } 2250 2222 2251 2223 /* 2252 - * Expose devices and sysfs nodes to user. 2253 - * From here there is no need to add char devices and create sysfs nodes 2254 - * in case of an error. 2224 + * Expose devices and sysfs/debugfs files to user. 2225 + * From here there is no need to expose them in case of an error. 
2255 2226 */ 2256 - add_cdev_sysfs_on_err = false; 2257 - rc = device_cdev_sysfs_add(hdev); 2227 + expose_interfaces_on_err = false; 2228 + rc = cdev_sysfs_debugfs_add(hdev); 2258 2229 if (rc) { 2259 - dev_err(hdev->dev, 2260 - "Failed to add char devices and sysfs nodes\n"); 2230 + dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n"); 2261 2231 rc = 0; 2262 2232 goto out_disabled; 2263 2233 } ··· 2301 2275 if (hl_ctx_put(hdev->kernel_ctx) != 1) 2302 2276 dev_err(hdev->dev, 2303 2277 "kernel ctx is still alive on initialization failure\n"); 2304 - remove_device_from_debugfs: 2305 - hl_debugfs_remove_device(hdev); 2278 + debugfs_device_fini: 2279 + hl_debugfs_device_fini(hdev); 2306 2280 mmu_fini: 2307 2281 hl_mmu_fini(hdev); 2308 2282 eq_fini: ··· 2326 2300 put_device(hdev->dev); 2327 2301 out_disabled: 2328 2302 hdev->disabled = true; 2329 - if (add_cdev_sysfs_on_err) 2330 - device_cdev_sysfs_add(hdev); 2331 - if (hdev->pdev) 2332 - dev_err(&hdev->pdev->dev, 2333 - "Failed to initialize hl%d. Device %s is NOT usable !\n", 2334 - hdev->cdev_idx, dev_name(&(hdev)->pdev->dev)); 2335 - else 2336 - pr_err("Failed to initialize hl%d. Device %s is NOT usable !\n", 2337 - hdev->cdev_idx, dev_name(&(hdev)->pdev->dev)); 2303 + if (expose_interfaces_on_err) 2304 + cdev_sysfs_debugfs_add(hdev); 2305 + dev_err(&hdev->pdev->dev, 2306 + "Failed to initialize hl%d. 
Device %s is NOT usable !\n", 2307 + hdev->cdev_idx, dev_name(&hdev->pdev->dev)); 2338 2308 2339 2309 return rc; 2340 2310 } ··· 2449 2427 if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1)) 2450 2428 dev_err(hdev->dev, "kernel ctx is still alive\n"); 2451 2429 2452 - hl_debugfs_remove_device(hdev); 2453 - 2454 2430 hl_dec_fini(hdev); 2455 2431 2456 2432 hl_vm_fini(hdev); ··· 2473 2453 2474 2454 device_early_fini(hdev); 2475 2455 2476 - /* Hide devices and sysfs nodes from user */ 2477 - device_cdev_sysfs_del(hdev); 2456 + /* Hide devices and sysfs/debugfs files from user */ 2457 + cdev_sysfs_debugfs_remove(hdev); 2458 + 2459 + hl_debugfs_device_fini(hdev); 2478 2460 2479 2461 pr_info("removed device successfully\n"); 2480 2462 } ··· 2688 2666 2689 2667 if (info->event_mask) 2690 2668 *info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR; 2669 + } 2670 + 2671 + void hl_enable_err_info_capture(struct hl_error_info *captured_err_info) 2672 + { 2673 + vfree(captured_err_info->page_fault_info.user_mappings); 2674 + memset(captured_err_info, 0, sizeof(struct hl_error_info)); 2675 + atomic_set(&captured_err_info->cs_timeout.write_enable, 1); 2676 + captured_err_info->undef_opcode.write_enable = true; 2691 2677 }
+166 -52
drivers/accel/habanalabs/common/firmware_if.c
··· 71 71 return NULL; 72 72 } 73 73 74 + /** 75 + * extract_u32_until_given_char() - given a string of the format "<u32><char>*", extract the u32. 76 + * @str: the given string 77 + * @ver_num: the pointer to the extracted u32 to be returned to the caller. 78 + * @given_char: the given char at the end of the u32 in the string 79 + * 80 + * Return: Upon success, return a pointer to the given_char in the string. Upon failure, return NULL 81 + */ 82 + static char *extract_u32_until_given_char(char *str, u32 *ver_num, char given_char) 83 + { 84 + char num_str[8] = {}, *ch; 85 + 86 + ch = strchrnul(str, given_char); 87 + if (*ch == '\0' || ch == str || ch - str >= sizeof(num_str)) 88 + return NULL; 89 + 90 + memcpy(num_str, str, ch - str); 91 + if (kstrtou32(num_str, 10, ver_num)) 92 + return NULL; 93 + return ch; 94 + } 95 + 96 + /** 97 + * hl_get_sw_major_minor_subminor() - extract the FW's SW version major, minor, sub-minor 98 + * from the version string 99 + * @hdev: pointer to the hl_device 100 + * @fw_str: the FW's version string 101 + * 102 + * The extracted version is set in the hdev fields: fw_sw_{major/minor/sub_minor}_ver. 103 + * 104 + * fw_str is expected to have one of two possible formats, examples: 105 + * 1) 'Preboot version hl-gaudi2-1.9.0-fw-42.0.1-sec-3' 106 + * 2) 'Preboot version hl-gaudi2-1.9.0-rc-fw-42.0.1-sec-3' 107 + * In those examples, the SW major,minor,subminor are correspondingly: 1,9,0. 108 + * 109 + * Return: 0 for success or a negative error code for failure. 
110 + */ 111 + static int hl_get_sw_major_minor_subminor(struct hl_device *hdev, const char *fw_str) 112 + { 113 + char *end, *start; 114 + 115 + end = strnstr(fw_str, "-rc-", VERSION_MAX_LEN); 116 + if (end == fw_str) 117 + return -EINVAL; 118 + 119 + if (!end) 120 + end = strnstr(fw_str, "-fw-", VERSION_MAX_LEN); 121 + 122 + if (end == fw_str) 123 + return -EINVAL; 124 + 125 + if (!end) 126 + return -EINVAL; 127 + 128 + for (start = end - 1; start != fw_str; start--) { 129 + if (*start == '-') 130 + break; 131 + } 132 + 133 + if (start == fw_str) 134 + return -EINVAL; 135 + 136 + /* start/end point each to the starting and ending hyphen of the sw version e.g. -1.9.0- */ 137 + start++; 138 + start = extract_u32_until_given_char(start, &hdev->fw_sw_major_ver, '.'); 139 + if (!start) 140 + goto err_zero_ver; 141 + 142 + start++; 143 + start = extract_u32_until_given_char(start, &hdev->fw_sw_minor_ver, '.'); 144 + if (!start) 145 + goto err_zero_ver; 146 + 147 + start++; 148 + start = extract_u32_until_given_char(start, &hdev->fw_sw_sub_minor_ver, '-'); 149 + if (!start) 150 + goto err_zero_ver; 151 + 152 + return 0; 153 + 154 + err_zero_ver: 155 + hdev->fw_sw_major_ver = 0; 156 + hdev->fw_sw_minor_ver = 0; 157 + hdev->fw_sw_sub_minor_ver = 0; 158 + return -EINVAL; 159 + } 160 + 161 + /** 162 + * hl_get_preboot_major_minor() - extract the FW's version major, minor from the version string. 163 + * @hdev: pointer to the hl_device 164 + * @preboot_ver: the FW's version string 165 + * 166 + * preboot_ver is expected to be the format of <major>.<minor>.<sub minor>*, e.g: 42.0.1-sec-3 167 + * The extracted version is set in the hdev fields: fw_inner_{major/minor}_ver. 168 + * 169 + * Return: 0 on success, negative error code for failure. 
170 + */ 74 171 static int hl_get_preboot_major_minor(struct hl_device *hdev, char *preboot_ver) 75 172 { 76 - char major[8], minor[8], *first_dot, *second_dot; 77 - int rc; 78 - 79 - first_dot = strnstr(preboot_ver, ".", 10); 80 - if (first_dot) { 81 - strscpy(major, preboot_ver, first_dot - preboot_ver + 1); 82 - rc = kstrtou32(major, 10, &hdev->fw_major_version); 83 - } else { 84 - rc = -EINVAL; 173 + preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_major_ver, '.'); 174 + if (!preboot_ver) { 175 + dev_err(hdev->dev, "Error parsing preboot major version\n"); 176 + goto err_zero_ver; 85 177 } 86 178 87 - if (rc) { 88 - dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc); 89 - return rc; 179 + preboot_ver++; 180 + 181 + preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_minor_ver, '.'); 182 + if (!preboot_ver) { 183 + dev_err(hdev->dev, "Error parsing preboot minor version\n"); 184 + goto err_zero_ver; 90 185 } 186 + return 0; 91 187 92 - /* skip the first dot */ 93 - first_dot++; 94 - 95 - second_dot = strnstr(first_dot, ".", 10); 96 - if (second_dot) { 97 - strscpy(minor, first_dot, second_dot - first_dot + 1); 98 - rc = kstrtou32(minor, 10, &hdev->fw_minor_version); 99 - } else { 100 - rc = -EINVAL; 101 - } 102 - 103 - if (rc) 104 - dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc); 105 - return rc; 188 + err_zero_ver: 189 + hdev->fw_inner_major_ver = 0; 190 + hdev->fw_inner_minor_ver = 0; 191 + return -EINVAL; 106 192 } 107 193 108 194 static int hl_request_fw(struct hl_device *hdev, ··· 589 503 { 590 504 gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr, 591 505 size); 506 + } 507 + 508 + int hl_fw_send_soft_reset(struct hl_device *hdev) 509 + { 510 + struct cpucp_packet pkt; 511 + int rc; 512 + 513 + memset(&pkt, 0, sizeof(pkt)); 514 + pkt.ctl = cpu_to_le32(CPUCP_PACKET_SOFT_RESET << CPUCP_PKT_CTL_OPCODE_SHIFT); 515 + rc = hdev->asic_funcs->send_cpu_message(hdev, 
(u32 *) &pkt, sizeof(pkt), 0, NULL); 516 + if (rc) 517 + dev_err(hdev->dev, "failed to send soft-reset msg (err = %d)\n", rc); 518 + 519 + return rc; 592 520 } 593 521 594 522 int hl_fw_send_device_activity(struct hl_device *hdev, bool open) ··· 1368 1268 1369 1269 void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev) 1370 1270 { 1371 - struct static_fw_load_mgr *static_loader = 1372 - &hdev->fw_loader.static_loader; 1271 + struct fw_load_mgr *fw_loader = &hdev->fw_loader; 1272 + u32 status, cpu_boot_status_reg, cpu_timeout; 1273 + struct static_fw_load_mgr *static_loader; 1274 + struct pre_fw_load_props *pre_fw_load; 1373 1275 int rc; 1374 1276 1375 1277 if (hdev->device_cpu_is_halted) ··· 1379 1277 1380 1278 /* Stop device CPU to make sure nothing bad happens */ 1381 1279 if (hdev->asic_prop.dynamic_fw_load) { 1280 + pre_fw_load = &fw_loader->pre_fw_load; 1281 + cpu_timeout = fw_loader->cpu_timeout; 1282 + cpu_boot_status_reg = pre_fw_load->cpu_boot_status_reg; 1283 + 1382 1284 rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader, 1383 - COMMS_GOTO_WFE, 0, false, 1384 - hdev->fw_loader.cpu_timeout); 1385 - if (rc) 1285 + COMMS_GOTO_WFE, 0, false, cpu_timeout); 1286 + if (rc) { 1386 1287 dev_err(hdev->dev, "Failed sending COMMS_GOTO_WFE\n"); 1288 + } else { 1289 + rc = hl_poll_timeout( 1290 + hdev, 1291 + cpu_boot_status_reg, 1292 + status, 1293 + status == CPU_BOOT_STATUS_IN_WFE, 1294 + hdev->fw_poll_interval_usec, 1295 + cpu_timeout); 1296 + if (rc) 1297 + dev_err(hdev->dev, "Current status=%u. 
Timed-out updating to WFE\n", 1298 + status); 1299 + } 1387 1300 } else { 1301 + static_loader = &hdev->fw_loader.static_loader; 1388 1302 WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE); 1389 1303 msleep(static_loader->cpu_reset_wait_msec); 1390 1304 ··· 2269 2151 struct asic_fixed_properties *prop = &hdev->asic_prop; 2270 2152 char *preboot_ver, *boot_ver; 2271 2153 char btl_ver[32]; 2154 + int rc; 2272 2155 2273 2156 switch (fwc) { 2274 2157 case FW_COMP_BOOT_FIT: ··· 2283 2164 break; 2284 2165 case FW_COMP_PREBOOT: 2285 2166 strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN); 2286 - preboot_ver = strnstr(prop->preboot_ver, "Preboot", 2287 - VERSION_MAX_LEN); 2167 + preboot_ver = strnstr(prop->preboot_ver, "Preboot", VERSION_MAX_LEN); 2168 + dev_info(hdev->dev, "preboot full version: '%s'\n", preboot_ver); 2169 + 2288 2170 if (preboot_ver && preboot_ver != prop->preboot_ver) { 2289 2171 strscpy(btl_ver, prop->preboot_ver, 2290 2172 min((int) (preboot_ver - prop->preboot_ver), 31)); 2291 2173 dev_info(hdev->dev, "%s\n", btl_ver); 2292 2174 } 2293 2175 2176 + rc = hl_get_sw_major_minor_subminor(hdev, preboot_ver); 2177 + if (rc) 2178 + return rc; 2294 2179 preboot_ver = extract_fw_ver_from_str(prop->preboot_ver); 2295 2180 if (preboot_ver) { 2296 - int rc; 2297 - 2298 - dev_info(hdev->dev, "preboot version %s\n", preboot_ver); 2299 - 2300 2181 rc = hl_get_preboot_major_minor(hdev, preboot_ver); 2301 2182 kfree(preboot_ver); 2302 2183 if (rc) ··· 2485 2366 fw_loader->dynamic_loader.comm_desc.cur_fw_ver); 2486 2367 if (rc) 2487 2368 goto release_fw; 2488 - 2489 - /* update state according to boot stage */ 2490 - if (cur_fwc == FW_COMP_BOOT_FIT) { 2491 - struct cpu_dyn_regs *dyn_regs; 2492 - 2493 - dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs; 2494 - hl_fw_boot_fit_update_state(hdev, 2495 - le32_to_cpu(dyn_regs->cpu_boot_dev_sts0), 2496 - le32_to_cpu(dyn_regs->cpu_boot_dev_sts1)); 2497 - } 2498 2369 2499 2370 /* copy boot fit to space 
allocated by FW */ 2500 2371 rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader); ··· 2788 2679 goto protocol_err; 2789 2680 } 2790 2681 2682 + rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader); 2683 + if (rc) 2684 + goto protocol_err; 2685 + 2686 + hl_fw_boot_fit_update_state(hdev, 2687 + le32_to_cpu(dyn_regs->cpu_boot_dev_sts0), 2688 + le32_to_cpu(dyn_regs->cpu_boot_dev_sts1)); 2689 + 2791 2690 /* 2792 2691 * when testing FW load (without Linux) on PLDM we don't want to 2793 2692 * wait until boot fit is active as it may take several hours. ··· 2804 2687 */ 2805 2688 if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX)) 2806 2689 return 0; 2807 - 2808 - rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader); 2809 - if (rc) 2810 - goto protocol_err; 2811 2690 2812 2691 /* Enable DRAM scrambling before Linux boot and after successful 2813 2692 * UBoot ··· 2838 2725 if (rc) 2839 2726 goto protocol_err; 2840 2727 2841 - hl_fw_linux_update_state(hdev, le32_to_cpu(dyn_regs->cpu_boot_dev_sts0), 2728 + hl_fw_linux_update_state(hdev, 2729 + le32_to_cpu(dyn_regs->cpu_boot_dev_sts0), 2842 2730 le32_to_cpu(dyn_regs->cpu_boot_dev_sts1)); 2843 2731 2844 2732 hl_fw_dynamic_update_linux_interrupt_if(hdev);
+33 -44
drivers/accel/habanalabs/common/habanalabs.h
··· 36 36 struct hl_device; 37 37 struct hl_fpriv; 38 38 39 + #define PCI_VENDOR_ID_HABANALABS 0x1da3 40 + 39 41 /* Use upper bits of mmap offset to store habana driver specific information. 40 42 * bits[63:59] - Encode mmap type 41 43 * bits[45:0] - mmap offset value ··· 113 111 MMU_DR_PGT = 0, /* device-dram-resident MMU PGT */ 114 112 MMU_HR_PGT, /* host resident MMU PGT */ 115 113 MMU_NUM_PGT_LOCATIONS /* num of PGT locations */ 116 - }; 117 - 118 - /** 119 - * enum hl_mmu_enablement - what mmu modules to enable 120 - * @MMU_EN_NONE: mmu disabled. 121 - * @MMU_EN_ALL: enable all. 122 - * @MMU_EN_PMMU_ONLY: Enable only the PMMU leaving the DMMU disabled. 123 - */ 124 - enum hl_mmu_enablement { 125 - MMU_EN_NONE = 0, 126 - MMU_EN_ALL = 1, 127 - MMU_EN_PMMU_ONLY = 3, /* N/A for Goya/Gaudi */ 128 114 }; 129 115 130 116 /* ··· 2558 2568 ktime_t __timeout; \ 2559 2569 u32 __elbi_read; \ 2560 2570 int __rc = 0; \ 2561 - if (hdev->pdev) \ 2562 - __timeout = ktime_add_us(ktime_get(), timeout_us); \ 2563 - else \ 2564 - __timeout = ktime_add_us(ktime_get(),\ 2565 - min((u64)(timeout_us * 10), \ 2566 - (u64) HL_SIM_MAX_TIMEOUT_US)); \ 2571 + __timeout = ktime_add_us(ktime_get(), timeout_us); \ 2567 2572 might_sleep_if(sleep_us); \ 2568 2573 for (;;) { \ 2569 2574 if (elbi) { \ ··· 2610 2625 u8 __arr_idx; \ 2611 2626 int __rc = 0; \ 2612 2627 \ 2613 - if (hdev->pdev) \ 2614 - __timeout = ktime_add_us(ktime_get(), timeout_us); \ 2615 - else \ 2616 - __timeout = ktime_add_us(ktime_get(),\ 2617 - min(((u64)timeout_us * 10), \ 2618 - (u64) HL_SIM_MAX_TIMEOUT_US)); \ 2619 - \ 2628 + __timeout = ktime_add_us(ktime_get(), timeout_us); \ 2620 2629 might_sleep_if(sleep_us); \ 2621 2630 if (arr_size >= 64) \ 2622 2631 __rc = -EINVAL; \ ··· 2668 2689 mem_written_by_device) \ 2669 2690 ({ \ 2670 2691 ktime_t __timeout; \ 2671 - if (hdev->pdev) \ 2672 - __timeout = ktime_add_us(ktime_get(), timeout_us); \ 2673 - else \ 2674 - __timeout = ktime_add_us(ktime_get(),\ 2675 - 
min((u64)(timeout_us * 100), \ 2676 - (u64) HL_SIM_MAX_TIMEOUT_US)); \ 2692 + \ 2693 + __timeout = ktime_add_us(ktime_get(), timeout_us); \ 2677 2694 might_sleep_if(sleep_us); \ 2678 2695 for (;;) { \ 2679 2696 /* Verify we read updates done by other cores or by device */ \ ··· 3200 3225 * @captured_err_info: holds information about errors. 3201 3226 * @reset_info: holds current device reset information. 3202 3227 * @stream_master_qid_arr: pointer to array with QIDs of master streams. 3203 - * @fw_major_version: major version of current loaded preboot. 3204 - * @fw_minor_version: minor version of current loaded preboot. 3228 + * @fw_inner_major_ver: the major of current loaded preboot inner version. 3229 + * @fw_inner_minor_ver: the minor of current loaded preboot inner version. 3230 + * @fw_sw_major_ver: the major of current loaded preboot SW version. 3231 + * @fw_sw_minor_ver: the minor of current loaded preboot SW version. 3232 + * @fw_sw_sub_minor_ver: the sub-minor of current loaded preboot SW version. 3205 3233 * @dram_used_mem: current DRAM memory consumption. 3206 3234 * @memory_scrub_val: the value to which the dram will be scrubbed to using cb scrub_device_dram 3207 3235 * @timeout_jiffies: device CS timeout value. ··· 3265 3287 * @in_debug: whether the device is in a state where the profiling/tracing infrastructure 3266 3288 * can be used. This indication is needed because in some ASICs we need to do 3267 3289 * specific operations to enable that infrastructure. 3268 - * @cdev_sysfs_created: were char devices and sysfs nodes created. 3290 + * @cdev_sysfs_debugfs_created: were char devices and sysfs/debugfs files created. 3269 3291 * @stop_on_err: true if engines should stop on error. 3270 3292 * @supports_sync_stream: is sync stream supported. 3271 3293 * @sync_stream_queue_idx: helper index for sync stream queues initialization. ··· 3292 3314 * @nic_ports_mask: Controls which NIC ports are enabled. Used only for testing. 
3293 3315 * @fw_components: Controls which f/w components to load to the device. There are multiple f/w 3294 3316 * stages and sometimes we want to stop at a certain stage. Used only for testing. 3295 - * @mmu_enable: Whether to enable or disable the device MMU(s). Used only for testing. 3317 + * @mmu_disable: Disable the device MMU(s). Used only for testing. 3296 3318 * @cpu_queues_enable: Whether to enable queues communication vs. the f/w. Used only for testing. 3297 3319 * @pldm: Whether we are running in Palladium environment. Used only for testing. 3298 3320 * @hard_reset_on_fw_events: Whether to do device hard-reset when a fatal event is received from ··· 3390 3412 struct hl_reset_info reset_info; 3391 3413 3392 3414 u32 *stream_master_qid_arr; 3393 - u32 fw_major_version; 3394 - u32 fw_minor_version; 3415 + u32 fw_inner_major_ver; 3416 + u32 fw_inner_minor_ver; 3417 + u32 fw_sw_major_ver; 3418 + u32 fw_sw_minor_ver; 3419 + u32 fw_sw_sub_minor_ver; 3395 3420 atomic64_t dram_used_mem; 3396 3421 u64 memory_scrub_val; 3397 3422 u64 timeout_jiffies; ··· 3432 3451 u8 init_done; 3433 3452 u8 device_cpu_disabled; 3434 3453 u8 in_debug; 3435 - u8 cdev_sysfs_created; 3454 + u8 cdev_sysfs_debugfs_created; 3436 3455 u8 stop_on_err; 3437 3456 u8 supports_sync_stream; 3438 3457 u8 sync_stream_queue_idx; ··· 3455 3474 /* Parameters for bring-up to be upstreamed */ 3456 3475 u64 nic_ports_mask; 3457 3476 u64 fw_components; 3458 - u8 mmu_enable; 3477 + u8 mmu_disable; 3459 3478 u8 cpu_queues_enable; 3460 3479 u8 pldm; 3461 3480 u8 hard_reset_on_fw_events; ··· 3528 3547 hl_ioctl_t *func; 3529 3548 }; 3530 3549 3531 - static inline bool hl_is_fw_ver_below_1_9(struct hl_device *hdev) 3550 + static inline bool hl_is_fw_sw_ver_below(struct hl_device *hdev, u32 fw_sw_major, u32 fw_sw_minor) 3532 3551 { 3533 - return (hdev->fw_major_version < 42); 3552 + if (hdev->fw_sw_major_ver < fw_sw_major) 3553 + return true; 3554 + if (hdev->fw_sw_major_ver > fw_sw_major) 3555 + return false; 
3556 + if (hdev->fw_sw_minor_ver < fw_sw_minor) 3557 + return true; 3558 + return false; 3534 3559 } 3535 3560 3536 3561 /* ··· 3800 3813 u64 curr_pte, bool *is_new_hop); 3801 3814 int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops, 3802 3815 struct hl_hr_mmu_funcs *hr_func); 3803 - void hl_mmu_swap_out(struct hl_ctx *ctx); 3804 - void hl_mmu_swap_in(struct hl_ctx *ctx); 3805 3816 int hl_mmu_if_set_funcs(struct hl_device *hdev); 3806 3817 void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu); 3807 3818 void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu); ··· 3857 3872 int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num); 3858 3873 int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid); 3859 3874 int hl_fw_send_device_activity(struct hl_device *hdev, bool open); 3875 + int hl_fw_send_soft_reset(struct hl_device *hdev); 3860 3876 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3], 3861 3877 bool is_wc[3]); 3862 3878 int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data); ··· 3907 3921 void hl_dec_ctx_fini(struct hl_ctx *ctx); 3908 3922 3909 3923 void hl_release_pending_user_interrupts(struct hl_device *hdev); 3910 - void hl_abort_waitings_for_completion(struct hl_device *hdev); 3924 + void hl_abort_waiting_for_cs_completions(struct hl_device *hdev); 3911 3925 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx, 3912 3926 struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig); 3913 3927 ··· 3944 3958 u64 *event_mask); 3945 3959 void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask); 3946 3960 void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info); 3961 + void hl_enable_err_info_capture(struct hl_error_info *captured_err_info); 3947 3962 3948 3963 #ifdef CONFIG_DEBUG_FS 3949 3964 3950 3965 void hl_debugfs_init(void); 3951 3966 void 
hl_debugfs_fini(void); 3967 + int hl_debugfs_device_init(struct hl_device *hdev); 3968 + void hl_debugfs_device_fini(struct hl_device *hdev); 3952 3969 void hl_debugfs_add_device(struct hl_device *hdev); 3953 3970 void hl_debugfs_remove_device(struct hl_device *hdev); 3954 3971 void hl_debugfs_add_file(struct hl_fpriv *hpriv);
+2 -7
drivers/accel/habanalabs/common/habanalabs_drv.c
··· 13 13 14 14 #include <linux/pci.h> 15 15 #include <linux/module.h> 16 + #include <linux/vmalloc.h> 16 17 17 18 #define CREATE_TRACE_POINTS 18 19 #include <trace/events/habanalabs.h> ··· 54 53 module_param(boot_error_status_mask, ulong, 0444); 55 54 MODULE_PARM_DESC(boot_error_status_mask, 56 55 "Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)"); 57 - 58 - #define PCI_VENDOR_ID_HABANALABS 0x1da3 59 56 60 57 #define PCI_IDS_GOYA 0x0001 61 58 #define PCI_IDS_GAUDI 0x1000 ··· 219 220 220 221 hl_debugfs_add_file(hpriv); 221 222 222 - memset(&hdev->captured_err_info, 0, sizeof(hdev->captured_err_info)); 223 - atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1); 224 - hdev->captured_err_info.undef_opcode.write_enable = true; 223 + hl_enable_err_info_capture(&hdev->captured_err_info); 225 224 226 225 hdev->open_counter++; 227 226 hdev->last_successful_open_jif = jiffies; ··· 304 307 { 305 308 hdev->nic_ports_mask = 0; 306 309 hdev->fw_components = FW_TYPE_ALL_TYPES; 307 - hdev->mmu_enable = MMU_EN_ALL; 308 310 hdev->cpu_queues_enable = 1; 309 311 hdev->pldm = 0; 310 312 hdev->hard_reset_on_fw_events = 1; ··· 378 382 /* If CPU queues not enabled, no way to do heartbeat */ 379 383 if (!hdev->cpu_queues_enable) 380 384 hdev->heartbeat = 0; 381 - 382 385 fixup_device_params_per_asic(hdev, tmp_timeout); 383 386 384 387 return 0;
+16 -19
drivers/accel/habanalabs/common/habanalabs_ioctl.c
··· 62 62 hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev); 63 63 hw_ip.sram_base_address = prop->sram_user_base_address; 64 64 hw_ip.dram_base_address = 65 - hdev->mmu_enable && prop->dram_supports_virtual_memory ? 65 + prop->dram_supports_virtual_memory ? 66 66 prop->dmmu.start_addr : prop->dram_user_base_address; 67 67 hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF; 68 68 hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask; ··· 71 71 72 72 dram_available_size = prop->dram_size - dram_kmd_size; 73 73 74 - if (hdev->mmu_enable == MMU_EN_ALL) 75 - hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size, 76 - prop->dram_page_size) * prop->dram_page_size; 77 - else 78 - hw_ip.dram_size = dram_available_size; 74 + hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size, prop->dram_page_size) * 75 + prop->dram_page_size; 79 76 80 77 if (hw_ip.dram_size > PAGE_SIZE) 81 78 hw_ip.dram_enabled = 1; ··· 839 842 struct hw_err_info *info; 840 843 int rc; 841 844 842 - if ((!user_buf_size) || (!user_buf)) 845 + if (!user_buf) 843 846 return -EINVAL; 844 - 845 - if (user_buf_size < sizeof(struct hl_info_hw_err_event)) 846 - return -ENOMEM; 847 847 848 848 info = &hdev->captured_err_info.hw_err; 849 849 if (!info->event_info_available) 850 - return -ENOENT; 850 + return 0; 851 + 852 + if (user_buf_size < sizeof(struct hl_info_hw_err_event)) 853 + return -ENOMEM; 851 854 852 855 rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event)); 853 856 return rc ? 
-EFAULT : 0; ··· 861 864 struct fw_err_info *info; 862 865 int rc; 863 866 864 - if ((!user_buf_size) || (!user_buf)) 867 + if (!user_buf) 865 868 return -EINVAL; 866 - 867 - if (user_buf_size < sizeof(struct hl_info_fw_err_event)) 868 - return -ENOMEM; 869 869 870 870 info = &hdev->captured_err_info.fw_err; 871 871 if (!info->event_info_available) 872 - return -ENOENT; 872 + return 0; 873 + 874 + if (user_buf_size < sizeof(struct hl_info_fw_err_event)) 875 + return -ENOMEM; 873 876 874 877 rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event)); 875 878 return rc ? -EFAULT : 0; ··· 1195 1198 1196 1199 out_err: 1197 1200 if (retcode) 1198 - dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", 1201 + dev_dbg_ratelimited(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", 1199 1202 task_pid_nr(current), cmd, nr); 1200 1203 1201 1204 if (kdata != stack_kdata) ··· 1219 1222 if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) { 1220 1223 ioctl = &hl_ioctls[nr]; 1221 1224 } else { 1222 - dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n", 1225 + dev_dbg_ratelimited(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n", 1223 1226 task_pid_nr(current), nr); 1224 1227 return -ENOTTY; 1225 1228 } ··· 1242 1245 if (nr == _IOC_NR(HL_IOCTL_INFO)) { 1243 1246 ioctl = &hl_ioctls_control[nr]; 1244 1247 } else { 1245 - dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n", 1248 + dev_dbg_ratelimited(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n", 1246 1249 task_pid_nr(current), nr); 1247 1250 return -ENOTTY; 1248 1251 }
+1 -1
drivers/accel/habanalabs/common/irq.c
··· 430 430 cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe); 431 431 if ((hdev->event_queue.check_eqe_index) && 432 432 (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) { 433 - dev_dbg(hdev->dev, 433 + dev_err(hdev->dev, 434 434 "EQE %#x in queue is ready but index does not match %d!=%d", 435 435 cur_eqe, 436 436 ((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
+2 -102
drivers/accel/habanalabs/common/memory.c
··· 1034 1034 } 1035 1035 } 1036 1036 1037 - static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, 1038 - u64 *paddr) 1039 - { 1040 - struct hl_device *hdev = ctx->hdev; 1041 - struct hl_vm *vm = &hdev->vm; 1042 - struct hl_vm_phys_pg_pack *phys_pg_pack; 1043 - u32 handle; 1044 - 1045 - handle = lower_32_bits(args->map_device.handle); 1046 - spin_lock(&vm->idr_lock); 1047 - phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); 1048 - if (!phys_pg_pack) { 1049 - spin_unlock(&vm->idr_lock); 1050 - dev_err(hdev->dev, "no match for handle %u\n", handle); 1051 - return -EINVAL; 1052 - } 1053 - 1054 - *paddr = phys_pg_pack->pages[0]; 1055 - 1056 - spin_unlock(&vm->idr_lock); 1057 - 1058 - return 0; 1059 - } 1060 - 1061 1037 /** 1062 1038 * map_device_va() - map the given memory. 1063 1039 * @ctx: pointer to the context structure. ··· 2070 2094 return rc; 2071 2095 } 2072 2096 2073 - static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args) 2074 - { 2075 - struct hl_device *hdev = hpriv->hdev; 2076 - u64 block_handle, device_addr = 0; 2077 - struct hl_ctx *ctx = hpriv->ctx; 2078 - u32 handle = 0, block_size; 2079 - int rc; 2080 - 2081 - switch (args->in.op) { 2082 - case HL_MEM_OP_ALLOC: 2083 - if (args->in.alloc.mem_size == 0) { 2084 - dev_err(hdev->dev, "alloc size must be larger than 0\n"); 2085 - rc = -EINVAL; 2086 - goto out; 2087 - } 2088 - 2089 - /* Force contiguous as there are no real MMU 2090 - * translations to overcome physical memory gaps 2091 - */ 2092 - args->in.flags |= HL_MEM_CONTIGUOUS; 2093 - rc = alloc_device_memory(ctx, &args->in, &handle); 2094 - 2095 - memset(args, 0, sizeof(*args)); 2096 - args->out.handle = (__u64) handle; 2097 - break; 2098 - 2099 - case HL_MEM_OP_FREE: 2100 - rc = free_device_memory(ctx, &args->in); 2101 - break; 2102 - 2103 - case HL_MEM_OP_MAP: 2104 - if (args->in.flags & HL_MEM_USERPTR) { 2105 - dev_err(hdev->dev, "Failed to map host memory when MMU is disabled\n"); 2106 - rc = 
-EPERM; 2107 - } else { 2108 - rc = get_paddr_from_handle(ctx, &args->in, &device_addr); 2109 - memset(args, 0, sizeof(*args)); 2110 - args->out.device_virt_addr = device_addr; 2111 - } 2112 - 2113 - break; 2114 - 2115 - case HL_MEM_OP_UNMAP: 2116 - rc = 0; 2117 - break; 2118 - 2119 - case HL_MEM_OP_MAP_BLOCK: 2120 - rc = map_block(hdev, args->in.map_block.block_addr, &block_handle, &block_size); 2121 - args->out.block_handle = block_handle; 2122 - args->out.block_size = block_size; 2123 - break; 2124 - 2125 - case HL_MEM_OP_EXPORT_DMABUF_FD: 2126 - dev_err(hdev->dev, "Failed to export dma-buf object when MMU is disabled\n"); 2127 - rc = -EPERM; 2128 - break; 2129 - 2130 - case HL_MEM_OP_TS_ALLOC: 2131 - rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle); 2132 - break; 2133 - default: 2134 - dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); 2135 - rc = -EINVAL; 2136 - break; 2137 - } 2138 - 2139 - out: 2140 - return rc; 2141 - } 2142 - 2143 2097 static void ts_buff_release(struct hl_mmap_mem_buf *buf) 2144 2098 { 2145 2099 struct hl_ts_buff *ts_buff = buf->private; ··· 2187 2281 hdev->status[status]); 2188 2282 return -EBUSY; 2189 2283 } 2190 - 2191 - if (!hdev->mmu_enable) 2192 - return mem_ioctl_no_mmu(hpriv, args); 2193 2284 2194 2285 switch (args->in.op) { 2195 2286 case HL_MEM_OP_ALLOC: ··· 2682 2779 atomic64_set(&ctx->dram_phys_mem, 0); 2683 2780 2684 2781 /* 2685 - * - If MMU is enabled, init the ranges as usual. 2686 - * - If MMU is disabled, in case of host mapping, the returned address 2687 - * is the given one. 2688 2782 * In case of DRAM mapping, the returned address is the physical 2689 2783 * address of the memory related to the given handle. 
2690 2784 */ 2691 - if (!ctx->hdev->mmu_enable) 2785 + if (ctx->hdev->mmu_disable) 2692 2786 return 0; 2693 2787 2694 2788 dram_range_start = prop->dmmu.start_addr; ··· 2735 2835 struct hl_mem_in args; 2736 2836 int i; 2737 2837 2738 - if (!hdev->mmu_enable) 2838 + if (hdev->mmu_disable) 2739 2839 return; 2740 2840 2741 2841 hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
+8 -48
drivers/accel/habanalabs/common/mmu/mmu.c
··· 44 44 { 45 45 int rc = -EOPNOTSUPP; 46 46 47 - if (!hdev->mmu_enable) 47 + if (hdev->mmu_disable) 48 48 return 0; 49 49 50 50 mutex_init(&hdev->mmu_lock); ··· 82 82 */ 83 83 void hl_mmu_fini(struct hl_device *hdev) 84 84 { 85 - if (!hdev->mmu_enable) 85 + if (hdev->mmu_disable) 86 86 return; 87 87 88 88 if (hdev->mmu_func[MMU_DR_PGT].fini != NULL) ··· 107 107 struct hl_device *hdev = ctx->hdev; 108 108 int rc = -EOPNOTSUPP; 109 109 110 - if (!hdev->mmu_enable) 110 + if (hdev->mmu_disable) 111 111 return 0; 112 112 113 113 if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) { ··· 145 145 { 146 146 struct hl_device *hdev = ctx->hdev; 147 147 148 - if (!hdev->mmu_enable) 148 + if (hdev->mmu_disable) 149 149 return; 150 150 151 151 if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL) ··· 233 233 u64 real_virt_addr; 234 234 bool is_dram_addr; 235 235 236 - if (!hdev->mmu_enable) 236 + if (hdev->mmu_disable) 237 237 return 0; 238 238 239 239 is_dram_addr = hl_is_dram_va(hdev, virt_addr); ··· 301 301 bool is_dram_addr; 302 302 303 303 304 - if (!hdev->mmu_enable) 304 + if (hdev->mmu_disable) 305 305 return 0; 306 306 307 307 is_dram_addr = hl_is_dram_va(hdev, virt_addr); ··· 472 472 return rc; 473 473 } 474 474 475 - /* 476 - * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out 477 - * 478 - * @ctx: pointer to the context structure 479 - * 480 - */ 481 - void hl_mmu_swap_out(struct hl_ctx *ctx) 482 - { 483 - struct hl_device *hdev = ctx->hdev; 484 - 485 - if (!hdev->mmu_enable) 486 - return; 487 - 488 - if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL) 489 - hdev->mmu_func[MMU_DR_PGT].swap_out(ctx); 490 - 491 - if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL) 492 - hdev->mmu_func[MMU_HR_PGT].swap_out(ctx); 493 - } 494 - 495 - /* 496 - * hl_mmu_swap_in - marks all mapping of the given ctx as swapped in 497 - * 498 - * @ctx: pointer to the context structure 499 - * 500 - */ 501 - void hl_mmu_swap_in(struct hl_ctx *ctx) 502 - { 503 - struct hl_device *hdev = 
ctx->hdev; 504 - 505 - if (!hdev->mmu_enable) 506 - return; 507 - 508 - if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL) 509 - hdev->mmu_func[MMU_DR_PGT].swap_in(ctx); 510 - 511 - if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL) 512 - hdev->mmu_func[MMU_HR_PGT].swap_in(ctx); 513 - } 514 - 515 475 static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, 516 476 struct hl_mmu_hop_info *hops, 517 477 u64 *phys_addr) ··· 554 594 int pgt_residency, rc; 555 595 bool is_dram_addr; 556 596 557 - if (!hdev->mmu_enable) 597 + if (hdev->mmu_disable) 558 598 return -EOPNOTSUPP; 559 599 560 600 prop = &hdev->asic_prop; ··· 585 625 586 626 int hl_mmu_if_set_funcs(struct hl_device *hdev) 587 627 { 588 - if (!hdev->mmu_enable) 628 + if (hdev->mmu_disable) 589 629 return 0; 590 630 591 631 switch (hdev->asic_type) {
+29 -28
drivers/accel/habanalabs/common/security.c
··· 284 284 * @instance_offset: offset between instances 285 285 * @pb_blocks: blocks array 286 286 * @blocks_array_size: blocks array size 287 - * @regs_array: register array 288 - * @regs_array_size: register array size 287 + * @user_regs_array: unsecured register array 288 + * @user_regs_array_size: unsecured register array size 289 289 * @mask: enabled instances mask: 1- enabled, 0- disabled 290 290 */ 291 291 int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores, 292 292 u32 dcore_offset, u32 num_instances, u32 instance_offset, 293 293 const u32 pb_blocks[], u32 blocks_array_size, 294 - const u32 *regs_array, u32 regs_array_size, u64 mask) 294 + const u32 *user_regs_array, u32 user_regs_array_size, u64 mask) 295 295 { 296 296 int i, j; 297 297 struct hl_block_glbl_sec *glbl_sec; ··· 303 303 return -ENOMEM; 304 304 305 305 hl_secure_block(hdev, glbl_sec, blocks_array_size); 306 - hl_unsecure_registers(hdev, regs_array, regs_array_size, 0, pb_blocks, 307 - glbl_sec, blocks_array_size); 306 + hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size, 0, 307 + pb_blocks, glbl_sec, blocks_array_size); 308 308 309 309 /* Fill all blocks with the same configuration */ 310 310 for (i = 0 ; i < num_dcores ; i++) { ··· 336 336 * @instance_offset: offset between instances 337 337 * @pb_blocks: blocks array 338 338 * @blocks_array_size: blocks array size 339 - * @regs_array: register array 340 - * @regs_array_size: register array size 339 + * @user_regs_array: unsecured register array 340 + * @user_regs_array_size: unsecured register array size 341 341 * 342 342 */ 343 343 int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset, 344 344 u32 num_instances, u32 instance_offset, 345 345 const u32 pb_blocks[], u32 blocks_array_size, 346 - const u32 *regs_array, u32 regs_array_size) 346 + const u32 *user_regs_array, u32 user_regs_array_size) 347 347 { 348 348 return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset, 349 349 num_instances, 
instance_offset, pb_blocks, 350 - blocks_array_size, regs_array, regs_array_size, 351 - ULLONG_MAX); 350 + blocks_array_size, user_regs_array, 351 + user_regs_array_size, ULLONG_MAX); 352 352 } 353 353 354 354 /** ··· 364 364 * @instance_offset: offset between instances 365 365 * @pb_blocks: blocks array 366 366 * @blocks_array_size: blocks array size 367 - * @regs_range_array: register range array 368 - * @regs_range_array_size: register range array size 367 + * @user_regs_range_array: unsecured register range array 368 + * @user_regs_range_array_size: unsecured register range array size 369 369 * @mask: enabled instances mask: 1- enabled, 0- disabled 370 370 */ 371 371 int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores, 372 372 u32 dcore_offset, u32 num_instances, u32 instance_offset, 373 373 const u32 pb_blocks[], u32 blocks_array_size, 374 - const struct range *regs_range_array, u32 regs_range_array_size, 375 - u64 mask) 374 + const struct range *user_regs_range_array, 375 + u32 user_regs_range_array_size, u64 mask) 376 376 { 377 377 int i, j, rc = 0; 378 378 struct hl_block_glbl_sec *glbl_sec; ··· 384 384 return -ENOMEM; 385 385 386 386 hl_secure_block(hdev, glbl_sec, blocks_array_size); 387 - rc = hl_unsecure_registers_range(hdev, regs_range_array, 388 - regs_range_array_size, 0, pb_blocks, glbl_sec, 387 + rc = hl_unsecure_registers_range(hdev, user_regs_range_array, 388 + user_regs_range_array_size, 0, pb_blocks, glbl_sec, 389 389 blocks_array_size); 390 390 if (rc) 391 391 goto free_glbl_sec; ··· 422 422 * @instance_offset: offset between instances 423 423 * @pb_blocks: blocks array 424 424 * @blocks_array_size: blocks array size 425 - * @regs_range_array: register range array 426 - * @regs_range_array_size: register range array size 425 + * @user_regs_range_array: unsecured register range array 426 + * @user_regs_range_array_size: unsecured register range array size 427 427 * 428 428 */ 429 429 int hl_init_pb_ranges(struct hl_device 
*hdev, u32 num_dcores, 430 430 u32 dcore_offset, u32 num_instances, u32 instance_offset, 431 431 const u32 pb_blocks[], u32 blocks_array_size, 432 - const struct range *regs_range_array, u32 regs_range_array_size) 432 + const struct range *user_regs_range_array, 433 + u32 user_regs_range_array_size) 433 434 { 434 435 return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset, 435 436 num_instances, instance_offset, pb_blocks, 436 - blocks_array_size, regs_range_array, 437 - regs_range_array_size, ULLONG_MAX); 437 + blocks_array_size, user_regs_range_array, 438 + user_regs_range_array_size, ULLONG_MAX); 438 439 } 439 440 440 441 /** ··· 448 447 * @instance_offset: offset between instances 449 448 * @pb_blocks: blocks array 450 449 * @blocks_array_size: blocks array size 451 - * @regs_array: register array 452 - * @regs_array_size: register array size 450 + * @user_regs_array: unsecured register array 451 + * @user_regs_array_size: unsecured register array size 453 452 * 454 453 */ 455 454 int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset, 456 455 u32 num_instances, u32 instance_offset, 457 456 const u32 pb_blocks[], u32 blocks_array_size, 458 - const u32 *regs_array, u32 regs_array_size) 457 + const u32 *user_regs_array, u32 user_regs_array_size) 459 458 { 460 459 int i, rc = 0; 461 460 struct hl_block_glbl_sec *glbl_sec; ··· 467 466 return -ENOMEM; 468 467 469 468 hl_secure_block(hdev, glbl_sec, blocks_array_size); 470 - rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0, 471 - pb_blocks, glbl_sec, blocks_array_size); 469 + rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size, 470 + 0, pb_blocks, glbl_sec, blocks_array_size); 472 471 if (rc) 473 472 goto free_glbl_sec; 474 473 ··· 496 495 * @instance_offset: offset between instances 497 496 * @pb_blocks: blocks array 498 497 * @blocks_array_size: blocks array size 499 - * @regs_range_array: register range array 500 - * @regs_range_array_size: register range 
array size 498 + * @user_regs_range_array: unsecured register range array 499 + * @user_regs_range_array_size: unsecured register range array size 501 500 * 502 501 */ 503 502 int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+1 -12
drivers/accel/habanalabs/gaudi/gaudi.c
··· 114 114 GAUDI_QUEUE_ID_DMA_1_3 115 115 }; 116 116 117 - static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { 118 - "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", 119 - "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", 120 - "gaudi cq 5_0", "gaudi cq 5_1", "gaudi cq 5_2", "gaudi cq 5_3", 121 - "gaudi cpu eq" 122 - }; 123 - 124 117 static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = { 125 118 [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0, 126 119 [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1, ··· 1469 1476 } 1470 1477 1471 1478 /* Allocate internal mapped CB for non patched CBs */ 1472 - cb = hl_cb_kernel_create(hdev, cb_size, 1473 - hdev->mmu_enable && !patched_cb); 1479 + cb = hl_cb_kernel_create(hdev, cb_size, !patched_cb); 1474 1480 if (!cb) { 1475 1481 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1476 1482 atomic64_inc(&cntr->out_of_mem_drop_cnt); ··· 3642 3650 struct gaudi_device *gaudi = hdev->asic_specific; 3643 3651 u64 hop0_addr; 3644 3652 int rc, i; 3645 - 3646 - if (!hdev->mmu_enable) 3647 - return 0; 3648 3653 3649 3654 if (gaudi->hw_cap_initialized & HW_CAP_MMU) 3650 3655 return 0;
+125 -209
drivers/accel/habanalabs/gaudi2/gaudi2.c
··· 57 57 58 58 #define GAUDI2_NA_EVENT_CAUSE 0xFF 59 59 #define GAUDI2_NUM_OF_QM_ERR_CAUSE 18 60 - #define GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE 25 60 + #define GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE 25 61 61 #define GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE 3 62 62 #define GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE 14 63 63 #define GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE 3 64 64 #define GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE 2 65 65 #define GAUDI2_NUM_OF_ROT_ERR_CAUSE 22 66 - #define GAUDI2_NUM_OF_TPC_INTR_CAUSE 30 66 + #define GAUDI2_NUM_OF_TPC_INTR_CAUSE 31 67 67 #define GAUDI2_NUM_OF_DEC_ERR_CAUSE 25 68 68 #define GAUDI2_NUM_OF_MME_ERR_CAUSE 16 69 69 #define GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE 5 ··· 161 161 162 162 #define PSOC_RAZWI_ENG_STR_SIZE 128 163 163 #define PSOC_RAZWI_MAX_ENG_PER_RTR 5 164 + 165 + /* HW scrambles only bits 0-25 */ 166 + #define HW_UNSCRAMBLED_BITS_MASK GENMASK_ULL(63, 26) 164 167 165 168 struct gaudi2_razwi_info { 166 169 u32 axuser_xy; ··· 804 801 "PQC L2H error" 805 802 }; 806 803 807 - static const char * const gaudi2_qman_lower_cp_error_cause[GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE] = { 804 + static const char * const gaudi2_lower_qman_error_cause[GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE] = { 808 805 "RSVD0", 809 806 "CQ AXI HBW error", 810 807 "CP AXI HBW error", ··· 894 891 "invalid_lock_access", 895 892 "LD_L protection violation", 896 893 "ST_L protection violation", 894 + "D$ L0CS mismatch", 897 895 }; 898 896 899 897 static const char * const guadi2_mme_error_cause[GAUDI2_NUM_OF_MME_ERR_CAUSE] = { ··· 3619 3615 3620 3616 prop->supports_compute_reset = true; 3621 3617 3618 + /* Event queue sanity check added in FW version 1.11 */ 3619 + if (hl_is_fw_sw_ver_below(hdev, 1, 11)) 3620 + hdev->event_queue.check_eqe_index = false; 3621 + else 3622 + hdev->event_queue.check_eqe_index = true; 3623 + 3622 3624 hdev->asic_funcs->set_pci_memory_regions(hdev); 3623 3625 3624 3626 rc = gaudi2_special_blocks_iterator_config(hdev); ··· 3640 3630 special_blocks_free: 3641 3631 
gaudi2_special_blocks_iterator_free(hdev); 3642 3632 free_scratchpad_mem: 3643 - hl_asic_dma_pool_free(hdev, gaudi2->scratchpad_kernel_address, 3644 - gaudi2->scratchpad_bus_address); 3633 + hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address, 3634 + gaudi2->scratchpad_bus_address); 3645 3635 free_virt_msix_db_mem: 3646 3636 hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr); 3647 3637 free_cpu_accessible_dma_pool: ··· 4536 4526 reg_base = gaudi2_tpc_cfg_blocks_bases[tpc_id]; 4537 4527 reg_addr = reg_base + TPC_CFG_STALL_OFFSET; 4538 4528 reg_val = FIELD_PREP(DCORE0_TPC0_CFG_TPC_STALL_V_MASK, 4539 - !!(engine_command == HL_ENGINE_STALL)); 4529 + (engine_command == HL_ENGINE_STALL) ? 1 : 0); 4540 4530 WREG32(reg_addr, reg_val); 4541 4531 4542 4532 if (engine_command == HL_ENGINE_RESUME) { ··· 4560 4550 reg_base = gaudi2_mme_ctrl_lo_blocks_bases[mme_id]; 4561 4551 reg_addr = reg_base + MME_CTRL_LO_QM_STALL_OFFSET; 4562 4552 reg_val = FIELD_PREP(DCORE0_MME_CTRL_LO_QM_STALL_V_MASK, 4563 - !!(engine_command == HL_ENGINE_STALL)); 4553 + (engine_command == HL_ENGINE_STALL) ? 1 : 0); 4564 4554 WREG32(reg_addr, reg_val); 4565 4555 4566 4556 return 0; ··· 4581 4571 reg_base = gaudi2_dma_core_blocks_bases[edma_id]; 4582 4572 reg_addr = reg_base + EDMA_CORE_CFG_STALL_OFFSET; 4583 4573 reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK, 4584 - !!(engine_command == HL_ENGINE_STALL)); 4574 + (engine_command == HL_ENGINE_STALL) ? 
1 : 0); 4585 4575 WREG32(reg_addr, reg_val); 4586 4576 4587 4577 if (engine_command == HL_ENGINE_STALL) { ··· 6158 6148 u32 poll_timeout_us) 6159 6149 { 6160 6150 struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; 6151 + int rc = 0; 6161 6152 6162 6153 if (!driver_performs_reset) { 6163 - /* set SP to indicate reset request sent to FW */ 6164 - if (dyn_regs->cpu_rst_status) 6165 - WREG32(le32_to_cpu(dyn_regs->cpu_rst_status), CPU_RST_STATUS_NA); 6166 - else 6167 - WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA); 6154 + if (hl_is_fw_sw_ver_below(hdev, 1, 10)) { 6155 + /* set SP to indicate reset request sent to FW */ 6156 + if (dyn_regs->cpu_rst_status) 6157 + WREG32(le32_to_cpu(dyn_regs->cpu_rst_status), CPU_RST_STATUS_NA); 6158 + else 6159 + WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA); 6160 + WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq), 6161 + gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id); 6168 6162 6169 - WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq), 6170 - gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id); 6171 - 6172 - return gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us); 6163 + /* wait for f/w response */ 6164 + rc = gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us); 6165 + } else { 6166 + rc = hl_fw_send_soft_reset(hdev); 6167 + } 6168 + return rc; 6173 6169 } 6174 6170 6175 6171 /* Block access to engines, QMANs and SM during reset, these ··· 7247 7231 7248 7232 gaudi2_iterate_tpcs(hdev, &tpc_iter); 7249 7233 7250 - return tpc_idle_data.is_idle; 7234 + return *tpc_idle_data.is_idle; 7251 7235 } 7252 7236 7253 7237 static bool gaudi2_get_decoder_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, ··· 7753 7737 return !!ecc_data->is_critical; 7754 7738 } 7755 7739 7756 - /* 7757 - * gaudi2_queue_idx_dec - decrement queue index (pi/ci) and handle wrap 7758 - * 7759 - * @idx: the current pi/ci value 7760 - * @q_len: the queue length (power of 2) 
7761 - * 7762 - * @return the cyclically decremented index 7763 - */ 7764 - static inline u32 gaudi2_queue_idx_dec(u32 idx, u32 q_len) 7740 + static void print_lower_qman_data_on_err(struct hl_device *hdev, u64 qman_base) 7765 7741 { 7766 - u32 mask = q_len - 1; 7742 + u32 lo, hi, cq_ptr_size, arc_cq_ptr_size; 7743 + u64 cq_ptr, arc_cq_ptr, cp_current_inst; 7767 7744 7768 - /* 7769 - * modular decrement is equivalent to adding (queue_size -1) 7770 - * later we take LSBs to make sure the value is in the 7771 - * range [0, queue_len - 1] 7772 - */ 7773 - return (idx + q_len - 1) & mask; 7774 - } 7745 + lo = RREG32(qman_base + QM_CQ_PTR_LO_4_OFFSET); 7746 + hi = RREG32(qman_base + QM_CQ_PTR_HI_4_OFFSET); 7747 + cq_ptr = ((u64) hi) << 32 | lo; 7748 + cq_ptr_size = RREG32(qman_base + QM_CQ_TSIZE_4_OFFSET); 7775 7749 7776 - /** 7777 - * gaudi2_print_sw_config_stream_data - print SW config stream data 7778 - * 7779 - * @hdev: pointer to the habanalabs device structure 7780 - * @stream: the QMAN's stream 7781 - * @qman_base: base address of QMAN registers block 7782 - */ 7783 - static void gaudi2_print_sw_config_stream_data(struct hl_device *hdev, 7784 - u32 stream, u64 qman_base) 7785 - { 7786 - u64 cq_ptr_lo, cq_ptr_hi, cq_tsize, cq_ptr; 7787 - u32 cq_ptr_lo_off, size; 7750 + lo = RREG32(qman_base + QM_ARC_CQ_PTR_LO_OFFSET); 7751 + hi = RREG32(qman_base + QM_ARC_CQ_PTR_HI_OFFSET); 7752 + arc_cq_ptr = ((u64) hi) << 32 | lo; 7753 + arc_cq_ptr_size = RREG32(qman_base + QM_ARC_CQ_TSIZE_OFFSET); 7788 7754 7789 - cq_ptr_lo_off = mmDCORE0_TPC0_QM_CQ_PTR_LO_1 - mmDCORE0_TPC0_QM_CQ_PTR_LO_0; 7755 + lo = RREG32(qman_base + QM_CP_CURRENT_INST_LO_4_OFFSET); 7756 + hi = RREG32(qman_base + QM_CP_CURRENT_INST_HI_4_OFFSET); 7757 + cp_current_inst = ((u64) hi) << 32 | lo; 7790 7758 7791 - cq_ptr_lo = qman_base + (mmDCORE0_TPC0_QM_CQ_PTR_LO_0 - mmDCORE0_TPC0_QM_BASE) + 7792 - stream * cq_ptr_lo_off; 7793 - 7794 - cq_ptr_hi = cq_ptr_lo + (mmDCORE0_TPC0_QM_CQ_PTR_HI_0 - 
mmDCORE0_TPC0_QM_CQ_PTR_LO_0); 7795 - 7796 - cq_tsize = cq_ptr_lo + (mmDCORE0_TPC0_QM_CQ_TSIZE_0 - mmDCORE0_TPC0_QM_CQ_PTR_LO_0); 7797 - 7798 - cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo); 7799 - size = RREG32(cq_tsize); 7800 - dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %x\n", 7801 - stream, cq_ptr, size); 7802 - } 7803 - 7804 - /** 7805 - * gaudi2_print_last_pqes_on_err - print last PQEs on error 7806 - * 7807 - * @hdev: pointer to the habanalabs device structure 7808 - * @qid_base: first QID of the QMAN (out of 4 streams) 7809 - * @stream: the QMAN's stream 7810 - * @qman_base: base address of QMAN registers block 7811 - * @pr_sw_conf: if true print the SW config stream data (CQ PTR and SIZE) 7812 - */ 7813 - static void gaudi2_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base, u32 stream, 7814 - u64 qman_base, bool pr_sw_conf) 7815 - { 7816 - u32 ci, qm_ci_stream_off; 7817 - struct hl_hw_queue *q; 7818 - u64 pq_ci; 7819 - int i; 7820 - 7821 - q = &hdev->kernel_queues[qid_base + stream]; 7822 - 7823 - qm_ci_stream_off = mmDCORE0_TPC0_QM_PQ_CI_1 - mmDCORE0_TPC0_QM_PQ_CI_0; 7824 - pq_ci = qman_base + (mmDCORE0_TPC0_QM_PQ_CI_0 - mmDCORE0_TPC0_QM_BASE) + 7825 - stream * qm_ci_stream_off; 7826 - 7827 - hdev->asic_funcs->hw_queues_lock(hdev); 7828 - 7829 - if (pr_sw_conf) 7830 - gaudi2_print_sw_config_stream_data(hdev, stream, qman_base); 7831 - 7832 - ci = RREG32(pq_ci); 7833 - 7834 - /* we should start printing form ci -1 */ 7835 - ci = gaudi2_queue_idx_dec(ci, HL_QUEUE_LENGTH); 7836 - 7837 - for (i = 0; i < PQ_FETCHER_CACHE_SIZE; i++) { 7838 - struct hl_bd *bd; 7839 - u64 addr; 7840 - u32 len; 7841 - 7842 - bd = q->kernel_address; 7843 - bd += ci; 7844 - 7845 - len = le32_to_cpu(bd->len); 7846 - /* len 0 means uninitialized entry- break */ 7847 - if (!len) 7848 - break; 7849 - 7850 - addr = le64_to_cpu(bd->ptr); 7851 - 7852 - dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %x\n", 7853 - 
stream, ci, addr, len); 7854 - 7855 - /* get previous ci, wrap if needed */ 7856 - ci = gaudi2_queue_idx_dec(ci, HL_QUEUE_LENGTH); 7857 - } 7858 - 7859 - hdev->asic_funcs->hw_queues_unlock(hdev); 7860 - } 7861 - 7862 - /** 7863 - * print_qman_data_on_err - extract QMAN data on error 7864 - * 7865 - * @hdev: pointer to the habanalabs device structure 7866 - * @qid_base: first QID of the QMAN (out of 4 streams) 7867 - * @stream: the QMAN's stream 7868 - * @qman_base: base address of QMAN registers block 7869 - * 7870 - * This function attempt to extract as much data as possible on QMAN error. 7871 - * On upper CP print the SW config stream data and last 8 PQEs. 7872 - * On lower CP print SW config data and last PQEs of ALL 4 upper CPs 7873 - */ 7874 - static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base, u32 stream, u64 qman_base) 7875 - { 7876 - u32 i; 7877 - 7878 - if (stream != QMAN_STREAMS) { 7879 - gaudi2_print_last_pqes_on_err(hdev, qid_base, stream, qman_base, true); 7880 - return; 7881 - } 7882 - 7883 - gaudi2_print_sw_config_stream_data(hdev, stream, qman_base); 7884 - 7885 - for (i = 0 ; i < QMAN_STREAMS ; i++) 7886 - gaudi2_print_last_pqes_on_err(hdev, qid_base, i, qman_base, false); 7759 + dev_info(hdev->dev, 7760 + "LowerQM. 
CQ: {ptr %#llx, size %u}, ARC_CQ: {ptr %#llx, size %u}, CP: {instruction %#llx}\n", 7761 + cq_ptr, cq_ptr_size, arc_cq_ptr, arc_cq_ptr_size, cp_current_inst); 7887 7762 } 7888 7763 7889 7764 static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type, ··· 7795 7888 continue; 7796 7889 7797 7890 if (i == QMAN_STREAMS) { 7798 - snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP"); 7799 - num_error_causes = GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE; 7891 + snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerQM"); 7892 + num_error_causes = GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE; 7800 7893 } else { 7801 7894 snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i); 7802 7895 num_error_causes = GAUDI2_NUM_OF_QM_ERR_CAUSE; ··· 7807 7900 gaudi2_print_event(hdev, event_type, true, 7808 7901 "%s. err cause: %s", reg_desc, 7809 7902 i == QMAN_STREAMS ? 7810 - gaudi2_qman_lower_cp_error_cause[j] : 7903 + gaudi2_lower_qman_error_cause[j] : 7811 7904 gaudi2_qman_error_cause[j]); 7812 7905 error_count++; 7813 7906 } 7814 7907 7815 - print_qman_data_on_err(hdev, qid_base, i, qman_base); 7908 + if (i == QMAN_STREAMS) 7909 + print_lower_qman_data_on_err(hdev, qman_base); 7816 7910 } 7817 7911 7818 7912 arb_err_val = RREG32(arb_err_addr); ··· 7941 8033 u8 module_sub_idx, u64 *event_mask) 7942 8034 { 7943 8035 bool via_sft = false; 7944 - u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id; 8036 + u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id, binned_idx; 7945 8037 u64 hbw_rtr_mstr_if_base_addr, lbw_rtr_mstr_if_base_addr; 7946 8038 u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0; 7947 8039 u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0; ··· 7949 8041 7950 8042 switch (module) { 7951 8043 case RAZWI_TPC: 8044 + sprintf(initiator_name, "TPC_%u", module_idx); 8045 + if (hdev->tpc_binning) { 8046 + binned_idx = __ffs(hdev->tpc_binning); 8047 + if (binned_idx == module_idx) 8048 + module_idx = TPC_ID_DCORE0_TPC6; 8049 + } 8050 + 7952 8051 hbw_rtr_id = 
gaudi2_tpc_initiator_hbw_rtr_id[module_idx]; 7953 8052 7954 - if (hl_is_fw_ver_below_1_9(hdev) && 8053 + if (hl_is_fw_sw_ver_below(hdev, 1, 9) && 7955 8054 !hdev->asic_prop.fw_security_enabled && 7956 8055 ((module_idx == 0) || (module_idx == 1))) 7957 8056 lbw_rtr_id = DCORE0_RTR0; 7958 8057 else 7959 8058 lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx]; 7960 - sprintf(initiator_name, "TPC_%u", module_idx); 7961 8059 break; 7962 8060 case RAZWI_MME: 7963 8061 sprintf(initiator_name, "MME_%u", module_idx); ··· 8022 8108 sprintf(initiator_name, "NIC_%u", module_idx); 8023 8109 break; 8024 8110 case RAZWI_DEC: 8111 + sprintf(initiator_name, "DEC_%u", module_idx); 8112 + if (hdev->decoder_binning) { 8113 + binned_idx = __ffs(hdev->decoder_binning); 8114 + if (binned_idx == module_idx) 8115 + module_idx = DEC_ID_PCIE_VDEC1; 8116 + } 8025 8117 hbw_rtr_id = gaudi2_dec_initiator_hbw_rtr_id[module_idx]; 8026 8118 lbw_rtr_id = gaudi2_dec_initiator_lbw_rtr_id[module_idx]; 8027 - sprintf(initiator_name, "DEC_%u", module_idx); 8028 8119 break; 8029 8120 case RAZWI_ROT: 8030 8121 hbw_rtr_id = gaudi2_rot_initiator_hbw_rtr_id[module_idx]; ··· 8170 8251 u16 num_of_eng, eng_id[PSOC_RAZWI_MAX_ENG_PER_RTR]; 8171 8252 char eng_name_str[PSOC_RAZWI_ENG_STR_SIZE]; 8172 8253 bool razwi_happened = false; 8254 + u64 addr; 8173 8255 int i; 8174 8256 8175 8257 num_of_eng = gaudi2_psoc_razwi_get_engines(common_razwi_info, ARRAY_SIZE(common_razwi_info), ··· 8189 8269 if (RREG32(base[i] + DEC_RAZWI_HBW_AW_SET)) { 8190 8270 addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_HI); 8191 8271 addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_LO); 8192 - dev_err(hdev->dev, 8272 + addr = ((u64)addr_hi << 32) + addr_lo; 8273 + if (addr) { 8274 + dev_err(hdev->dev, 8193 8275 "PSOC HBW AW RAZWI: %s, address (aligned to 128 byte): 0x%llX\n", 8194 - eng_name_str, ((u64)addr_hi << 32) + addr_lo); 8195 - hl_handle_razwi(hdev, ((u64)addr_hi << 32) + addr_lo, &eng_id[0], 8276 + eng_name_str, addr); 8277 + 
hl_handle_razwi(hdev, addr, &eng_id[0], 8196 8278 num_of_eng, HL_RAZWI_HBW | HL_RAZWI_WRITE, event_mask); 8197 - razwi_happened = true; 8279 + razwi_happened = true; 8280 + } 8198 8281 } 8199 8282 8200 8283 if (RREG32(base[i] + DEC_RAZWI_HBW_AR_SET)) { 8201 8284 addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_HI); 8202 8285 addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_LO); 8203 - dev_err(hdev->dev, 8286 + addr = ((u64)addr_hi << 32) + addr_lo; 8287 + if (addr) { 8288 + dev_err(hdev->dev, 8204 8289 "PSOC HBW AR RAZWI: %s, address (aligned to 128 byte): 0x%llX\n", 8205 - eng_name_str, ((u64)addr_hi << 32) + addr_lo); 8206 - hl_handle_razwi(hdev, ((u64)addr_hi << 32) + addr_lo, &eng_id[0], 8290 + eng_name_str, addr); 8291 + hl_handle_razwi(hdev, addr, &eng_id[0], 8207 8292 num_of_eng, HL_RAZWI_HBW | HL_RAZWI_READ, event_mask); 8208 - razwi_happened = true; 8293 + razwi_happened = true; 8294 + } 8209 8295 } 8210 8296 8211 8297 if (RREG32(base[i] + DEC_RAZWI_LBW_AW_SET)) { 8212 8298 addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AW_ADDR); 8213 - dev_err(hdev->dev, 8299 + if (addr_lo) { 8300 + dev_err(hdev->dev, 8214 8301 "PSOC LBW AW RAZWI: %s, address (aligned to 128 byte): 0x%X\n", 8215 8302 eng_name_str, addr_lo); 8216 - hl_handle_razwi(hdev, addr_lo, &eng_id[0], 8303 + hl_handle_razwi(hdev, addr_lo, &eng_id[0], 8217 8304 num_of_eng, HL_RAZWI_LBW | HL_RAZWI_WRITE, event_mask); 8218 - razwi_happened = true; 8305 + razwi_happened = true; 8306 + } 8219 8307 } 8220 8308 8221 8309 if (RREG32(base[i] + DEC_RAZWI_LBW_AR_SET)) { 8222 8310 addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AR_ADDR); 8223 - dev_err(hdev->dev, 8224 - "PSOC LBW AR RAZWI: %s, address (aligned to 128 byte): 0x%X\n", 8225 - eng_name_str, addr_lo); 8226 - hl_handle_razwi(hdev, addr_lo, &eng_id[0], 8311 + if (addr_lo) { 8312 + dev_err(hdev->dev, 8313 + "PSOC LBW AR RAZWI: %s, address (aligned to 128 byte): 0x%X\n", 8314 + eng_name_str, addr_lo); 8315 + hl_handle_razwi(hdev, addr_lo, &eng_id[0], 8227 8316 
num_of_eng, HL_RAZWI_LBW | HL_RAZWI_READ, event_mask); 8228 - razwi_happened = true; 8317 + razwi_happened = true; 8318 + } 8229 8319 } 8230 8320 /* In common case the loop will break, when there is only one engine id, or 8231 8321 * several engines with the same router. The exceptional case is with psoc razwi ··· 8719 8789 return error_count; 8720 8790 } 8721 8791 8722 - static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, int sts_addr) 8792 + static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, u64 intr_cause) 8723 8793 { 8724 - u32 error_count = 0, sts_val = RREG32(sts_addr); 8794 + u32 error_count = 0; 8725 8795 int i; 8726 8796 8727 8797 for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) 8728 - if (sts_val & BIT(i)) { 8798 + if (intr_cause & BIT(i)) { 8729 8799 gaudi2_print_event(hdev, event_type, true, 8730 8800 "err cause: %s", gaudi2_dma_core_interrupts_cause[i]); 8731 8801 error_count++; ··· 8734 8804 hl_check_for_glbl_errors(hdev); 8735 8805 8736 8806 return error_count; 8737 - } 8738 - 8739 - static int gaudi2_handle_pdma_core_event(struct hl_device *hdev, u16 event_type, int pdma_idx) 8740 - { 8741 - u32 sts_addr; 8742 - 8743 - sts_addr = mmPDMA0_CORE_ERR_CAUSE + pdma_idx * PDMA_OFFSET; 8744 - return gaudi2_handle_dma_core_event(hdev, event_type, sts_addr); 8745 - } 8746 - 8747 - static int gaudi2_handle_edma_core_event(struct hl_device *hdev, u16 event_type, int edma_idx) 8748 - { 8749 - static const int edma_event_index_map[] = {2, 3, 0, 1, 6, 7, 4, 5}; 8750 - u32 sts_addr, index; 8751 - 8752 - index = edma_event_index_map[edma_idx]; 8753 - 8754 - sts_addr = mmDCORE0_EDMA0_CORE_ERR_CAUSE + 8755 - DCORE_OFFSET * (index / NUM_OF_EDMA_PER_DCORE) + 8756 - DCORE_EDMA_OFFSET * (index % NUM_OF_EDMA_PER_DCORE); 8757 - return gaudi2_handle_dma_core_event(hdev, event_type, sts_addr); 8758 8807 } 8759 8808 8760 8809 static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 
*event_mask) ··· 8775 8866 u32 error_count = 0; 8776 8867 int i; 8777 8868 8869 + gaudi2_print_event(hdev, event_type, true, 8870 + "intr_cause_data: %#llx", intr_cause_data); 8871 + 8778 8872 for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) { 8779 8873 if (!(intr_cause_data & BIT_ULL(i))) 8780 8874 continue; ··· 8786 8874 "err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]); 8787 8875 error_count++; 8788 8876 8789 - switch (intr_cause_data & BIT_ULL(i)) { 8790 - case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK: 8791 - hl_check_for_glbl_errors(hdev); 8792 - break; 8793 - case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK: 8794 - gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask); 8795 - break; 8796 - } 8877 + /* 8878 + * Always check for LBW and HBW additional info as the indication itself is 8879 + * sometimes missing 8880 + */ 8797 8881 } 8882 + 8883 + hl_check_for_glbl_errors(hdev); 8884 + gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask); 8798 8885 8799 8886 return error_count; 8800 8887 } ··· 8848 8937 addr <<= 32; 8849 8938 addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA)); 8850 8939 8851 - if (!is_pmmu) 8852 - addr = gaudi2_mmu_descramble_addr(hdev, addr); 8940 + if (is_pmmu) { 8941 + dev_err_ratelimited(hdev->dev, "PMMU page fault on va 0x%llx\n", addr); 8942 + } else { 8853 8943 8854 - dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx\n", 8855 - is_pmmu ? 
"PMMU" : "HMMU", addr); 8944 + addr = gaudi2_mmu_descramble_addr(hdev, addr); 8945 + addr &= HW_UNSCRAMBLED_BITS_MASK; 8946 + dev_err_ratelimited(hdev->dev, "HMMU page fault on va range 0x%llx - 0x%llx\n", 8947 + addr, addr + ~HW_UNSCRAMBLED_BITS_MASK); 8948 + } 8949 + 8856 8950 hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask); 8857 8951 8858 8952 WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0); ··· 9625 9709 case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP: 9626 9710 case GAUDI2_EVENT_KDMA0_CORE: 9627 9711 error_count = gaudi2_handle_kdma_core_event(hdev, event_type, 9628 - le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); 9712 + le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); 9629 9713 event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; 9630 9714 break; 9631 9715 9632 9716 case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_HDMA5_CORE: 9633 - index = event_type - GAUDI2_EVENT_HDMA2_CORE; 9634 - error_count = gaudi2_handle_edma_core_event(hdev, event_type, index); 9717 + error_count = gaudi2_handle_dma_core_event(hdev, event_type, 9718 + le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); 9635 9719 event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; 9636 9720 break; 9637 9721 9638 9722 case GAUDI2_EVENT_PDMA0_CORE ... GAUDI2_EVENT_PDMA1_CORE: 9639 - index = event_type - GAUDI2_EVENT_PDMA0_CORE; 9640 - error_count = gaudi2_handle_pdma_core_event(hdev, event_type, index); 9723 + error_count = gaudi2_handle_dma_core_event(hdev, event_type, 9724 + le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); 9641 9725 event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; 9642 9726 break; 9643 9727
+1 -1
drivers/accel/habanalabs/gaudi2/gaudi2P.h
··· 98 98 #define GAUDI2_DEFAULT_CARD_NAME "HL225" 99 99 100 100 #define QMAN_STREAMS 4 101 - #define PQ_FETCHER_CACHE_SIZE 8 101 + 102 102 #define NUM_OF_MME_SBTE_PORTS 5 103 103 #define NUM_OF_MME_WB_PORTS 2 104 104
+6 -9
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
··· 479 479 mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS, 480 480 mmDCORE0_EDMA0_CORE_CTX_IDX, 481 481 mmDCORE0_EDMA0_CORE_CTX_IDX_INC, 482 + mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG, 482 483 mmDCORE0_EDMA0_QM_CQ_CFG0_0, 483 484 mmDCORE0_EDMA0_QM_CQ_CFG0_1, 484 485 mmDCORE0_EDMA0_QM_CQ_CFG0_2, ··· 1534 1533 mmDCORE0_TPC0_CFG_QM_KERNEL_CONFIG, 1535 1534 mmDCORE0_TPC0_CFG_QM_KERNEL_ID, 1536 1535 mmDCORE0_TPC0_CFG_QM_POWER_LOOP, 1536 + mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_0, 1537 + mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_1, 1538 + mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_2, 1539 + mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_3, 1537 1540 mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_LO, 1538 1541 mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_HI, 1539 1542 mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_LO, ··· 1546 1541 mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_HI, 1547 1542 mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_LO, 1548 1543 mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_HI, 1544 + mmDCORE0_TPC0_CFG_FP8_143_BIAS, 1549 1545 mmDCORE0_TPC0_CFG_ROUND_CSR, 1550 1546 mmDCORE0_TPC0_CFG_CONV_ROUND_CSR, 1551 1547 mmDCORE0_TPC0_CFG_SEMAPHORE, ··· 3447 3441 gaudi2_pb_thermal_sensor0, 3448 3442 ARRAY_SIZE(gaudi2_pb_thermal_sensor0), NULL, HL_PB_NA); 3449 3443 } 3450 - 3451 - /* HBM */ 3452 - /* Temporarily skip until SW-63348 is solved 3453 - * instance_offset = mmHBM1_MC0_BASE - mmHBM0_MC0_BASE; 3454 - * rc |= hl_init_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, GAUDI2_HBM_NUM, 3455 - * instance_offset, gaudi2_pb_hbm, 3456 - * ARRAY_SIZE(gaudi2_pb_hbm), NULL, HL_PB_NA, 3457 - * prop->dram_enabled_mask); 3458 - */ 3459 3444 3460 3445 /* Scheduler ARCs */ 3461 3446 instance_offset = mmARC_FARM_ARC1_AUX_BASE - mmARC_FARM_ARC0_AUX_BASE;
-3
drivers/accel/habanalabs/goya/goya.c
··· 2671 2671 u64 hop0_addr; 2672 2672 int rc, i; 2673 2673 2674 - if (!hdev->mmu_enable) 2675 - return 0; 2676 - 2677 2674 if (goya->hw_cap_initialized & HW_CAP_MMU) 2678 2675 return 0; 2679 2676
+2 -7
drivers/accel/habanalabs/goya/goya_coresight.c
··· 371 371 return false; 372 372 } 373 373 374 - if (hdev->mmu_enable) { 375 - range_start = prop->dmmu.start_addr; 376 - range_end = prop->dmmu.end_addr; 377 - } else { 378 - range_start = prop->dram_user_base_address; 379 - range_end = prop->dram_end_address; 380 - } 374 + range_start = prop->dmmu.start_addr; 375 + range_end = prop->dmmu.end_addr; 381 376 382 377 return hl_mem_area_inside_range(addr, size, range_start, range_end); 383 378 }
+13 -9
drivers/accel/habanalabs/include/common/cpucp_if.h
··· 359 359 union { 360 360 __le64 data_placeholder; 361 361 struct hl_eq_ecc_data ecc_data; 362 - struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Gaudi1 HBM */ 362 + struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */ 363 363 struct hl_eq_sm_sei_data sm_sei_data; 364 364 struct cpucp_pkt_sync_err pkt_sync_err; 365 365 struct hl_eq_fw_alive fw_alive; ··· 653 653 * which address is passed via the CpuCp packet. In addition, the host's driver 654 654 * passes the max size it allows the CpuCP to write to the structure, to prevent 655 655 * data corruption in case of mismatched driver/FW versions. 656 - * Relevant only to Gaudi. 656 + * Obsolete. 657 657 * 658 658 * CPUCP_PACKET_GENERIC_PASSTHROUGH - 659 659 * Generic opcode for all firmware info that is only passed to host ··· 665 665 * 666 666 * CPUCP_PACKET_REGISTER_INTERRUPTS - 667 667 * Packet to register interrupts indicating LKD is ready to receive events from FW. 668 + * 669 + * CPUCP_PACKET_SOFT_RESET - 670 + * Packet to perform soft-reset. 668 671 */ 669 672 670 673 enum cpucp_packet_id { ··· 734 731 CPUCP_PACKET_RESERVED11, /* not used */ 735 732 CPUCP_PACKET_RESERVED12, /* internal */ 736 733 CPUCP_PACKET_REGISTER_INTERRUPTS, /* internal */ 734 + CPUCP_PACKET_SOFT_RESET, /* internal */ 737 735 CPUCP_PACKET_ID_MAX /* must be last */ 738 736 }; 739 737 ··· 868 864 enum cpucp_led_index { 869 865 CPUCP_LED0_INDEX = 0, 870 866 CPUCP_LED1_INDEX, 871 - CPUCP_LED2_INDEX 867 + CPUCP_LED2_INDEX, 868 + CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX 872 869 }; 873 870 874 871 /* 875 872 * enum cpucp_packet_rc - Error return code 876 873 * @cpucp_packet_success -> in case of success. 877 - * @cpucp_packet_invalid -> this is to support Goya and Gaudi platform. 874 + * @cpucp_packet_invalid -> this is to support first generation platforms. 878 875 * @cpucp_packet_fault -> in case of processing error like failing to 879 876 * get device binding or semaphore etc. 880 - * @cpucp_packet_invalid_pkt -> when cpucp packet is un-supported. 
This is 881 - * supported Greco onwards. 877 + * @cpucp_packet_invalid_pkt -> when cpucp packet is un-supported. 882 878 * @cpucp_packet_invalid_params -> when checking parameter like length of buffer 883 - * or attribute value etc. Supported Greco onwards. 879 + * or attribute value etc. 884 880 * @cpucp_packet_rc_max -> It indicates size of enum so should be at last. 885 881 */ 886 882 enum cpucp_packet_rc { ··· 1365 1361 #define DCORE_MON_REGS_SZ 512 1366 1362 /* 1367 1363 * struct dcore_monitor_regs_data - DCORE monitor regs data. 1368 - * the structure follows sync manager block layout. relevant only to Gaudi. 1364 + * the structure follows sync manager block layout. Obsolete. 1369 1365 * @mon_pay_addrl: array of payload address low bits. 1370 1366 * @mon_pay_addrh: array of payload address high bits. 1371 1367 * @mon_pay_data: array of payload data. ··· 1380 1376 __le32 mon_status[DCORE_MON_REGS_SZ]; 1381 1377 }; 1382 1378 1383 - /* contains SM data for each SYNC_MNGR (relevant only to Gaudi) */ 1379 + /* contains SM data for each SYNC_MNGR (Obsolete) */ 1384 1380 struct cpucp_monitor_dump { 1385 1381 struct dcore_monitor_regs_data sync_mngr_w_s; 1386 1382 struct dcore_monitor_regs_data sync_mngr_e_s;
+7 -34
drivers/accel/habanalabs/include/common/hl_boot_if.h
··· 35 35 CPU_BOOT_ERR_TPM_FAIL = 20, 36 36 CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21, 37 37 CPU_BOOT_ERR_EEPROM_FAIL = 22, 38 + CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23, 38 39 CPU_BOOT_ERR_ENABLED = 31, 39 40 CPU_BOOT_ERR_SCND_EN = 63, 40 41 CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */ ··· 52 51 (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \ 53 52 (1 << CPU_BOOT_ERR_BINNING_FAIL) | \ 54 53 (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \ 54 + (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \ 55 55 (1 << CPU_BOOT_ERR_EEPROM_FAIL)) 56 56 57 57 /* ··· 134 132 * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults 135 133 * are used. 136 134 * 135 + * CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm 136 + * memories. Boot disabled until reset. 137 + * 137 138 * CPU_BOOT_ERR0_ENABLED Error registers enabled. 138 139 * This is a main indication that the 139 140 * running FW populates the error ··· 162 157 #define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL) 163 158 #define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL) 164 159 #define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL) 160 + #define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) 165 161 #define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED) 166 162 #define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED) 167 163 ··· 750 744 }; 751 745 }; 752 746 753 - /** 754 - * HL_MODULES_MAX_NUM is determined by the size of modules_mask in struct 755 - * hl_component_versions 756 - */ 757 - enum hl_modules { 758 - HL_MODULES_BOOT_INFO = 0, 759 - HL_MODULES_EEPROM, 760 - HL_MODULES_FDT, 761 - HL_MODULES_I2C, 762 - HL_MODULES_LZ4, 763 - HL_MODULES_MBEDTLS, 764 - HL_MODULES_MAX_NUM = 16 765 - }; 766 - 767 - /** 768 - * HL_COMPONENTS_MAX_NUM is determined by the size of components_mask in 769 - * struct cpucp_versions 770 - */ 771 - enum hl_components { 772 - HL_COMPONENTS_PID = 0, 773 - HL_COMPONENTS_MGMT, 774 - 
HL_COMPONENTS_PREBOOT, 775 - HL_COMPONENTS_PPBOOT, 776 - HL_COMPONENTS_ARMCP, 777 - HL_COMPONENTS_CPLD, 778 - HL_COMPONENTS_UBOOT, 779 - HL_COMPONENTS_FUSE, 780 - HL_COMPONENTS_MAX_NUM = 16 781 - }; 782 - 783 747 #define NAME_MAX_LEN 32 /* bytes */ 784 748 struct hl_module_data { 785 749 __u8 name[NAME_MAX_LEN]; ··· 763 787 * @component: version of the component itself. 764 788 * @fw_os: Firmware OS Version. 765 789 * @comp_name: Name of the component. 766 - * @modules_mask: i'th bit (from LSB) is a flag - on if module i in enum 767 - * hl_modules is used. 768 790 * @modules_counter: number of set bits in modules_mask. 769 791 * @reserved: reserved for future use. 770 792 * @modules: versions of the component's modules. Elborated explanation in ··· 774 800 __u8 component[VERSION_MAX_LEN]; 775 801 __u8 fw_os[VERSION_MAX_LEN]; 776 802 __u8 comp_name[NAME_MAX_LEN]; 777 - __le16 modules_mask; 778 803 __u8 modules_counter; 779 - __u8 reserved[1]; 804 + __u8 reserved[3]; 780 805 struct hl_module_data modules[]; 781 806 }; 782 807
+11
drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
··· 242 242 #define QM_FENCE2_OFFSET (mmPDMA0_QM_CP_FENCE2_RDATA_0 - mmPDMA0_QM_BASE) 243 243 #define QM_SEI_STATUS_OFFSET (mmPDMA0_QM_SEI_STATUS - mmPDMA0_QM_BASE) 244 244 245 + #define QM_CQ_PTR_LO_4_OFFSET (mmPDMA0_QM_CQ_PTR_LO_4 - mmPDMA0_QM_BASE) 246 + #define QM_CQ_PTR_HI_4_OFFSET (mmPDMA0_QM_CQ_PTR_HI_4 - mmPDMA0_QM_BASE) 247 + #define QM_CQ_TSIZE_4_OFFSET (mmPDMA0_QM_CQ_TSIZE_4 - mmPDMA0_QM_BASE) 248 + 249 + #define QM_ARC_CQ_PTR_LO_OFFSET (mmPDMA0_QM_ARC_CQ_PTR_LO - mmPDMA0_QM_BASE) 250 + #define QM_ARC_CQ_PTR_HI_OFFSET (mmPDMA0_QM_ARC_CQ_PTR_HI - mmPDMA0_QM_BASE) 251 + #define QM_ARC_CQ_TSIZE_OFFSET (mmPDMA0_QM_ARC_CQ_TSIZE - mmPDMA0_QM_BASE) 252 + 253 + #define QM_CP_CURRENT_INST_LO_4_OFFSET (mmPDMA0_QM_CP_CURRENT_INST_LO_4 - mmPDMA0_QM_BASE) 254 + #define QM_CP_CURRENT_INST_HI_4_OFFSET (mmPDMA0_QM_CP_CURRENT_INST_HI_4 - mmPDMA0_QM_BASE) 255 + 245 256 #define SFT_OFFSET (mmSFT1_HBW_RTR_IF0_RTR_H3_BASE - mmSFT0_HBW_RTR_IF0_RTR_H3_BASE) 246 257 #define SFT_IF_RTR_OFFSET (mmSFT0_HBW_RTR_IF1_RTR_H3_BASE - mmSFT0_HBW_RTR_IF0_RTR_H3_BASE) 247 258
+1 -1
drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
··· 62 62 u32 fake_security_enable : 1; 63 63 u32 fake_sig_validation_en : 1; 64 64 u32 bist_skip_enable : 1; 65 - u32 bist_need_iatu_config : 1; 65 + u32 reserved1 : 1; 66 66 u32 fake_bis_compliant : 1; 67 67 u32 wd_rst_cause_arm : 1; 68 68 u32 wd_rst_cause_arcpid : 1;
+3 -11
drivers/gpu/drm/Kconfig
··· 95 95 config DRM_KMS_HELPER 96 96 tristate 97 97 depends on DRM 98 + select FB_SYS_HELPERS_DEFERRED if DRM_FBDEV_EMULATION 98 99 help 99 100 CRTC helpers for KMS drivers. 100 101 ··· 133 132 bool "Enable legacy fbdev support for your modesetting driver" 134 133 depends on DRM_KMS_HELPER 135 134 depends on FB=y || FB=DRM_KMS_HELPER 136 - select FB_CFB_FILLRECT 137 - select FB_CFB_COPYAREA 138 - select FB_CFB_IMAGEBLIT 139 - select FB_DEFERRED_IO 140 - select FB_SYS_FOPS 141 - select FB_SYS_FILLRECT 142 - select FB_SYS_COPYAREA 143 - select FB_SYS_IMAGEBLIT 144 135 select FRAMEBUFFER_CONSOLE if !EXPERT 145 136 select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE 146 137 default y ··· 216 223 config DRM_GEM_DMA_HELPER 217 224 tristate 218 225 depends on DRM 226 + select FB_SYS_HELPERS if DRM_FBDEV_EMULATION 219 227 help 220 228 Choose this if you need the GEM DMA helper functions 221 229 ··· 289 295 290 296 source "drivers/gpu/drm/atmel-hlcdc/Kconfig" 291 297 292 - source "drivers/gpu/drm/rcar-du/Kconfig" 293 - 294 - source "drivers/gpu/drm/shmobile/Kconfig" 298 + source "drivers/gpu/drm/renesas/Kconfig" 295 299 296 300 source "drivers/gpu/drm/sun4i/Kconfig" 297 301
+1 -2
drivers/gpu/drm/Makefile
··· 156 156 obj-$(CONFIG_DRM_AST) += ast/ 157 157 obj-$(CONFIG_DRM_ARMADA) += armada/ 158 158 obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/ 159 - obj-y += rcar-du/ 160 - obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ 159 + obj-y += renesas/ 161 160 obj-y += omapdrm/ 162 161 obj-$(CONFIG_DRM_SUN4I) += sun4i/ 163 162 obj-y += tilcdc/
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2747 2747 .compat_ioctl = amdgpu_kms_compat_ioctl, 2748 2748 #endif 2749 2749 #ifdef CONFIG_PROC_FS 2750 - .show_fdinfo = amdgpu_show_fdinfo 2750 + .show_fdinfo = drm_show_fdinfo, 2751 2751 #endif 2752 2752 }; 2753 2753 ··· 2802 2802 .dumb_map_offset = amdgpu_mode_dumb_mmap, 2803 2803 .fops = &amdgpu_driver_kms_fops, 2804 2804 .release = &amdgpu_driver_release_kms, 2805 + #ifdef CONFIG_PROC_FS 2806 + .show_fdinfo = amdgpu_show_fdinfo, 2807 + #endif 2805 2808 2806 2809 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 2807 2810 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+15 -17
drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
··· 53 53 [AMDGPU_HW_IP_VCN_JPEG] = "jpeg", 54 54 }; 55 55 56 - void amdgpu_show_fdinfo(struct seq_file *m, struct file *f) 56 + void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) 57 57 { 58 - struct drm_file *file = f->private_data; 59 58 struct amdgpu_device *adev = drm_to_adev(file->minor->dev); 60 59 struct amdgpu_fpriv *fpriv = file->driver_priv; 61 60 struct amdgpu_vm *vm = &fpriv->vm; ··· 86 87 * ****************************************************************** 87 88 */ 88 89 89 - seq_printf(m, "pasid:\t%u\n", fpriv->vm.pasid); 90 - seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name); 91 - seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn); 92 - seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context); 93 - seq_printf(m, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); 94 - seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); 95 - seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); 96 - seq_printf(m, "amd-memory-visible-vram:\t%llu KiB\n", 90 + drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid); 91 + drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name); 92 + drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn); 93 + drm_printf(p, "drm-client-id:\t%Lu\n", vm->immediate.fence_context); 94 + drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); 95 + drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); 96 + drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); 97 + drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n", 97 98 stats.visible_vram/1024UL); 98 - seq_printf(m, "amd-evicted-vram:\t%llu KiB\n", 99 + drm_printf(p, "amd-evicted-vram:\t%llu KiB\n", 99 100 stats.evicted_vram/1024UL); 100 - seq_printf(m, "amd-evicted-visible-vram:\t%llu KiB\n", 101 + drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n", 101 102 stats.evicted_visible_vram/1024UL); 102 - seq_printf(m, "amd-requested-vram:\t%llu KiB\n", 103 + 
drm_printf(p, "amd-requested-vram:\t%llu KiB\n", 103 104 stats.requested_vram/1024UL); 104 - seq_printf(m, "amd-requested-visible-vram:\t%llu KiB\n", 105 + drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n", 105 106 stats.requested_visible_vram/1024UL); 106 - seq_printf(m, "amd-requested-gtt:\t%llu KiB\n", 107 + drm_printf(p, "amd-requested-gtt:\t%llu KiB\n", 107 108 stats.requested_gtt/1024UL); 108 - 109 109 for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) { 110 110 if (!usage[hw_ip]) 111 111 continue; 112 112 113 - seq_printf(m, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip], 113 + drm_printf(p, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip], 114 114 ktime_to_ns(usage[hw_ip])); 115 115 } 116 116 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
··· 37 37 #include "amdgpu_ids.h" 38 38 39 39 uint32_t amdgpu_get_ip_count(struct amdgpu_device *adev, int id); 40 - void amdgpu_show_fdinfo(struct seq_file *m, struct file *f); 40 + void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file); 41 41 42 42 #endif
+1
drivers/gpu/drm/armada/Kconfig
··· 3 3 tristate "DRM support for Marvell Armada SoCs" 4 4 depends on DRM && HAVE_CLK && ARM && MMU 5 5 select DRM_KMS_HELPER 6 + select FB_IO_HELPERS if DRM_FBDEV_EMULATION 6 7 help 7 8 Support the "LCD" controllers found on the Marvell Armada 510 8 9 devices. There are two controllers on the device, each controller
+2 -5
drivers/gpu/drm/armada/armada_fbdev.c
··· 5 5 */ 6 6 7 7 #include <linux/errno.h> 8 + #include <linux/fb.h> 8 9 #include <linux/kernel.h> 9 10 #include <linux/module.h> 10 11 ··· 34 33 35 34 static const struct fb_ops armada_fb_ops = { 36 35 .owner = THIS_MODULE, 36 + FB_DEFAULT_IO_OPS, 37 37 DRM_FB_HELPER_DEFAULT_OPS, 38 - .fb_read = drm_fb_helper_cfb_read, 39 - .fb_write = drm_fb_helper_cfb_write, 40 - .fb_fillrect = drm_fb_helper_cfb_fillrect, 41 - .fb_copyarea = drm_fb_helper_cfb_copyarea, 42 - .fb_imageblit = drm_fb_helper_cfb_imageblit, 43 38 .fb_destroy = armada_fbdev_fb_destroy, 44 39 }; 45 40
+1
drivers/gpu/drm/bridge/Kconfig
··· 227 227 select DRM_KMS_HELPER 228 228 select DRM_MIPI_DSI 229 229 select DRM_PANEL_BRIDGE 230 + select GENERIC_PHY_MIPI_DPHY 230 231 help 231 232 The Samsung MIPI DSIM bridge controller driver. 232 233 This MIPI DSIM bridge can be found it on Exynos SoCs and
+1 -1
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 1393 1393 .of_match_table = adv7511_of_ids, 1394 1394 }, 1395 1395 .id_table = adv7511_i2c_ids, 1396 - .probe_new = adv7511_probe, 1396 + .probe = adv7511_probe, 1397 1397 .remove = adv7511_remove, 1398 1398 }; 1399 1399
+1 -1
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
··· 815 815 .name = "anx6345", 816 816 .of_match_table = of_match_ptr(anx6345_match_table), 817 817 }, 818 - .probe_new = anx6345_i2c_probe, 818 + .probe = anx6345_i2c_probe, 819 819 .remove = anx6345_i2c_remove, 820 820 .id_table = anx6345_id, 821 821 };
+1 -1
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
··· 1389 1389 .name = "anx7814", 1390 1390 .of_match_table = of_match_ptr(anx78xx_match_table), 1391 1391 }, 1392 - .probe_new = anx78xx_i2c_probe, 1392 + .probe = anx78xx_i2c_probe, 1393 1393 .remove = anx78xx_i2c_remove, 1394 1394 .id_table = anx78xx_id, 1395 1395 };
+1 -1
drivers/gpu/drm/bridge/analogix/anx7625.c
··· 2800 2800 .of_match_table = anx_match_table, 2801 2801 .pm = &anx7625_pm_ops, 2802 2802 }, 2803 - .probe_new = anx7625_i2c_probe, 2803 + .probe = anx7625_i2c_probe, 2804 2804 .remove = anx7625_i2c_remove, 2805 2805 2806 2806 .id_table = anx7625_id,
+1 -1
drivers/gpu/drm/bridge/chipone-icn6211.c
··· 795 795 MODULE_DEVICE_TABLE(i2c, chipone_i2c_id); 796 796 797 797 static struct i2c_driver chipone_i2c_driver = { 798 - .probe_new = chipone_i2c_probe, 798 + .probe = chipone_i2c_probe, 799 799 .id_table = chipone_i2c_id, 800 800 .driver = { 801 801 .name = "chipone-icn6211-i2c",
+1 -1
drivers/gpu/drm/bridge/chrontel-ch7033.c
··· 603 603 MODULE_DEVICE_TABLE(i2c, ch7033_ids); 604 604 605 605 static struct i2c_driver ch7033_driver = { 606 - .probe_new = ch7033_probe, 606 + .probe = ch7033_probe, 607 607 .remove = ch7033_remove, 608 608 .driver = { 609 609 .name = "ch7033",
+1 -1
drivers/gpu/drm/bridge/cros-ec-anx7688.c
··· 173 173 MODULE_DEVICE_TABLE(of, cros_ec_anx7688_bridge_match_table); 174 174 175 175 static struct i2c_driver cros_ec_anx7688_bridge_driver = { 176 - .probe_new = cros_ec_anx7688_bridge_probe, 176 + .probe = cros_ec_anx7688_bridge_probe, 177 177 .remove = cros_ec_anx7688_bridge_remove, 178 178 .driver = { 179 179 .name = "cros-ec-anx7688-bridge",
+32 -29
drivers/gpu/drm/bridge/display-connector.c
··· 24 24 struct gpio_desc *hpd_gpio; 25 25 int hpd_irq; 26 26 27 - struct regulator *dp_pwr; 27 + struct regulator *supply; 28 28 struct gpio_desc *ddc_en; 29 29 }; 30 30 ··· 191 191 return IRQ_HANDLED; 192 192 } 193 193 194 + static int display_connector_get_supply(struct platform_device *pdev, 195 + struct display_connector *conn, 196 + const char *name) 197 + { 198 + conn->supply = devm_regulator_get_optional(&pdev->dev, name); 199 + 200 + if (conn->supply == ERR_PTR(-ENODEV)) 201 + conn->supply = NULL; 202 + 203 + return PTR_ERR_OR_ZERO(conn->supply); 204 + } 205 + 194 206 static int display_connector_probe(struct platform_device *pdev) 195 207 { 196 208 struct display_connector *conn; ··· 328 316 if (type == DRM_MODE_CONNECTOR_DisplayPort) { 329 317 int ret; 330 318 331 - conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr"); 332 - 333 - if (IS_ERR(conn->dp_pwr)) { 334 - ret = PTR_ERR(conn->dp_pwr); 335 - 336 - switch (ret) { 337 - case -ENODEV: 338 - conn->dp_pwr = NULL; 339 - break; 340 - 341 - case -EPROBE_DEFER: 342 - return -EPROBE_DEFER; 343 - 344 - default: 345 - dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret); 346 - return ret; 347 - } 348 - } 349 - 350 - if (conn->dp_pwr) { 351 - ret = regulator_enable(conn->dp_pwr); 352 - if (ret) { 353 - dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret); 354 - return ret; 355 - } 356 - } 319 + ret = display_connector_get_supply(pdev, conn, "dp-pwr"); 320 + if (ret < 0) 321 + return dev_err_probe(&pdev->dev, ret, "failed to get DP PWR regulator\n"); 357 322 } 358 323 359 324 /* enable DDC */ 360 325 if (type == DRM_MODE_CONNECTOR_HDMIA) { 326 + int ret; 327 + 361 328 conn->ddc_en = devm_gpiod_get_optional(&pdev->dev, "ddc-en", 362 329 GPIOD_OUT_HIGH); 363 330 364 331 if (IS_ERR(conn->ddc_en)) { 365 332 dev_err(&pdev->dev, "Couldn't get ddc-en gpio\n"); 366 333 return PTR_ERR(conn->ddc_en); 334 + } 335 + 336 + ret = display_connector_get_supply(pdev, conn, "hdmi-pwr"); 
337 + if (ret < 0) 338 + return dev_err_probe(&pdev->dev, ret, "failed to get HDMI +5V Power regulator\n"); 339 + } 340 + 341 + if (conn->supply) { 342 + ret = regulator_enable(conn->supply); 343 + if (ret) { 344 + dev_err(&pdev->dev, "failed to enable PWR regulator: %d\n", ret); 345 + return ret; 367 346 } 368 347 } 369 348 ··· 389 386 if (conn->ddc_en) 390 387 gpiod_set_value(conn->ddc_en, 0); 391 388 392 - if (conn->dp_pwr) 393 - regulator_disable(conn->dp_pwr); 389 + if (conn->supply) 390 + regulator_disable(conn->supply); 394 391 395 392 drm_bridge_remove(&conn->bridge); 396 393
+5
drivers/gpu/drm/bridge/imx/Kconfig
··· 1 1 if ARCH_MXC || COMPILE_TEST 2 2 3 + config DRM_IMX_LDB_HELPER 4 + tristate 5 + 3 6 config DRM_IMX8QM_LDB 4 7 tristate "Freescale i.MX8QM LVDS display bridge" 5 8 depends on OF 6 9 depends on COMMON_CLK 10 + select DRM_IMX_LDB_HELPER 7 11 select DRM_KMS_HELPER 8 12 help 9 13 Choose this to enable the internal LVDS Display Bridge(LDB) found in ··· 17 13 tristate "Freescale i.MX8QXP LVDS display bridge" 18 14 depends on OF 19 15 depends on COMMON_CLK 16 + select DRM_IMX_LDB_HELPER 20 17 select DRM_KMS_HELPER 21 18 help 22 19 Choose this to enable the internal LVDS Display Bridge(LDB) found in
+1 -4
drivers/gpu/drm/bridge/imx/Makefile
··· 1 - imx8qm-ldb-objs := imx-ldb-helper.o imx8qm-ldb-drv.o 1 + obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o 2 2 obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o 3 - 4 - imx8qxp-ldb-objs := imx-ldb-helper.o imx8qxp-ldb-drv.o 5 3 obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o 6 - 7 4 obj-$(CONFIG_DRM_IMX8QXP_PIXEL_COMBINER) += imx8qxp-pixel-combiner.o 8 5 obj-$(CONFIG_DRM_IMX8QXP_PIXEL_LINK) += imx8qxp-pixel-link.o 9 6 obj-$(CONFIG_DRM_IMX8QXP_PIXEL_LINK_TO_DPI) += imx8qxp-pxl2dpi.o
+17
drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
··· 4 4 * Copyright 2019,2020,2022 NXP 5 5 */ 6 6 7 + #include <linux/export.h> 7 8 #include <linux/media-bus-format.h> 8 9 #include <linux/mfd/syscon.h> 10 + #include <linux/module.h> 9 11 #include <linux/of.h> 10 12 #include <linux/regmap.h> 11 13 ··· 21 19 { 22 20 return ldb_ch->link_type == LDB_CH_SINGLE_LINK; 23 21 } 22 + EXPORT_SYMBOL_GPL(ldb_channel_is_single_link); 24 23 25 24 bool ldb_channel_is_split_link(struct ldb_channel *ldb_ch) 26 25 { 27 26 return ldb_ch->link_type == LDB_CH_DUAL_LINK_EVEN_ODD_PIXELS || 28 27 ldb_ch->link_type == LDB_CH_DUAL_LINK_ODD_EVEN_PIXELS; 29 28 } 29 + EXPORT_SYMBOL_GPL(ldb_channel_is_split_link); 30 30 31 31 int ldb_bridge_atomic_check_helper(struct drm_bridge *bridge, 32 32 struct drm_bridge_state *bridge_state, ··· 42 38 43 39 return 0; 44 40 } 41 + EXPORT_SYMBOL_GPL(ldb_bridge_atomic_check_helper); 45 42 46 43 void ldb_bridge_mode_set_helper(struct drm_bridge *bridge, 47 44 const struct drm_display_mode *mode, ··· 74 69 break; 75 70 } 76 71 } 72 + EXPORT_SYMBOL_GPL(ldb_bridge_mode_set_helper); 77 73 78 74 void ldb_bridge_enable_helper(struct drm_bridge *bridge) 79 75 { ··· 87 81 */ 88 82 regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl); 89 83 } 84 + EXPORT_SYMBOL_GPL(ldb_bridge_enable_helper); 90 85 91 86 void ldb_bridge_disable_helper(struct drm_bridge *bridge) 92 87 { ··· 102 95 103 96 regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl); 104 97 } 98 + EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper); 105 99 106 100 int ldb_bridge_attach_helper(struct drm_bridge *bridge, 107 101 enum drm_bridge_attach_flags flags) ··· 125 117 ldb_ch->next_bridge, bridge, 126 118 DRM_BRIDGE_ATTACH_NO_CONNECTOR); 127 119 } 120 + EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper); 128 121 129 122 int ldb_init_helper(struct ldb *ldb) 130 123 { ··· 166 157 167 158 return 0; 168 159 } 160 + EXPORT_SYMBOL_GPL(ldb_init_helper); 169 161 170 162 int ldb_find_next_bridge_helper(struct ldb *ldb) 171 163 { ··· 194 184 195 185 return 0; 196 186 } 187 
+ EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper); 197 188 198 189 void ldb_add_bridge_helper(struct ldb *ldb, 199 190 const struct drm_bridge_funcs *bridge_funcs) ··· 215 204 drm_bridge_add(&ldb_ch->bridge); 216 205 } 217 206 } 207 + EXPORT_SYMBOL_GPL(ldb_add_bridge_helper); 218 208 219 209 void ldb_remove_bridge_helper(struct ldb *ldb) 220 210 { ··· 231 219 drm_bridge_remove(&ldb_ch->bridge); 232 220 } 233 221 } 222 + EXPORT_SYMBOL_GPL(ldb_remove_bridge_helper); 223 + 224 + MODULE_DESCRIPTION("i.MX8 LVDS Display Bridge(LDB)/Pixel Mapper bridge helper"); 225 + MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>"); 226 + MODULE_LICENSE("GPL");
drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+1 -1
drivers/gpu/drm/bridge/ite-it6505.c
··· 3479 3479 .of_match_table = it6505_of_match, 3480 3480 .pm = &it6505_bridge_pm_ops, 3481 3481 }, 3482 - .probe_new = it6505_i2c_probe, 3482 + .probe = it6505_i2c_probe, 3483 3483 .remove = it6505_i2c_remove, 3484 3484 .shutdown = it6505_shutdown, 3485 3485 .id_table = it6505_id,
+1 -1
drivers/gpu/drm/bridge/ite-it66121.c
··· 1640 1640 .name = "it66121", 1641 1641 .of_match_table = it66121_dt_match, 1642 1642 }, 1643 - .probe_new = it66121_probe, 1643 + .probe = it66121_probe, 1644 1644 .remove = it66121_remove, 1645 1645 .id_table = it66121_id, 1646 1646 };
+1 -1
drivers/gpu/drm/bridge/lontium-lt8912b.c
··· 773 773 .name = "lt8912", 774 774 .of_match_table = lt8912_dt_match, 775 775 }, 776 - .probe_new = lt8912_probe, 776 + .probe = lt8912_probe, 777 777 .remove = lt8912_remove, 778 778 .id_table = lt8912_id, 779 779 };
+1 -1
drivers/gpu/drm/bridge/lontium-lt9211.c
··· 787 787 MODULE_DEVICE_TABLE(of, lt9211_match_table); 788 788 789 789 static struct i2c_driver lt9211_driver = { 790 - .probe_new = lt9211_probe, 790 + .probe = lt9211_probe, 791 791 .remove = lt9211_remove, 792 792 .id_table = lt9211_id, 793 793 .driver = {
+1 -1
drivers/gpu/drm/bridge/lontium-lt9611.c
··· 1192 1192 .name = "lt9611", 1193 1193 .of_match_table = lt9611_match_table, 1194 1194 }, 1195 - .probe_new = lt9611_probe, 1195 + .probe = lt9611_probe, 1196 1196 .remove = lt9611_remove, 1197 1197 .id_table = lt9611_id, 1198 1198 };
+1 -1
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
··· 1011 1011 .of_match_table = lt9611uxc_match_table, 1012 1012 .dev_groups = lt9611uxc_attr_groups, 1013 1013 }, 1014 - .probe_new = lt9611uxc_probe, 1014 + .probe = lt9611uxc_probe, 1015 1015 .remove = lt9611uxc_remove, 1016 1016 .id_table = lt9611uxc_id, 1017 1017 };
+2 -2
drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
··· 375 375 376 376 static struct i2c_driver stdp4028_ge_b850v3_fw_driver = { 377 377 .id_table = stdp4028_ge_b850v3_fw_i2c_table, 378 - .probe_new = stdp4028_ge_b850v3_fw_probe, 378 + .probe = stdp4028_ge_b850v3_fw_probe, 379 379 .remove = stdp4028_ge_b850v3_fw_remove, 380 380 .driver = { 381 381 .name = "stdp4028-ge-b850v3-fw", ··· 422 422 423 423 static struct i2c_driver stdp2690_ge_b850v3_fw_driver = { 424 424 .id_table = stdp2690_ge_b850v3_fw_i2c_table, 425 - .probe_new = stdp2690_ge_b850v3_fw_probe, 425 + .probe = stdp2690_ge_b850v3_fw_probe, 426 426 .remove = stdp2690_ge_b850v3_fw_remove, 427 427 .driver = { 428 428 .name = "stdp2690-ge-b850v3-fw",
+1 -1
drivers/gpu/drm/bridge/nxp-ptn3460.c
··· 335 335 336 336 static struct i2c_driver ptn3460_driver = { 337 337 .id_table = ptn3460_i2c_table, 338 - .probe_new = ptn3460_probe, 338 + .probe = ptn3460_probe, 339 339 .remove = ptn3460_remove, 340 340 .driver = { 341 341 .name = "nxp,ptn3460",
+1 -1
drivers/gpu/drm/bridge/parade-ps8622.c
··· 538 538 539 539 static struct i2c_driver ps8622_driver = { 540 540 .id_table = ps8622_i2c_table, 541 - .probe_new = ps8622_probe, 541 + .probe = ps8622_probe, 542 542 .remove = ps8622_remove, 543 543 .driver = { 544 544 .name = "ps8622",
+1 -1
drivers/gpu/drm/bridge/parade-ps8640.c
··· 791 791 MODULE_DEVICE_TABLE(of, ps8640_match); 792 792 793 793 static struct i2c_driver ps8640_driver = { 794 - .probe_new = ps8640_probe, 794 + .probe = ps8640_probe, 795 795 .remove = ps8640_remove, 796 796 .driver = { 797 797 .name = "ps8640",
+141 -25
drivers/gpu/drm/bridge/samsung-dsim.c
··· 220 220 221 221 #define OLD_SCLK_MIPI_CLK_NAME "pll_clk" 222 222 223 + #define PS_TO_CYCLE(ps, hz) DIV64_U64_ROUND_CLOSEST(((ps) * (hz)), 1000000000000ULL) 224 + 223 225 static const char *const clk_names[5] = { 224 226 "bus_clk", 225 227 "sclk_mipi", ··· 409 407 .num_bits_resol = 11, 410 408 .pll_p_offset = 13, 411 409 .reg_values = reg_values, 410 + .m_min = 41, 411 + .m_max = 125, 412 + .min_freq = 500, 412 413 }; 413 414 414 415 static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = { ··· 425 420 .num_bits_resol = 11, 426 421 .pll_p_offset = 13, 427 422 .reg_values = reg_values, 423 + .m_min = 41, 424 + .m_max = 125, 425 + .min_freq = 500, 428 426 }; 429 427 430 428 static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = { ··· 439 431 .num_bits_resol = 11, 440 432 .pll_p_offset = 13, 441 433 .reg_values = reg_values, 434 + .m_min = 41, 435 + .m_max = 125, 436 + .min_freq = 500, 442 437 }; 443 438 444 439 static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = { ··· 454 443 .num_bits_resol = 12, 455 444 .pll_p_offset = 13, 456 445 .reg_values = exynos5433_reg_values, 446 + .m_min = 41, 447 + .m_max = 125, 448 + .min_freq = 500, 457 449 }; 458 450 459 451 static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = { ··· 469 455 .num_bits_resol = 12, 470 456 .pll_p_offset = 13, 471 457 .reg_values = exynos5422_reg_values, 458 + .m_min = 41, 459 + .m_max = 125, 460 + .min_freq = 500, 472 461 }; 473 462 474 463 static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = { ··· 488 471 */ 489 472 .pll_p_offset = 14, 490 473 .reg_values = imx8mm_dsim_reg_values, 474 + .m_min = 64, 475 + .m_max = 1023, 476 + .min_freq = 1050, 491 477 }; 492 478 493 479 static const struct samsung_dsim_driver_data * ··· 569 549 tmp = (u64)fout * (_p << _s); 570 550 do_div(tmp, fin); 571 551 _m = tmp; 572 - if (_m < 41 || _m > 125) 552 + if (_m < driver_data->m_min || _m > driver_data->m_max) 573 553 continue; 
574 554 575 555 tmp = (u64)_m * fin; 576 556 do_div(tmp, _p); 577 - if (tmp < 500 * MHZ || 557 + if (tmp < driver_data->min_freq * MHZ || 578 558 tmp > driver_data->max_freq * MHZ) 579 559 continue; 580 560 ··· 660 640 reg = samsung_dsim_read(dsi, DSIM_STATUS_REG); 661 641 } while ((reg & DSIM_PLL_STABLE) == 0); 662 642 643 + dsi->hs_clock = fout; 644 + 663 645 return fout; 664 646 } 665 647 666 648 static int samsung_dsim_enable_clock(struct samsung_dsim *dsi) 667 649 { 668 - unsigned long hs_clk, byte_clk, esc_clk; 650 + unsigned long hs_clk, byte_clk, esc_clk, pix_clk; 669 651 unsigned long esc_div; 670 652 u32 reg; 653 + struct drm_display_mode *m = &dsi->mode; 654 + int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); 671 655 672 - hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate); 656 + /* m->clock is in KHz */ 657 + pix_clk = m->clock * 1000; 658 + 659 + /* Use burst_clk_rate if available, otherwise use the pix_clk */ 660 + if (dsi->burst_clk_rate) 661 + hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate); 662 + else 663 + hs_clk = samsung_dsim_set_pll(dsi, DIV_ROUND_UP(pix_clk * bpp, dsi->lanes)); 664 + 673 665 if (!hs_clk) { 674 666 dev_err(dsi->dev, "failed to configure DSI PLL\n"); 675 667 return -EFAULT; ··· 719 687 const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; 720 688 const unsigned int *reg_values = driver_data->reg_values; 721 689 u32 reg; 690 + struct phy_configure_opts_mipi_dphy cfg; 691 + int clk_prepare, lpx, clk_zero, clk_post, clk_trail; 692 + int hs_exit, hs_prepare, hs_zero, hs_trail; 693 + unsigned long long byte_clock = dsi->hs_clock / 8; 722 694 723 695 if (driver_data->has_freqband) 724 696 return; 725 697 698 + phy_mipi_dphy_get_default_config_for_hsclk(dsi->hs_clock, 699 + dsi->lanes, &cfg); 700 + 701 + /* 702 + * TODO: 703 + * The tech Applications Processor manuals for i.MX8M Mini, Nano, 704 + * and Plus don't state what the definition of the PHYTIMING 705 + * bits are beyond their address and bit 
position. 706 + * After reviewing NXP's downstream code, it appears 707 + * that the various PHYTIMING registers take the number 708 + * of cycles and use various dividers on them. This 709 + * calculation does not result in an exact match to the 710 + * downstream code, but it is very close to the values 711 + * generated by their lookup table, and it appears 712 + * to sync at a variety of resolutions. If someone 713 + * can get a more accurate mathematical equation needed 714 + * for these registers, this should be updated. 715 + */ 716 + 717 + lpx = PS_TO_CYCLE(cfg.lpx, byte_clock); 718 + hs_exit = PS_TO_CYCLE(cfg.hs_exit, byte_clock); 719 + clk_prepare = PS_TO_CYCLE(cfg.clk_prepare, byte_clock); 720 + clk_zero = PS_TO_CYCLE(cfg.clk_zero, byte_clock); 721 + clk_post = PS_TO_CYCLE(cfg.clk_post, byte_clock); 722 + clk_trail = PS_TO_CYCLE(cfg.clk_trail, byte_clock); 723 + hs_prepare = PS_TO_CYCLE(cfg.hs_prepare, byte_clock); 724 + hs_zero = PS_TO_CYCLE(cfg.hs_zero, byte_clock); 725 + hs_trail = PS_TO_CYCLE(cfg.hs_trail, byte_clock); 726 + 726 727 /* B D-PHY: D-PHY Master & Slave Analog Block control */ 727 728 reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] | 728 729 reg_values[PHYCTRL_SLEW_UP]; 730 + 729 731 samsung_dsim_write(dsi, DSIM_PHYCTRL_REG, reg); 730 732 731 733 /* ··· 767 701 * T HS-EXIT: Time that the transmitter drives LP-11 following a HS 768 702 * burst 769 703 */ 770 - reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT]; 704 + 705 + reg = DSIM_PHYTIMING_LPX(lpx) | DSIM_PHYTIMING_HS_EXIT(hs_exit); 706 + 771 707 samsung_dsim_write(dsi, DSIM_PHYTIMING_REG, reg); 772 708 773 709 /* ··· 785 717 * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after 786 718 * the last payload clock bit of a HS transmission burst 787 719 */ 788 - reg = reg_values[PHYTIMING_CLK_PREPARE] | 789 - reg_values[PHYTIMING_CLK_ZERO] | 790 - reg_values[PHYTIMING_CLK_POST] | 791 - reg_values[PHYTIMING_CLK_TRAIL]; 720 + 721 + reg = 
DSIM_PHYTIMING1_CLK_PREPARE(clk_prepare) | 722 + DSIM_PHYTIMING1_CLK_ZERO(clk_zero) | 723 + DSIM_PHYTIMING1_CLK_POST(clk_post) | 724 + DSIM_PHYTIMING1_CLK_TRAIL(clk_trail); 792 725 793 726 samsung_dsim_write(dsi, DSIM_PHYTIMING1_REG, reg); 794 727 ··· 802 733 * T HS-TRAIL: Time that the transmitter drives the flipped differential 803 734 * state after last payload data bit of a HS transmission burst 804 735 */ 805 - reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] | 806 - reg_values[PHYTIMING_HS_TRAIL]; 736 + 737 + reg = DSIM_PHYTIMING2_HS_PREPARE(hs_prepare) | 738 + DSIM_PHYTIMING2_HS_ZERO(hs_zero) | 739 + DSIM_PHYTIMING2_HS_TRAIL(hs_trail); 740 + 807 741 samsung_dsim_write(dsi, DSIM_PHYTIMING2_REG, reg); 808 742 } 809 743 ··· 938 866 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); 939 867 reg &= ~DSIM_STOP_STATE_CNT_MASK; 940 868 reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]); 869 + 870 + if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) 871 + reg |= DSIM_FORCE_STOP_STATE; 872 + 941 873 samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); 942 874 943 875 reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff); ··· 957 881 u32 reg; 958 882 959 883 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { 884 + int byte_clk_khz = dsi->hs_clock / 1000 / 8; 885 + int hfp = (m->hsync_start - m->hdisplay) * byte_clk_khz / m->clock; 886 + int hbp = (m->htotal - m->hsync_end) * byte_clk_khz / m->clock; 887 + int hsa = (m->hsync_end - m->hsync_start) * byte_clk_khz / m->clock; 888 + 889 + /* remove packet overhead when possible */ 890 + hfp = max(hfp - 6, 0); 891 + hbp = max(hbp - 6, 0); 892 + hsa = max(hsa - 6, 0); 893 + 894 + dev_dbg(dsi->dev, "calculated hfp: %u, hbp: %u, hsa: %u", 895 + hfp, hbp, hsa); 896 + 960 897 reg = DSIM_CMD_ALLOW(0xf) 961 898 | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay) 962 899 | DSIM_MAIN_VBP(m->vtotal - m->vsync_end); 963 900 samsung_dsim_write(dsi, DSIM_MVPORCH_REG, reg); 964 901 965 - reg = 
DSIM_MAIN_HFP(m->hsync_start - m->hdisplay) 966 - | DSIM_MAIN_HBP(m->htotal - m->hsync_end); 902 + reg = DSIM_MAIN_HFP(hfp) | DSIM_MAIN_HBP(hbp); 967 903 samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg); 968 904 969 905 reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start) 970 - | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start); 906 + | DSIM_MAIN_HSA(hsa); 971 907 samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg); 972 908 } 973 909 reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) | ··· 1435 1347 ret = samsung_dsim_init(dsi); 1436 1348 if (ret) 1437 1349 return; 1350 + 1351 + samsung_dsim_set_display_mode(dsi); 1352 + samsung_dsim_set_display_enable(dsi, true); 1438 1353 } 1439 1354 } 1440 1355 ··· 1445 1354 struct drm_bridge_state *old_bridge_state) 1446 1355 { 1447 1356 struct samsung_dsim *dsi = bridge_to_dsi(bridge); 1357 + u32 reg; 1448 1358 1449 - samsung_dsim_set_display_mode(dsi); 1450 - samsung_dsim_set_display_enable(dsi, true); 1359 + if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { 1360 + samsung_dsim_set_display_mode(dsi); 1361 + samsung_dsim_set_display_enable(dsi, true); 1362 + } else { 1363 + reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); 1364 + reg &= ~DSIM_FORCE_STOP_STATE; 1365 + samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); 1366 + } 1451 1367 1452 1368 dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; 1453 1369 } ··· 1463 1365 struct drm_bridge_state *old_bridge_state) 1464 1366 { 1465 1367 struct samsung_dsim *dsi = bridge_to_dsi(bridge); 1368 + u32 reg; 1466 1369 1467 1370 if (!(dsi->state & DSIM_STATE_ENABLED)) 1468 1371 return; 1372 + 1373 + if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { 1374 + reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); 1375 + reg |= DSIM_FORCE_STOP_STATE; 1376 + samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); 1377 + } 1469 1378 1470 1379 dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE; 1471 1380 } ··· 1794 1689 }; 1795 1690 1796 1691 static int samsung_dsim_of_read_u32(const struct device_node *np, 1797 - const char 
*propname, u32 *out_value) 1692 + const char *propname, u32 *out_value, bool optional) 1798 1693 { 1799 1694 int ret = of_property_read_u32(np, propname, out_value); 1800 1695 1801 - if (ret < 0) 1696 + if (ret < 0 && !optional) 1802 1697 pr_err("%pOF: failed to get '%s' property\n", np, propname); 1803 1698 1804 1699 return ret; ··· 1811 1706 u32 lane_polarities[5] = { 0 }; 1812 1707 struct device_node *endpoint; 1813 1708 int i, nr_lanes, ret; 1709 + struct clk *pll_clk; 1814 1710 1815 1711 ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency", 1816 - &dsi->pll_clk_rate); 1817 - if (ret < 0) 1818 - return ret; 1712 + &dsi->pll_clk_rate, 1); 1713 + /* If it doesn't exist, read it from the clock instead of failing */ 1714 + if (ret < 0) { 1715 + dev_dbg(dev, "Using sclk_mipi for pll clock frequency\n"); 1716 + pll_clk = devm_clk_get(dev, "sclk_mipi"); 1717 + if (!IS_ERR(pll_clk)) 1718 + dsi->pll_clk_rate = clk_get_rate(pll_clk); 1719 + else 1720 + return PTR_ERR(pll_clk); 1721 + } 1819 1722 1723 + /* If it doesn't exist, use pixel clock instead of failing */ 1820 1724 ret = samsung_dsim_of_read_u32(node, "samsung,burst-clock-frequency", 1821 - &dsi->burst_clk_rate); 1822 - if (ret < 0) 1823 - return ret; 1725 + &dsi->burst_clk_rate, 1); 1726 + if (ret < 0) { 1727 + dev_dbg(dev, "Using pixel clock for HS clock frequency\n"); 1728 + dsi->burst_clk_rate = 0; 1729 + } 1824 1730 1825 1731 ret = samsung_dsim_of_read_u32(node, "samsung,esc-clock-frequency", 1826 - &dsi->esc_clk_rate); 1732 + &dsi->esc_clk_rate, 0); 1827 1733 if (ret < 0) 1828 1734 return ret; 1829 1735
+1 -1
drivers/gpu/drm/bridge/sii902x.c
··· 1151 1151 MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids); 1152 1152 1153 1153 static struct i2c_driver sii902x_driver = { 1154 - .probe_new = sii902x_probe, 1154 + .probe = sii902x_probe, 1155 1155 .remove = sii902x_remove, 1156 1156 .driver = { 1157 1157 .name = "sii902x",
+1 -1
drivers/gpu/drm/bridge/sii9234.c
··· 955 955 .name = "sii9234", 956 956 .of_match_table = sii9234_dt_match, 957 957 }, 958 - .probe_new = sii9234_probe, 958 + .probe = sii9234_probe, 959 959 .remove = sii9234_remove, 960 960 .id_table = sii9234_id, 961 961 };
+1 -1
drivers/gpu/drm/bridge/sil-sii8620.c
··· 2378 2378 .name = "sii8620", 2379 2379 .of_match_table = of_match_ptr(sii8620_dt_match), 2380 2380 }, 2381 - .probe_new = sii8620_probe, 2381 + .probe = sii8620_probe, 2382 2382 .remove = sii8620_remove, 2383 2383 .id_table = sii8620_id, 2384 2384 };
+15
drivers/gpu/drm/bridge/tc358762.c
··· 11 11 */ 12 12 13 13 #include <linux/delay.h> 14 + #include <linux/gpio/consumer.h> 14 15 #include <linux/mod_devicetable.h> 15 16 #include <linux/module.h> 16 17 #include <linux/of_graph.h> ··· 64 63 struct drm_bridge bridge; 65 64 struct regulator *regulator; 66 65 struct drm_bridge *panel_bridge; 66 + struct gpio_desc *reset_gpio; 67 67 bool pre_enabled; 68 68 int error; 69 69 }; ··· 140 138 141 139 ctx->pre_enabled = false; 142 140 141 + if (ctx->reset_gpio) 142 + gpiod_set_value_cansleep(ctx->reset_gpio, 0); 143 + 143 144 ret = regulator_disable(ctx->regulator); 144 145 if (ret < 0) 145 146 dev_err(ctx->dev, "error disabling regulators (%d)\n", ret); ··· 156 151 ret = regulator_enable(ctx->regulator); 157 152 if (ret < 0) 158 153 dev_err(ctx->dev, "error enabling regulators (%d)\n", ret); 154 + 155 + if (ctx->reset_gpio) { 156 + gpiod_set_value_cansleep(ctx->reset_gpio, 1); 157 + usleep_range(5000, 10000); 158 + } 159 159 160 160 ret = tc358762_init(ctx); 161 161 if (ret < 0) ··· 194 184 return PTR_ERR(panel_bridge); 195 185 196 186 ctx->panel_bridge = panel_bridge; 187 + 188 + /* Reset GPIO is optional */ 189 + ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); 190 + if (IS_ERR(ctx->reset_gpio)) 191 + return PTR_ERR(ctx->reset_gpio); 197 192 198 193 return 0; 199 194 }
+195 -2
drivers/gpu/drm/bridge/tc358767.c
··· 1781 1781 1782 1782 static bool tc_readable_reg(struct device *dev, unsigned int reg) 1783 1783 { 1784 - return reg != SYSCTRL; 1784 + switch (reg) { 1785 + /* DSI D-PHY Layer */ 1786 + case 0x004: 1787 + case 0x020: 1788 + case 0x024: 1789 + case 0x028: 1790 + case 0x02c: 1791 + case 0x030: 1792 + case 0x038: 1793 + case 0x040: 1794 + case 0x044: 1795 + case 0x048: 1796 + case 0x04c: 1797 + case 0x050: 1798 + case 0x054: 1799 + /* DSI PPI Layer */ 1800 + case PPI_STARTPPI: 1801 + case 0x108: 1802 + case 0x110: 1803 + case PPI_LPTXTIMECNT: 1804 + case PPI_LANEENABLE: 1805 + case PPI_TX_RX_TA: 1806 + case 0x140: 1807 + case PPI_D0S_ATMR: 1808 + case PPI_D1S_ATMR: 1809 + case 0x14c: 1810 + case 0x150: 1811 + case PPI_D0S_CLRSIPOCOUNT: 1812 + case PPI_D1S_CLRSIPOCOUNT: 1813 + case PPI_D2S_CLRSIPOCOUNT: 1814 + case PPI_D3S_CLRSIPOCOUNT: 1815 + case 0x180: 1816 + case 0x184: 1817 + case 0x188: 1818 + case 0x18c: 1819 + case 0x190: 1820 + case 0x1a0: 1821 + case 0x1a4: 1822 + case 0x1a8: 1823 + case 0x1ac: 1824 + case 0x1b0: 1825 + case 0x1c0: 1826 + case 0x1c4: 1827 + case 0x1c8: 1828 + case 0x1cc: 1829 + case 0x1d0: 1830 + case 0x1e0: 1831 + case 0x1e4: 1832 + case 0x1f0: 1833 + case 0x1f4: 1834 + /* DSI Protocol Layer */ 1835 + case DSI_STARTDSI: 1836 + case 0x208: 1837 + case DSI_LANEENABLE: 1838 + case 0x214: 1839 + case 0x218: 1840 + case 0x220: 1841 + case 0x224: 1842 + case 0x228: 1843 + case 0x230: 1844 + /* DSI General */ 1845 + case 0x300: 1846 + /* DSI Application Layer */ 1847 + case 0x400: 1848 + case 0x404: 1849 + /* DPI */ 1850 + case DPIPXLFMT: 1851 + /* Parallel Output */ 1852 + case POCTRL: 1853 + /* Video Path0 Configuration */ 1854 + case VPCTRL0: 1855 + case HTIM01: 1856 + case HTIM02: 1857 + case VTIM01: 1858 + case VTIM02: 1859 + case VFUEN0: 1860 + /* System */ 1861 + case TC_IDREG: 1862 + case 0x504: 1863 + case SYSSTAT: 1864 + case SYSRSTENB: 1865 + case SYSCTRL: 1866 + /* I2C */ 1867 + case 0x520: 1868 + /* GPIO */ 1869 + case GPIOM: 1870 
+ case GPIOC: 1871 + case GPIOO: 1872 + case GPIOI: 1873 + /* Interrupt */ 1874 + case INTCTL_G: 1875 + case INTSTS_G: 1876 + case 0x570: 1877 + case 0x574: 1878 + case INT_GP0_LCNT: 1879 + case INT_GP1_LCNT: 1880 + /* DisplayPort Control */ 1881 + case DP0CTL: 1882 + /* DisplayPort Clock */ 1883 + case DP0_VIDMNGEN0: 1884 + case DP0_VIDMNGEN1: 1885 + case DP0_VMNGENSTATUS: 1886 + case 0x628: 1887 + case 0x62c: 1888 + case 0x630: 1889 + /* DisplayPort Main Channel */ 1890 + case DP0_SECSAMPLE: 1891 + case DP0_VIDSYNCDELAY: 1892 + case DP0_TOTALVAL: 1893 + case DP0_STARTVAL: 1894 + case DP0_ACTIVEVAL: 1895 + case DP0_SYNCVAL: 1896 + case DP0_MISC: 1897 + /* DisplayPort Aux Channel */ 1898 + case DP0_AUXCFG0: 1899 + case DP0_AUXCFG1: 1900 + case DP0_AUXADDR: 1901 + case 0x66c: 1902 + case 0x670: 1903 + case 0x674: 1904 + case 0x678: 1905 + case 0x67c: 1906 + case 0x680: 1907 + case 0x684: 1908 + case 0x688: 1909 + case DP0_AUXSTATUS: 1910 + case DP0_AUXI2CADR: 1911 + /* DisplayPort Link Training */ 1912 + case DP0_SRCCTRL: 1913 + case DP0_LTSTAT: 1914 + case DP0_SNKLTCHGREQ: 1915 + case DP0_LTLOOPCTRL: 1916 + case DP0_SNKLTCTRL: 1917 + case 0x6e8: 1918 + case 0x6ec: 1919 + case 0x6f0: 1920 + case 0x6f4: 1921 + /* DisplayPort Audio */ 1922 + case 0x700: 1923 + case 0x704: 1924 + case 0x708: 1925 + case 0x70c: 1926 + case 0x710: 1927 + case 0x714: 1928 + case 0x718: 1929 + case 0x71c: 1930 + case 0x720: 1931 + /* DisplayPort Source Control */ 1932 + case DP1_SRCCTRL: 1933 + /* DisplayPort PHY */ 1934 + case DP_PHY_CTRL: 1935 + case 0x810: 1936 + case 0x814: 1937 + case 0x820: 1938 + case 0x840: 1939 + /* I2S */ 1940 + case 0x880: 1941 + case 0x888: 1942 + case 0x88c: 1943 + case 0x890: 1944 + case 0x894: 1945 + case 0x898: 1946 + case 0x89c: 1947 + case 0x8a0: 1948 + case 0x8a4: 1949 + case 0x8a8: 1950 + case 0x8ac: 1951 + case 0x8b0: 1952 + case 0x8b4: 1953 + /* PLL */ 1954 + case DP0_PLLCTRL: 1955 + case DP1_PLLCTRL: 1956 + case PXL_PLLCTRL: 1957 + case PXL_PLLPARAM: 
1958 + case SYS_PLLPARAM: 1959 + /* HDCP */ 1960 + case 0x980: 1961 + case 0x984: 1962 + case 0x988: 1963 + case 0x98c: 1964 + case 0x990: 1965 + case 0x994: 1966 + case 0x998: 1967 + case 0x99c: 1968 + case 0x9a0: 1969 + case 0x9a4: 1970 + case 0x9a8: 1971 + case 0x9ac: 1972 + /* Debug */ 1973 + case TSTCTL: 1974 + case PLL_DBG: 1975 + return true; 1976 + } 1977 + return false; 1785 1978 } 1786 1979 1787 1980 static const struct regmap_range tc_volatile_ranges[] = { ··· 2402 2209 .of_match_table = tc358767_of_ids, 2403 2210 }, 2404 2211 .id_table = tc358767_i2c_ids, 2405 - .probe_new = tc_probe, 2212 + .probe = tc_probe, 2406 2213 .remove = tc_remove, 2407 2214 }; 2408 2215 module_i2c_driver(tc358767_driver);
+1 -1
drivers/gpu/drm/bridge/tc358768.c
··· 1134 1134 .of_match_table = tc358768_of_ids, 1135 1135 }, 1136 1136 .id_table = tc358768_i2c_ids, 1137 - .probe_new = tc358768_i2c_probe, 1137 + .probe = tc358768_i2c_probe, 1138 1138 .remove = tc358768_i2c_remove, 1139 1139 }; 1140 1140 module_i2c_driver(tc358768_driver);
+1 -1
drivers/gpu/drm/bridge/tc358775.c
··· 728 728 .of_match_table = tc358775_of_ids, 729 729 }, 730 730 .id_table = tc358775_i2c_ids, 731 - .probe_new = tc_probe, 731 + .probe = tc_probe, 732 732 .remove = tc_remove, 733 733 }; 734 734 module_i2c_driver(tc358775_driver);
+1 -1
drivers/gpu/drm/bridge/ti-dlpc3433.c
··· 400 400 MODULE_DEVICE_TABLE(of, dlpc3433_match_table); 401 401 402 402 static struct i2c_driver dlpc3433_driver = { 403 - .probe_new = dlpc3433_probe, 403 + .probe = dlpc3433_probe, 404 404 .remove = dlpc3433_remove, 405 405 .id_table = dlpc3433_id, 406 406 .driver = {
+17 -4
drivers/gpu/drm/bridge/ti-sn65dsi83.c
··· 321 321 return dsi_div - 1; 322 322 } 323 323 324 - static void sn65dsi83_atomic_enable(struct drm_bridge *bridge, 325 - struct drm_bridge_state *old_bridge_state) 324 + static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge, 325 + struct drm_bridge_state *old_bridge_state) 326 326 { 327 327 struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); 328 328 struct drm_atomic_state *state = old_bridge_state->base.state; ··· 485 485 /* Trigger reset after CSR register update. */ 486 486 regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET); 487 487 488 + /* Wait for 10ms after soft reset as specified in datasheet */ 489 + usleep_range(10000, 12000); 490 + } 491 + 492 + static void sn65dsi83_atomic_enable(struct drm_bridge *bridge, 493 + struct drm_bridge_state *old_bridge_state) 494 + { 495 + struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); 496 + unsigned int pval; 497 + 488 498 /* Clear all errors that got asserted during initialization. */ 489 499 regmap_read(ctx->regmap, REG_IRQ_STAT, &pval); 490 500 regmap_write(ctx->regmap, REG_IRQ_STAT, pval); 491 501 492 - usleep_range(10000, 12000); 502 + /* Wait for 1ms and check for errors in status register */ 503 + usleep_range(1000, 1100); 493 504 regmap_read(ctx->regmap, REG_IRQ_STAT, &pval); 494 505 if (pval) 495 506 dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval); ··· 567 556 .attach = sn65dsi83_attach, 568 557 .detach = sn65dsi83_detach, 569 558 .atomic_enable = sn65dsi83_atomic_enable, 559 + .atomic_pre_enable = sn65dsi83_atomic_pre_enable, 570 560 .atomic_disable = sn65dsi83_atomic_disable, 571 561 .mode_valid = sn65dsi83_mode_valid, 572 562 ··· 710 698 711 699 ctx->bridge.funcs = &sn65dsi83_funcs; 712 700 ctx->bridge.of_node = dev->of_node; 701 + ctx->bridge.pre_enable_prev_first = true; 713 702 drm_bridge_add(&ctx->bridge); 714 703 715 704 ret = sn65dsi83_host_attach(ctx); ··· 748 735 MODULE_DEVICE_TABLE(of, sn65dsi83_match_table); 749 736 750 737 static struct i2c_driver 
sn65dsi83_driver = { 751 - .probe_new = sn65dsi83_probe, 738 + .probe = sn65dsi83_probe, 752 739 .remove = sn65dsi83_remove, 753 740 .id_table = sn65dsi83_id, 754 741 .driver = {
+1 -1
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 1970 1970 .of_match_table = ti_sn65dsi86_match_table, 1971 1971 .pm = &ti_sn65dsi86_pm_ops, 1972 1972 }, 1973 - .probe_new = ti_sn65dsi86_probe, 1973 + .probe = ti_sn65dsi86_probe, 1974 1974 .id_table = ti_sn65dsi86_id, 1975 1975 }; 1976 1976
+1 -1
drivers/gpu/drm/bridge/ti-tfp410.c
··· 408 408 .of_match_table = of_match_ptr(tfp410_match), 409 409 }, 410 410 .id_table = tfp410_i2c_ids, 411 - .probe_new = tfp410_i2c_probe, 411 + .probe = tfp410_i2c_probe, 412 412 .remove = tfp410_i2c_remove, 413 413 }; 414 414 #endif /* IS_ENABLED(CONFIG_I2C) */
+22 -214
drivers/gpu/drm/drm_fb_helper.c
··· 670 670 drm_rect_init(clip, x1, y1, x2 - x1, y2 - y1); 671 671 } 672 672 673 + /* Don't use in new code. */ 674 + void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len) 675 + { 676 + struct drm_fb_helper *fb_helper = info->par; 677 + struct drm_rect damage_area; 678 + 679 + drm_fb_helper_memory_range_to_clip(info, off, len, &damage_area); 680 + drm_fb_helper_damage(fb_helper, damage_area.x1, damage_area.y1, 681 + drm_rect_width(&damage_area), 682 + drm_rect_height(&damage_area)); 683 + } 684 + EXPORT_SYMBOL(drm_fb_helper_damage_range); 685 + 686 + /* Don't use in new code. */ 687 + void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height) 688 + { 689 + struct drm_fb_helper *fb_helper = info->par; 690 + 691 + drm_fb_helper_damage(fb_helper, x, y, width, height); 692 + } 693 + EXPORT_SYMBOL(drm_fb_helper_damage_area); 694 + 673 695 /** 674 696 * drm_fb_helper_deferred_io() - fbdev deferred_io callback function 675 697 * @info: fb_info struct pointer ··· 735 713 } 736 714 } 737 715 EXPORT_SYMBOL(drm_fb_helper_deferred_io); 738 - 739 - /** 740 - * drm_fb_helper_sys_read - Implements struct &fb_ops.fb_read for system memory 741 - * @info: fb_info struct pointer 742 - * @buf: userspace buffer to read from framebuffer memory 743 - * @count: number of bytes to read from framebuffer memory 744 - * @ppos: read offset within framebuffer memory 745 - * 746 - * Returns: 747 - * The number of bytes read on success, or an error code otherwise. 
748 - */ 749 - ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, 750 - size_t count, loff_t *ppos) 751 - { 752 - return fb_sys_read(info, buf, count, ppos); 753 - } 754 - EXPORT_SYMBOL(drm_fb_helper_sys_read); 755 - 756 - /** 757 - * drm_fb_helper_sys_write - Implements struct &fb_ops.fb_write for system memory 758 - * @info: fb_info struct pointer 759 - * @buf: userspace buffer to write to framebuffer memory 760 - * @count: number of bytes to write to framebuffer memory 761 - * @ppos: write offset within framebuffer memory 762 - * 763 - * Returns: 764 - * The number of bytes written on success, or an error code otherwise. 765 - */ 766 - ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, 767 - size_t count, loff_t *ppos) 768 - { 769 - struct drm_fb_helper *helper = info->par; 770 - loff_t pos = *ppos; 771 - ssize_t ret; 772 - struct drm_rect damage_area; 773 - 774 - ret = fb_sys_write(info, buf, count, ppos); 775 - if (ret <= 0) 776 - return ret; 777 - 778 - if (helper->funcs->fb_dirty) { 779 - drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 780 - drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1, 781 - drm_rect_width(&damage_area), 782 - drm_rect_height(&damage_area)); 783 - } 784 - 785 - return ret; 786 - } 787 - EXPORT_SYMBOL(drm_fb_helper_sys_write); 788 - 789 - /** 790 - * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect 791 - * @info: fbdev registered by the helper 792 - * @rect: info about rectangle to fill 793 - * 794 - * A wrapper around sys_fillrect implemented by fbdev core 795 - */ 796 - void drm_fb_helper_sys_fillrect(struct fb_info *info, 797 - const struct fb_fillrect *rect) 798 - { 799 - struct drm_fb_helper *helper = info->par; 800 - 801 - sys_fillrect(info, rect); 802 - 803 - if (helper->funcs->fb_dirty) 804 - drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height); 805 - } 806 - EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); 807 - 808 - /** 809 - * 
drm_fb_helper_sys_copyarea - wrapper around sys_copyarea 810 - * @info: fbdev registered by the helper 811 - * @area: info about area to copy 812 - * 813 - * A wrapper around sys_copyarea implemented by fbdev core 814 - */ 815 - void drm_fb_helper_sys_copyarea(struct fb_info *info, 816 - const struct fb_copyarea *area) 817 - { 818 - struct drm_fb_helper *helper = info->par; 819 - 820 - sys_copyarea(info, area); 821 - 822 - if (helper->funcs->fb_dirty) 823 - drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height); 824 - } 825 - EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); 826 - 827 - /** 828 - * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit 829 - * @info: fbdev registered by the helper 830 - * @image: info about image to blit 831 - * 832 - * A wrapper around sys_imageblit implemented by fbdev core 833 - */ 834 - void drm_fb_helper_sys_imageblit(struct fb_info *info, 835 - const struct fb_image *image) 836 - { 837 - struct drm_fb_helper *helper = info->par; 838 - 839 - sys_imageblit(info, image); 840 - 841 - if (helper->funcs->fb_dirty) 842 - drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height); 843 - } 844 - EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); 845 - 846 - /** 847 - * drm_fb_helper_cfb_read - Implements struct &fb_ops.fb_read for I/O memory 848 - * @info: fb_info struct pointer 849 - * @buf: userspace buffer to read from framebuffer memory 850 - * @count: number of bytes to read from framebuffer memory 851 - * @ppos: read offset within framebuffer memory 852 - * 853 - * Returns: 854 - * The number of bytes read on success, or an error code otherwise. 
855 - */ 856 - ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, 857 - size_t count, loff_t *ppos) 858 - { 859 - return fb_io_read(info, buf, count, ppos); 860 - } 861 - EXPORT_SYMBOL(drm_fb_helper_cfb_read); 862 - 863 - /** 864 - * drm_fb_helper_cfb_write - Implements struct &fb_ops.fb_write for I/O memory 865 - * @info: fb_info struct pointer 866 - * @buf: userspace buffer to write to framebuffer memory 867 - * @count: number of bytes to write to framebuffer memory 868 - * @ppos: write offset within framebuffer memory 869 - * 870 - * Returns: 871 - * The number of bytes written on success, or an error code otherwise. 872 - */ 873 - ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, 874 - size_t count, loff_t *ppos) 875 - { 876 - struct drm_fb_helper *helper = info->par; 877 - loff_t pos = *ppos; 878 - ssize_t ret; 879 - struct drm_rect damage_area; 880 - 881 - ret = fb_io_write(info, buf, count, ppos); 882 - if (ret <= 0) 883 - return ret; 884 - 885 - if (helper->funcs->fb_dirty) { 886 - drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 887 - drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1, 888 - drm_rect_width(&damage_area), 889 - drm_rect_height(&damage_area)); 890 - } 891 - 892 - return ret; 893 - } 894 - EXPORT_SYMBOL(drm_fb_helper_cfb_write); 895 - 896 - /** 897 - * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect 898 - * @info: fbdev registered by the helper 899 - * @rect: info about rectangle to fill 900 - * 901 - * A wrapper around cfb_fillrect implemented by fbdev core 902 - */ 903 - void drm_fb_helper_cfb_fillrect(struct fb_info *info, 904 - const struct fb_fillrect *rect) 905 - { 906 - struct drm_fb_helper *helper = info->par; 907 - 908 - cfb_fillrect(info, rect); 909 - 910 - if (helper->funcs->fb_dirty) 911 - drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height); 912 - } 913 - EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); 914 - 915 - /** 916 - * 
drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea 917 - * @info: fbdev registered by the helper 918 - * @area: info about area to copy 919 - * 920 - * A wrapper around cfb_copyarea implemented by fbdev core 921 - */ 922 - void drm_fb_helper_cfb_copyarea(struct fb_info *info, 923 - const struct fb_copyarea *area) 924 - { 925 - struct drm_fb_helper *helper = info->par; 926 - 927 - cfb_copyarea(info, area); 928 - 929 - if (helper->funcs->fb_dirty) 930 - drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height); 931 - } 932 - EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); 933 - 934 - /** 935 - * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit 936 - * @info: fbdev registered by the helper 937 - * @image: info about image to blit 938 - * 939 - * A wrapper around cfb_imageblit implemented by fbdev core 940 - */ 941 - void drm_fb_helper_cfb_imageblit(struct fb_info *info, 942 - const struct fb_image *image) 943 - { 944 - struct drm_fb_helper *helper = info->par; 945 - 946 - cfb_imageblit(info, image); 947 - 948 - if (helper->funcs->fb_dirty) 949 - drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height); 950 - } 951 - EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); 952 716 953 717 /** 954 718 * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
+5 -6
drivers/gpu/drm/drm_fbdev_dma.c
··· 1 1 // SPDX-License-Identifier: MIT 2 2 3 + #include <linux/fb.h> 4 + 3 5 #include <drm/drm_crtc_helper.h> 4 6 #include <drm/drm_drv.h> 5 7 #include <drm/drm_fb_helper.h> ··· 66 64 .owner = THIS_MODULE, 67 65 .fb_open = drm_fbdev_dma_fb_open, 68 66 .fb_release = drm_fbdev_dma_fb_release, 69 - .fb_read = drm_fb_helper_sys_read, 70 - .fb_write = drm_fb_helper_sys_write, 67 + __FB_DEFAULT_SYS_OPS_RDWR, 71 68 DRM_FB_HELPER_DEFAULT_OPS, 72 - .fb_fillrect = drm_fb_helper_sys_fillrect, 73 - .fb_copyarea = drm_fb_helper_sys_copyarea, 74 - .fb_imageblit = drm_fb_helper_sys_imageblit, 75 - .fb_destroy = drm_fbdev_dma_fb_destroy, 69 + __FB_DEFAULT_SYS_OPS_DRAW, 76 70 .fb_mmap = drm_fbdev_dma_fb_mmap, 71 + .fb_destroy = drm_fbdev_dma_fb_destroy, 77 72 }; 78 73 79 74 /*
+5 -6
drivers/gpu/drm/drm_fbdev_generic.c
··· 34 34 return 0; 35 35 } 36 36 37 + FB_GEN_DEFAULT_DEFERRED_SYS_OPS(drm_fbdev_generic, 38 + drm_fb_helper_damage_range, 39 + drm_fb_helper_damage_area); 40 + 37 41 static void drm_fbdev_generic_fb_destroy(struct fb_info *info) 38 42 { 39 43 struct drm_fb_helper *fb_helper = info->par; ··· 60 56 .owner = THIS_MODULE, 61 57 .fb_open = drm_fbdev_generic_fb_open, 62 58 .fb_release = drm_fbdev_generic_fb_release, 63 - .fb_read = drm_fb_helper_sys_read, 64 - .fb_write = drm_fb_helper_sys_write, 59 + FB_DEFAULT_DEFERRED_OPS(drm_fbdev_generic), 65 60 DRM_FB_HELPER_DEFAULT_OPS, 66 - .fb_fillrect = drm_fb_helper_sys_fillrect, 67 - .fb_copyarea = drm_fb_helper_sys_copyarea, 68 - .fb_imageblit = drm_fb_helper_sys_imageblit, 69 - .fb_mmap = fb_deferred_io_mmap, 70 61 .fb_destroy = drm_fbdev_generic_fb_destroy, 71 62 }; 72 63
+132
drivers/gpu/drm/drm_file.c
··· 42 42 #include <drm/drm_client.h> 43 43 #include <drm/drm_drv.h> 44 44 #include <drm/drm_file.h> 45 + #include <drm/drm_gem.h> 45 46 #include <drm/drm_print.h> 46 47 47 48 #include "drm_crtc_internal.h" ··· 149 148 */ 150 149 struct drm_file *drm_file_alloc(struct drm_minor *minor) 151 150 { 151 + static atomic64_t ident = ATOMIC_INIT(0); 152 152 struct drm_device *dev = minor->dev; 153 153 struct drm_file *file; 154 154 int ret; ··· 158 156 if (!file) 159 157 return ERR_PTR(-ENOMEM); 160 158 159 + /* Get a unique identifier for fdinfo: */ 160 + file->client_id = atomic64_inc_return(&ident); 161 161 file->pid = get_pid(task_tgid(current)); 162 162 file->minor = minor; 163 163 ··· 871 867 spin_unlock_irqrestore(&dev->event_lock, irqflags); 872 868 } 873 869 EXPORT_SYMBOL(drm_send_event); 870 + 871 + static void print_size(struct drm_printer *p, const char *stat, 872 + const char *region, u64 sz) 873 + { 874 + const char *units[] = {"", " KiB", " MiB"}; 875 + unsigned u; 876 + 877 + for (u = 0; u < ARRAY_SIZE(units) - 1; u++) { 878 + if (sz < SZ_1K) 879 + break; 880 + sz = div_u64(sz, SZ_1K); 881 + } 882 + 883 + drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]); 884 + } 885 + 886 + /** 887 + * drm_print_memory_stats - A helper to print memory stats 888 + * @p: The printer to print output to 889 + * @stats: The collected memory stats 890 + * @supported_status: Bitmask of optional stats which are available 891 + * @region: The memory region 892 + * 893 + */ 894 + void drm_print_memory_stats(struct drm_printer *p, 895 + const struct drm_memory_stats *stats, 896 + enum drm_gem_object_status supported_status, 897 + const char *region) 898 + { 899 + print_size(p, "total", region, stats->private + stats->shared); 900 + print_size(p, "shared", region, stats->shared); 901 + print_size(p, "active", region, stats->active); 902 + 903 + if (supported_status & DRM_GEM_OBJECT_RESIDENT) 904 + print_size(p, "resident", region, stats->resident); 905 + 906 + if 
(supported_status & DRM_GEM_OBJECT_PURGEABLE) 907 + print_size(p, "purgeable", region, stats->purgeable); 908 + } 909 + EXPORT_SYMBOL(drm_print_memory_stats); 910 + 911 + /** 912 + * drm_show_memory_stats - Helper to collect and show standard fdinfo memory stats 913 + * @p: the printer to print output to 914 + * @file: the DRM file 915 + * 916 + * Helper to iterate over GEM objects with a handle allocated in the specified 917 + * file. 918 + */ 919 + void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) 920 + { 921 + struct drm_gem_object *obj; 922 + struct drm_memory_stats status = {}; 923 + enum drm_gem_object_status supported_status; 924 + int id; 925 + 926 + spin_lock(&file->table_lock); 927 + idr_for_each_entry (&file->object_idr, obj, id) { 928 + enum drm_gem_object_status s = 0; 929 + 930 + if (obj->funcs && obj->funcs->status) { 931 + s = obj->funcs->status(obj); 932 + supported_status = DRM_GEM_OBJECT_RESIDENT | 933 + DRM_GEM_OBJECT_PURGEABLE; 934 + } 935 + 936 + if (obj->handle_count > 1) { 937 + status.shared += obj->size; 938 + } else { 939 + status.private += obj->size; 940 + } 941 + 942 + if (s & DRM_GEM_OBJECT_RESIDENT) { 943 + status.resident += obj->size; 944 + } else { 945 + /* If already purged or not yet backed by pages, don't 946 + * count it as purgeable: 947 + */ 948 + s &= ~DRM_GEM_OBJECT_PURGEABLE; 949 + } 950 + 951 + if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) { 952 + status.active += obj->size; 953 + 954 + /* If still active, don't count as purgeable: */ 955 + s &= ~DRM_GEM_OBJECT_PURGEABLE; 956 + } 957 + 958 + if (s & DRM_GEM_OBJECT_PURGEABLE) 959 + status.purgeable += obj->size; 960 + } 961 + spin_unlock(&file->table_lock); 962 + 963 + drm_print_memory_stats(p, &status, supported_status, "memory"); 964 + } 965 + EXPORT_SYMBOL(drm_show_memory_stats); 966 + 967 + /** 968 + * drm_show_fdinfo - helper for drm file fops 969 + * @m: output stream 970 + * @f: the device file instance 971 + * 972 + * 
Helper to implement fdinfo, for userspace to query usage stats, etc, of a 973 + * process using the GPU. See also &drm_driver.show_fdinfo. 974 + * 975 + * For text output format description please see Documentation/gpu/drm-usage-stats.rst 976 + */ 977 + void drm_show_fdinfo(struct seq_file *m, struct file *f) 978 + { 979 + struct drm_file *file = f->private_data; 980 + struct drm_device *dev = file->minor->dev; 981 + struct drm_printer p = drm_seq_file_printer(m); 982 + 983 + drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name); 984 + drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id); 985 + 986 + if (dev_is_pci(dev->dev)) { 987 + struct pci_dev *pdev = to_pci_dev(dev->dev); 988 + 989 + drm_printf(&p, "drm-pdev:\t%04x:%02x:%02x.%d\n", 990 + pci_domain_nr(pdev->bus), pdev->bus->number, 991 + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 992 + } 993 + 994 + if (dev->driver->show_fdinfo) 995 + dev->driver->show_fdinfo(&p, file); 996 + } 997 + EXPORT_SYMBOL(drm_show_fdinfo); 874 998 875 999 /** 876 1000 * mock_drm_getfile - Create a new struct file for the drm device
+1
drivers/gpu/drm/exynos/Kconfig
··· 7 7 select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP 8 8 select DRM_KMS_HELPER 9 9 select VIDEOMODE_HELPERS 10 + select FB_IO_HELPERS if DRM_FBDEV_EMULATION 10 11 select SND_SOC_HDMI_CODEC if SND_SOC 11 12 help 12 13 Choose this option if you have a Samsung SoC Exynos chipset.
+4 -5
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 8 8 * Seung-Woo Kim <sw0312.kim@samsung.com> 9 9 */ 10 10 11 + #include <linux/fb.h> 12 + 11 13 #include <drm/drm_crtc_helper.h> 12 14 #include <drm/drm_drv.h> 13 15 #include <drm/drm_fb_helper.h> ··· 49 47 50 48 static const struct fb_ops exynos_drm_fb_ops = { 51 49 .owner = THIS_MODULE, 50 + __FB_DEFAULT_IO_OPS_RDWR, 52 51 DRM_FB_HELPER_DEFAULT_OPS, 52 + __FB_DEFAULT_IO_OPS_DRAW, 53 53 .fb_mmap = exynos_drm_fb_mmap, 54 - .fb_read = drm_fb_helper_cfb_read, 55 - .fb_write = drm_fb_helper_cfb_write, 56 - .fb_fillrect = drm_fb_helper_cfb_fillrect, 57 - .fb_copyarea = drm_fb_helper_cfb_copyarea, 58 - .fb_imageblit = drm_fb_helper_cfb_imageblit, 59 54 .fb_destroy = exynos_drm_fb_destroy, 60 55 }; 61 56
+1
drivers/gpu/drm/gma500/Kconfig
··· 3 3 tristate "Intel GMA500/600/3600/3650 KMS Framebuffer" 4 4 depends on DRM && PCI && X86 && MMU 5 5 select DRM_KMS_HELPER 6 + select FB_IO_HELPERS if DRM_FBDEV_EMULATION 6 7 select I2C 7 8 select I2C_ALGOBIT 8 9 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
+3 -5
drivers/gpu/drm/gma500/fbdev.c
··· 5 5 * 6 6 **************************************************************************/ 7 7 8 + #include <linux/fb.h> 8 9 #include <linux/pfn_t.h> 9 10 10 11 #include <drm/drm_crtc_helper.h> ··· 135 134 136 135 static const struct fb_ops psb_fbdev_fb_ops = { 137 136 .owner = THIS_MODULE, 137 + __FB_DEFAULT_IO_OPS_RDWR, 138 138 DRM_FB_HELPER_DEFAULT_OPS, 139 139 .fb_setcolreg = psb_fbdev_fb_setcolreg, 140 - .fb_read = drm_fb_helper_cfb_read, 141 - .fb_write = drm_fb_helper_cfb_write, 142 - .fb_fillrect = drm_fb_helper_cfb_fillrect, 143 - .fb_copyarea = drm_fb_helper_cfb_copyarea, 144 - .fb_imageblit = drm_fb_helper_cfb_imageblit, 140 + __FB_DEFAULT_IO_OPS_DRAW, 145 141 .fb_mmap = psb_fbdev_fb_mmap, 146 142 .fb_destroy = psb_fbdev_fb_destroy, 147 143 };
+1 -1
drivers/gpu/drm/i2c/tda9950.c
··· 492 492 MODULE_DEVICE_TABLE(i2c, tda9950_ids); 493 493 494 494 static struct i2c_driver tda9950_driver = { 495 - .probe_new = tda9950_probe, 495 + .probe = tda9950_probe, 496 496 .remove = tda9950_remove, 497 497 .driver = { 498 498 .name = "tda9950",
+1 -1
drivers/gpu/drm/i2c/tda998x_drv.c
··· 2099 2099 MODULE_DEVICE_TABLE(i2c, tda998x_ids); 2100 2100 2101 2101 static struct i2c_driver tda998x_driver = { 2102 - .probe_new = tda998x_probe, 2102 + .probe = tda998x_probe, 2103 2103 .remove = tda998x_remove, 2104 2104 .driver = { 2105 2105 .name = "tda998x",
+1
drivers/gpu/drm/i915/Kconfig
··· 17 17 select DRM_KMS_HELPER 18 18 select DRM_PANEL 19 19 select DRM_MIPI_DSI 20 + select FB_IO_HELPERS if DRM_FBDEV_EMULATION 20 21 select RELAY 21 22 select I2C 22 23 select I2C_ALGOBIT
+1
drivers/gpu/drm/i915/Kconfig.debug
··· 157 157 config DRM_I915_DEBUG_GUC 158 158 bool "Enable additional driver debugging for GuC" 159 159 depends on DRM_I915 160 + select STACKDEPOT 160 161 default n 161 162 help 162 163 Choose this option to turn on extra driver debugging that may affect
+8 -6
drivers/gpu/drm/i915/display/intel_fbdev.c
··· 28 28 #include <linux/console.h> 29 29 #include <linux/delay.h> 30 30 #include <linux/errno.h> 31 + #include <linux/fb.h> 31 32 #include <linux/init.h> 32 33 #include <linux/kernel.h> 33 34 #include <linux/mm.h> ··· 85 84 intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU); 86 85 } 87 86 87 + FB_GEN_DEFAULT_DEFERRED_IO_OPS(intel_fbdev, 88 + drm_fb_helper_damage_range, 89 + drm_fb_helper_damage_area) 90 + 88 91 static int intel_fbdev_set_par(struct fb_info *info) 89 92 { 90 93 struct intel_fbdev *ifbdev = to_intel_fbdev(info->par); ··· 140 135 141 136 static const struct fb_ops intelfb_ops = { 142 137 .owner = THIS_MODULE, 138 + __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev), 143 139 DRM_FB_HELPER_DEFAULT_OPS, 144 140 .fb_set_par = intel_fbdev_set_par, 145 - .fb_read = drm_fb_helper_cfb_read, 146 - .fb_write = drm_fb_helper_cfb_write, 147 - .fb_fillrect = drm_fb_helper_cfb_fillrect, 148 - .fb_copyarea = drm_fb_helper_cfb_copyarea, 149 - .fb_imageblit = drm_fb_helper_cfb_imageblit, 150 - .fb_pan_display = intel_fbdev_pan_display, 151 141 .fb_blank = intel_fbdev_blank, 142 + .fb_pan_display = intel_fbdev_pan_display, 143 + __FB_DEFAULT_DEFERRED_OPS_DRAW(intel_fbdev), 152 144 .fb_mmap = intel_fbdev_mmap, 153 145 }; 154 146
+5 -1
drivers/gpu/drm/i915/gem/i915_gem_context.c
··· 964 964 RCU_INIT_POINTER(ce->gem_context, ctx); 965 965 966 966 GEM_BUG_ON(intel_context_is_pinned(ce)); 967 - ce->ring_size = SZ_16K; 967 + 968 + if (ce->engine->class == COMPUTE_CLASS) 969 + ce->ring_size = SZ_512K; 970 + else 971 + ce->ring_size = SZ_16K; 968 972 969 973 i915_vm_put(ce->vm); 970 974 ce->vm = i915_gem_context_get_eb_vm(ctx);
+40
drivers/gpu/drm/i915/gem/i915_gem_create.c
··· 245 245 unsigned int n_placements; 246 246 unsigned int placement_mask; 247 247 unsigned long flags; 248 + unsigned int pat_index; 248 249 }; 249 250 250 251 static void repr_placements(char *buf, size_t size, ··· 395 394 return 0; 396 395 } 397 396 397 + static int ext_set_pat(struct i915_user_extension __user *base, void *data) 398 + { 399 + struct create_ext *ext_data = data; 400 + struct drm_i915_private *i915 = ext_data->i915; 401 + struct drm_i915_gem_create_ext_set_pat ext; 402 + unsigned int max_pat_index; 403 + 404 + BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) != 405 + offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd)); 406 + 407 + /* Limiting the extension only to Meteor Lake */ 408 + if (!IS_METEORLAKE(i915)) 409 + return -ENODEV; 410 + 411 + if (copy_from_user(&ext, base, sizeof(ext))) 412 + return -EFAULT; 413 + 414 + max_pat_index = INTEL_INFO(i915)->max_pat_index; 415 + 416 + if (ext.pat_index > max_pat_index) { 417 + drm_dbg(&i915->drm, "PAT index is invalid: %u\n", 418 + ext.pat_index); 419 + return -EINVAL; 420 + } 421 + 422 + ext_data->pat_index = ext.pat_index; 423 + 424 + return 0; 425 + } 426 + 398 427 static const i915_user_extension_fn create_extensions[] = { 399 428 [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements, 400 429 [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected, 430 + [I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat, 401 431 }; 402 432 433 + #define PAT_INDEX_NOT_SET 0xffff 403 434 /** 404 435 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it. 
405 436 * @dev: drm device pointer ··· 451 418 if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) 452 419 return -EINVAL; 453 420 421 + ext_data.pat_index = PAT_INDEX_NOT_SET; 454 422 ret = i915_user_extensions(u64_to_user_ptr(args->extensions), 455 423 create_extensions, 456 424 ARRAY_SIZE(create_extensions), ··· 487 453 ext_data.flags); 488 454 if (IS_ERR(obj)) 489 455 return PTR_ERR(obj); 456 + 457 + if (ext_data.pat_index != PAT_INDEX_NOT_SET) { 458 + i915_gem_object_set_pat_index(obj, ext_data.pat_index); 459 + /* Mark pat_index is set by UMD */ 460 + obj->pat_set_by_user = true; 461 + } 490 462 491 463 return i915_gem_publish(obj, file, &args->size, &args->handle); 492 464 }
+6
drivers/gpu/drm/i915/gem/i915_gem_object.c
··· 209 209 return false; 210 210 211 211 /* 212 + * Always flush cache for UMD objects at creation time. 213 + */ 214 + if (obj->pat_set_by_user) 215 + return true; 216 + 217 + /* 212 218 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it 213 219 * possible for userspace to bypass the GTT caching bits set by the 214 220 * kernel, as per the given object cache_level. This is troublesome
+10 -4
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
··· 348 348 continue; 349 349 350 350 ce = intel_context_create(data[m].ce[0]->engine); 351 - if (IS_ERR(ce)) 351 + if (IS_ERR(ce)) { 352 + err = PTR_ERR(ce); 352 353 goto out; 354 + } 353 355 354 356 err = intel_context_pin(ce); 355 357 if (err) { ··· 371 369 372 370 worker = kthread_create_worker(0, "igt/parallel:%s", 373 371 data[n].ce[0]->engine->name); 374 - if (IS_ERR(worker)) 372 + if (IS_ERR(worker)) { 373 + err = PTR_ERR(worker); 375 374 goto out; 375 + } 376 376 377 377 data[n].worker = worker; 378 378 } ··· 403 399 } 404 400 } 405 401 406 - if (igt_live_test_end(&t)) 407 - err = -EIO; 402 + if (igt_live_test_end(&t)) { 403 + err = err ?: -EIO; 404 + break; 405 + } 408 406 } 409 407 410 408 out:
+38
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
··· 177 177 return cs; 178 178 } 179 179 180 + static int mtl_dummy_pipe_control(struct i915_request *rq) 181 + { 182 + /* Wa_14016712196 */ 183 + if (IS_MTL_GRAPHICS_STEP(rq->engine->i915, M, STEP_A0, STEP_B0) || 184 + IS_MTL_GRAPHICS_STEP(rq->engine->i915, P, STEP_A0, STEP_B0)) { 185 + u32 *cs; 186 + 187 + /* dummy PIPE_CONTROL + depth flush */ 188 + cs = intel_ring_begin(rq, 6); 189 + if (IS_ERR(cs)) 190 + return PTR_ERR(cs); 191 + cs = gen12_emit_pipe_control(cs, 192 + 0, 193 + PIPE_CONTROL_DEPTH_CACHE_FLUSH, 194 + LRC_PPHWSP_SCRATCH_ADDR); 195 + intel_ring_advance(rq, cs); 196 + } 197 + 198 + return 0; 199 + } 200 + 180 201 int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) 181 202 { 182 203 struct intel_engine_cs *engine = rq->engine; 183 204 184 205 if (mode & EMIT_FLUSH) { 185 206 u32 flags = 0; 207 + int err; 186 208 u32 *cs; 209 + 210 + err = mtl_dummy_pipe_control(rq); 211 + if (err) 212 + return err; 187 213 188 214 flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; 189 215 flags |= PIPE_CONTROL_FLUSH_L3; ··· 243 217 if (mode & EMIT_INVALIDATE) { 244 218 u32 flags = 0; 245 219 u32 *cs, count; 220 + int err; 221 + 222 + err = mtl_dummy_pipe_control(rq); 223 + if (err) 224 + return err; 246 225 247 226 flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; 248 227 flags |= PIPE_CONTROL_TLB_INVALIDATE; ··· 763 732 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 764 733 PIPE_CONTROL_DC_FLUSH_ENABLE | 765 734 PIPE_CONTROL_FLUSH_ENABLE); 735 + 736 + /* Wa_14016712196 */ 737 + if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) || 738 + IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) 739 + /* dummy PIPE_CONTROL + depth flush */ 740 + cs = gen12_emit_pipe_control(cs, 0, 741 + PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0); 766 742 767 743 if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) 768 744 /* Wa_1409600907 */
+16 -13
drivers/gpu/drm/i915/gt/intel_ggtt.c
··· 1015 1015 1016 1016 /* 1017 1017 * For pre-gen8 platforms pat_index is the same as enum i915_cache_level, 1018 - * so these PTE encode functions are left with using cache_level. 1018 + * so the switch-case statements in these PTE encode functions are still valid. 1019 1019 * See translation table LEGACY_CACHELEVEL. 1020 1020 */ 1021 1021 static u64 snb_pte_encode(dma_addr_t addr, 1022 - enum i915_cache_level level, 1022 + unsigned int pat_index, 1023 1023 u32 flags) 1024 1024 { 1025 1025 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; 1026 1026 1027 - switch (level) { 1027 + switch (pat_index) { 1028 1028 case I915_CACHE_L3_LLC: 1029 1029 case I915_CACHE_LLC: 1030 1030 pte |= GEN6_PTE_CACHE_LLC; ··· 1033 1033 pte |= GEN6_PTE_UNCACHED; 1034 1034 break; 1035 1035 default: 1036 - MISSING_CASE(level); 1036 + MISSING_CASE(pat_index); 1037 1037 } 1038 1038 1039 1039 return pte; 1040 1040 } 1041 1041 1042 1042 static u64 ivb_pte_encode(dma_addr_t addr, 1043 - enum i915_cache_level level, 1043 + unsigned int pat_index, 1044 1044 u32 flags) 1045 1045 { 1046 1046 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; 1047 1047 1048 - switch (level) { 1048 + switch (pat_index) { 1049 1049 case I915_CACHE_L3_LLC: 1050 1050 pte |= GEN7_PTE_CACHE_L3_LLC; 1051 1051 break; ··· 1056 1056 pte |= GEN6_PTE_UNCACHED; 1057 1057 break; 1058 1058 default: 1059 - MISSING_CASE(level); 1059 + MISSING_CASE(pat_index); 1060 1060 } 1061 1061 1062 1062 return pte; 1063 1063 } 1064 1064 1065 1065 static u64 byt_pte_encode(dma_addr_t addr, 1066 - enum i915_cache_level level, 1066 + unsigned int pat_index, 1067 1067 u32 flags) 1068 1068 { 1069 1069 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; ··· 1071 1071 if (!(flags & PTE_READ_ONLY)) 1072 1072 pte |= BYT_PTE_WRITEABLE; 1073 1073 1074 - if (level != I915_CACHE_NONE) 1074 + if (pat_index != I915_CACHE_NONE) 1075 1075 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; 1076 1076 1077 1077 return pte; 1078 1078 } 1079 1079 
1080 1080 static u64 hsw_pte_encode(dma_addr_t addr, 1081 - enum i915_cache_level level, 1081 + unsigned int pat_index, 1082 1082 u32 flags) 1083 1083 { 1084 1084 gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; 1085 1085 1086 - if (level != I915_CACHE_NONE) 1086 + if (pat_index != I915_CACHE_NONE) 1087 1087 pte |= HSW_WB_LLC_AGE3; 1088 1088 1089 1089 return pte; 1090 1090 } 1091 1091 1092 1092 static u64 iris_pte_encode(dma_addr_t addr, 1093 - enum i915_cache_level level, 1093 + unsigned int pat_index, 1094 1094 u32 flags) 1095 1095 { 1096 1096 gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; 1097 1097 1098 - switch (level) { 1098 + switch (pat_index) { 1099 1099 case I915_CACHE_NONE: 1100 1100 break; 1101 1101 case I915_CACHE_WT: ··· 1325 1325 if (drm_mm_node_allocated(&ggtt->error_capture)) 1326 1326 ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start, 1327 1327 ggtt->error_capture.size); 1328 + 1329 + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) 1330 + intel_uc_resume_mappings(&gt->uc); 1328 1331 1329 1332 ggtt->invalidate(ggtt); 1330 1333
+4 -4
drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
··· 18 18 static void gmch_ggtt_insert_page(struct i915_address_space *vm, 19 19 dma_addr_t addr, 20 20 u64 offset, 21 - enum i915_cache_level cache_level, 21 + unsigned int pat_index, 22 22 u32 unused) 23 23 { 24 - unsigned int flags = (cache_level == I915_CACHE_NONE) ? 24 + unsigned int flags = (pat_index == I915_CACHE_NONE) ? 25 25 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 26 26 27 27 intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); ··· 29 29 30 30 static void gmch_ggtt_insert_entries(struct i915_address_space *vm, 31 31 struct i915_vma_resource *vma_res, 32 - enum i915_cache_level cache_level, 32 + unsigned int pat_index, 33 33 u32 unused) 34 34 { 35 - unsigned int flags = (cache_level == I915_CACHE_NONE) ? 35 + unsigned int flags = (pat_index == I915_CACHE_NONE) ? 36 36 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 37 37 38 38 intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
+8 -4
drivers/gpu/drm/i915/gt/selftest_execlists.c
··· 1530 1530 struct drm_i915_gem_object *obj; 1531 1531 struct i915_vma *vma; 1532 1532 enum intel_engine_id id; 1533 - int err = -ENOMEM; 1534 1533 u32 *map; 1534 + int err; 1535 1535 1536 1536 /* 1537 1537 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can ··· 1539 1539 */ 1540 1540 1541 1541 ctx_hi = kernel_context(gt->i915, NULL); 1542 - if (!ctx_hi) 1543 - return -ENOMEM; 1542 + if (IS_ERR(ctx_hi)) 1543 + return PTR_ERR(ctx_hi); 1544 + 1544 1545 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; 1545 1546 1546 1547 ctx_lo = kernel_context(gt->i915, NULL); 1547 - if (!ctx_lo) 1548 + if (IS_ERR(ctx_lo)) { 1549 + err = PTR_ERR(ctx_lo); 1548 1550 goto err_ctx_hi; 1551 + } 1552 + 1549 1553 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; 1550 1554 1551 1555 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+9 -2
drivers/gpu/drm/i915/gt/selftest_tlb.c
··· 190 190 191 191 static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt) 192 192 { 193 + struct intel_memory_region *mr = gt->i915->mm.regions[INTEL_REGION_LMEM_0]; 194 + resource_size_t size = SZ_1G; 195 + 193 196 /* 194 197 * Allocation of largest possible page size allows to test all types 195 - * of pages. 198 + * of pages. To succeed with both allocations, especially in case of Small 199 + * BAR, try to allocate no more than quarter of mappable memory. 196 200 */ 197 - return i915_gem_object_create_lmem(gt->i915, SZ_1G, I915_BO_ALLOC_CONTIGUOUS); 201 + if (mr && size > mr->io_size / 4) 202 + size = mr->io_size / 4; 203 + 204 + return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS); 198 205 } 199 206 200 207 static struct drm_i915_gem_object *create_smem(struct intel_gt *gt)
-21
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
··· 167 167 * - **flags**, holds various bits to control message handling 168 168 */ 169 169 170 - /* 171 - * Definition of the command transport message header (DW0) 172 - * 173 - * bit[4..0] message len (in dwords) 174 - * bit[7..5] reserved 175 - * bit[8] response (G2H only) 176 - * bit[8] write fence to desc (H2G only) 177 - * bit[9] write status to H2G buff (H2G only) 178 - * bit[10] send status back via G2H (H2G only) 179 - * bit[15..11] reserved 180 - * bit[31..16] action code 181 - */ 182 - #define GUC_CT_MSG_LEN_SHIFT 0 183 - #define GUC_CT_MSG_LEN_MASK 0x1F 184 - #define GUC_CT_MSG_IS_RESPONSE (1 << 8) 185 - #define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) 186 - #define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) 187 - #define GUC_CT_MSG_SEND_STATUS (1 << 10) 188 - #define GUC_CT_MSG_ACTION_SHIFT 16 189 - #define GUC_CT_MSG_ACTION_MASK 0xFFFF 190 - 191 170 #endif /* _ABI_GUC_COMMUNICATION_CTB_ABI_H */
+30
drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
··· 24 24 * | | 30:28 | **TYPE** - message type | 25 25 * | | | - _`GUC_HXG_TYPE_REQUEST` = 0 | 26 26 * | | | - _`GUC_HXG_TYPE_EVENT` = 1 | 27 + * | | | - _`GUC_HXG_TYPE_FAST_REQUEST` = 2 | 27 28 * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_BUSY` = 3 | 28 29 * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_RETRY` = 5 | 29 30 * | | | - _`GUC_HXG_TYPE_RESPONSE_FAILURE` = 6 | ··· 47 46 #define GUC_HXG_MSG_0_TYPE (0x7 << 28) 48 47 #define GUC_HXG_TYPE_REQUEST 0u 49 48 #define GUC_HXG_TYPE_EVENT 1u 49 + #define GUC_HXG_TYPE_FAST_REQUEST 2u 50 50 #define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u 51 51 #define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u 52 52 #define GUC_HXG_TYPE_RESPONSE_FAILURE 6u ··· 90 88 #define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfff << 16) 91 89 #define GUC_HXG_REQUEST_MSG_0_ACTION (0xffff << 0) 92 90 #define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD 91 + 92 + /** 93 + * DOC: HXG Fast Request 94 + * 95 + * The `HXG Fast Request`_ message should be used to initiate asynchronous activity 96 + * for which confirmation or return data is not expected. 97 + * 98 + * If confirmation is required then `HXG Request`_ shall be used instead. 99 + * 100 + * The recipient of this message may only use `HXG Failure`_ message if it was 101 + * unable to accept this request (like invalid data). 102 + * 103 + * Format of `HXG Fast Request`_ message is same as `HXG Request`_ except @TYPE. 
104 + * 105 + * +---+-------+--------------------------------------------------------------+ 106 + * | | Bits | Description | 107 + * +===+=======+==============================================================+ 108 + * | 0 | 31 | ORIGIN - see `HXG Message`_ | 109 + * | +-------+--------------------------------------------------------------+ 110 + * | | 30:28 | TYPE = `GUC_HXG_TYPE_FAST_REQUEST`_ | 111 + * | +-------+--------------------------------------------------------------+ 112 + * | | 27:16 | DATA0 - see `HXG Request`_ | 113 + * | +-------+--------------------------------------------------------------+ 114 + * | | 15:0 | ACTION - see `HXG Request`_ | 115 + * +---+-------+--------------------------------------------------------------+ 116 + * |...| | DATAn - see `HXG Request`_ | 117 + * +---+-------+--------------------------------------------------------------+ 118 + */ 93 119 94 120 /** 95 121 * DOC: HXG Event
+74
drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef _INTEL_GSC_BINARY_HEADERS_H_ 7 + #define _INTEL_GSC_BINARY_HEADERS_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + /* Code partition directory (CPD) structures */ 12 + struct intel_gsc_cpd_header_v2 { 13 + u32 header_marker; 14 + #define INTEL_GSC_CPD_HEADER_MARKER 0x44504324 15 + 16 + u32 num_of_entries; 17 + u8 header_version; 18 + u8 entry_version; 19 + u8 header_length; /* in bytes */ 20 + u8 flags; 21 + u32 partition_name; 22 + u32 crc32; 23 + } __packed; 24 + 25 + struct intel_gsc_cpd_entry { 26 + u8 name[12]; 27 + 28 + /* 29 + * Bits 0-24: offset from the beginning of the code partition 30 + * Bit 25: huffman compressed 31 + * Bits 26-31: reserved 32 + */ 33 + u32 offset; 34 + #define INTEL_GSC_CPD_ENTRY_OFFSET_MASK GENMASK(24, 0) 35 + #define INTEL_GSC_CPD_ENTRY_HUFFMAN_COMP BIT(25) 36 + 37 + /* 38 + * Module/Item length, in bytes. For Huffman-compressed modules, this 39 + * refers to the uncompressed size. For software-compressed modules, 40 + * this refers to the compressed size. 41 + */ 42 + u32 length; 43 + 44 + u8 reserved[4]; 45 + } __packed; 46 + 47 + struct intel_gsc_version { 48 + u16 major; 49 + u16 minor; 50 + u16 hotfix; 51 + u16 build; 52 + } __packed; 53 + 54 + struct intel_gsc_manifest_header { 55 + u32 header_type; /* 0x4 for manifest type */ 56 + u32 header_length; /* in dwords */ 57 + u32 header_version; 58 + u32 flags; 59 + u32 vendor; 60 + u32 date; 61 + u32 size; /* In dwords, size of entire manifest (header + extensions) */ 62 + u32 header_id; 63 + u32 internal_data; 64 + struct intel_gsc_version fw_version; 65 + u32 security_version; 66 + struct intel_gsc_version meu_kit_version; 67 + u32 meu_manifest_version; 68 + u8 general_data[4]; 69 + u8 reserved3[56]; 70 + u32 modulus_size; /* in dwords */ 71 + u32 exponent_size; /* in dwords */ 72 + } __packed; 73 + 74 + #endif
+2 -2
drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
··· 5 5 6 6 #include <linux/component.h> 7 7 8 - #include "drm/i915_component.h" 9 - #include "drm/i915_gsc_proxy_mei_interface.h" 8 + #include <drm/i915_component.h> 9 + #include <drm/i915_gsc_proxy_mei_interface.h> 10 10 11 11 #include "gt/intel_gt.h" 12 12 #include "gt/intel_gt_print.h"
+29 -5
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
··· 29 29 30 30 if (actions & GSC_ACTION_FW_LOAD) { 31 31 ret = intel_gsc_uc_fw_upload(gsc); 32 - if (ret == -EEXIST) /* skip proxy if not a new load */ 33 - actions &= ~GSC_ACTION_FW_LOAD; 34 - else if (ret) 32 + if (!ret) 33 + /* setup proxy on a new load */ 34 + actions |= GSC_ACTION_SW_PROXY; 35 + else if (ret != -EEXIST) 35 36 goto out_put; 37 + 38 + /* 39 + * The HuC auth can be done both before or after the proxy init; 40 + * if done after, a proxy request will be issued and must be 41 + * serviced before the authentication can complete. 42 + * Since this worker also handles proxy requests, we can't 43 + * perform an action that requires the proxy from within it and 44 + * then stall waiting for it, because we'd be blocking the 45 + * service path. Therefore, it is easier for us to load HuC 46 + * first and do proxy later. The GSC will ack the HuC auth and 47 + * then send the HuC proxy request as part of the proxy init 48 + * flow. 49 + * Note that we can only do the GSC auth if the GuC auth was 50 + * successful. 51 + */ 52 + if (intel_uc_uses_huc(&gt->uc) && 53 + intel_huc_is_authenticated(&gt->uc.huc, INTEL_HUC_AUTH_BY_GUC)) 54 + intel_huc_auth(&gt->uc.huc, INTEL_HUC_AUTH_BY_GSC); 36 55 } 37 56 38 - if (actions & (GSC_ACTION_FW_LOAD | GSC_ACTION_SW_PROXY)) { 57 + if (actions & GSC_ACTION_SW_PROXY) { 39 58 if (!intel_gsc_uc_fw_init_done(gsc)) { 40 59 gt_err(gt, "Proxy request received with GSC not loaded!\n"); 41 60 goto out_put; ··· 109 90 { 110 91 struct intel_gt *gt = gsc_uc_to_gt(gsc); 111 92 112 - intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC); 93 + /* 94 + * GSC FW needs to be copied to a dedicated memory allocations for 95 + * loading (see gsc->local), so we don't need to GGTT map the FW image 96 + * itself into GGTT. 97 + */ 98 + intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC, false); 113 99 INIT_WORK(&gsc->work, gsc_work); 114 100 115 101 /* we can arrive here from i915_driver_early_probe for primary
+2 -2
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
··· 99 99 u64 host_session_id) 100 100 { 101 101 host_session_id &= ~HOST_SESSION_MASK; 102 - if (heci_client_id == HECI_MEADDRESS_PXP) 102 + if (host_session_id && heci_client_id == HECI_MEADDRESS_PXP) 103 103 host_session_id |= HOST_SESSION_PXP_SINGLE; 104 104 105 105 header->validity_marker = GSC_HECI_VALIDITY_MARKER; ··· 202 202 if (++trials < 10) 203 203 goto retry; 204 204 else 205 - err = EAGAIN; 205 + err = -EAGAIN; 206 206 } 207 207 } 208 208 i915_gem_ww_ctx_fini(&ww);
+1 -1
drivers/gpu/drm/i915/gt/uc/intel_guc.c
··· 164 164 struct intel_gt *gt = guc_to_gt(guc); 165 165 struct drm_i915_private *i915 = gt->i915; 166 166 167 - intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC); 167 + intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, true); 168 168 intel_guc_ct_init_early(&guc->ct); 169 169 intel_guc_log_init_early(&guc->log); 170 170 intel_guc_submission_init_early(guc);
+71 -10
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
··· 376 376 } 377 377 } 378 378 379 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 380 + static void ct_track_lost_and_found(struct intel_guc_ct *ct, u32 fence, u32 action) 381 + { 382 + unsigned int lost = fence % ARRAY_SIZE(ct->requests.lost_and_found); 383 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC) 384 + unsigned long entries[SZ_32]; 385 + unsigned int n; 386 + 387 + n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); 388 + 389 + /* May be called under spinlock, so avoid sleeping */ 390 + ct->requests.lost_and_found[lost].stack = stack_depot_save(entries, n, GFP_NOWAIT); 391 + #endif 392 + ct->requests.lost_and_found[lost].fence = fence; 393 + ct->requests.lost_and_found[lost].action = action; 394 + } 395 + #endif 396 + 379 397 static u32 ct_get_next_fence(struct intel_guc_ct *ct) 380 398 { 381 399 /* For now it's trivial */ ··· 444 426 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) | 445 427 FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence); 446 428 447 - type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT : 429 + type = (flags & INTEL_GUC_CT_SEND_NB) ? 
GUC_HXG_TYPE_FAST_REQUEST : 448 430 GUC_HXG_TYPE_REQUEST; 449 431 hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) | 450 - FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | 451 - GUC_HXG_EVENT_MSG_0_DATA0, action[0]); 432 + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION | 433 + GUC_HXG_REQUEST_MSG_0_DATA0, action[0]); 452 434 453 435 CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n", 454 436 tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]); ··· 464 446 tail = (tail + 1) % size; 465 447 } 466 448 GEM_BUG_ON(tail > size); 449 + 450 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 451 + ct_track_lost_and_found(ct, fence, 452 + FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0])); 453 + #endif 467 454 468 455 /* 469 456 * make sure H2G buffer update and LRC tail update (if this triggering a ··· 698 675 699 676 GEM_BUG_ON(!ct->enabled); 700 677 GEM_BUG_ON(!len); 701 - GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); 678 + GEM_BUG_ON(len > GUC_CTB_HXG_MSG_MAX_LEN - GUC_CTB_HDR_LEN); 702 679 GEM_BUG_ON(!response_buf && response_buf_size); 703 680 might_sleep(); 704 681 ··· 976 953 return -EPIPE; 977 954 } 978 955 956 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 957 + static bool ct_check_lost_and_found(struct intel_guc_ct *ct, u32 fence) 958 + { 959 + unsigned int n; 960 + char *buf = NULL; 961 + bool found = false; 962 + 963 + lockdep_assert_held(&ct->requests.lock); 964 + 965 + for (n = 0; n < ARRAY_SIZE(ct->requests.lost_and_found); n++) { 966 + if (ct->requests.lost_and_found[n].fence != fence) 967 + continue; 968 + found = true; 969 + 970 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC) 971 + buf = kmalloc(SZ_4K, GFP_NOWAIT); 972 + if (buf && stack_depot_snprint(ct->requests.lost_and_found[n].stack, 973 + buf, SZ_4K, 0)) { 974 + CT_ERROR(ct, "Fence %u was used by action %#04x sent at\n%s", 975 + fence, ct->requests.lost_and_found[n].action, buf); 976 + break; 977 + } 978 + #endif 979 + CT_ERROR(ct, "Fence %u was used by action %#04x\n", 980 + fence, ct->requests.lost_and_found[n].action); 981 + break; 982 + 
} 983 + kfree(buf); 984 + return found; 985 + } 986 + #else 987 + static bool ct_check_lost_and_found(struct intel_guc_ct *ct, u32 fence) 988 + { 989 + return false; 990 + } 991 + #endif 992 + 979 993 static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response) 980 994 { 981 995 u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]); ··· 1054 994 break; 1055 995 } 1056 996 if (!found) { 1057 - CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence); 1058 - CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence, 1059 - ct->requests.last_fence); 1060 - list_for_each_entry(req, &ct->requests.pending, link) 1061 - CT_ERROR(ct, "request %u awaits response\n", 1062 - req->fence); 997 + CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n", 998 + len, hxg[0], fence, ct->requests.last_fence); 999 + if (!ct_check_lost_and_found(ct, fence)) { 1000 + list_for_each_entry(req, &ct->requests.pending, link) 1001 + CT_ERROR(ct, "request %u awaits response\n", 1002 + req->fence); 1003 + } 1063 1004 err = -ENOKEY; 1064 1005 } 1065 1006 spin_unlock_irqrestore(&ct->requests.lock, flags);
+11
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
··· 8 8 9 9 #include <linux/interrupt.h> 10 10 #include <linux/spinlock.h> 11 + #include <linux/stackdepot.h> 11 12 #include <linux/workqueue.h> 12 13 #include <linux/ktime.h> 13 14 #include <linux/wait.h> ··· 82 81 83 82 struct list_head incoming; /* incoming requests */ 84 83 struct work_struct worker; /* handler for incoming requests */ 84 + 85 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 86 + struct { 87 + u16 fence; 88 + u16 action; 89 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC) 90 + depot_stack_handle_t stack; 91 + #endif 92 + } lost_and_found[SZ_16]; 93 + #endif 85 94 } requests; 86 95 87 96 /** @stall_time: time of first time a CTB submission is stalled */
-33
drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
··· 35 35 #define GUC_MAX_CONTEXT_ID 65535 36 36 #define GUC_INVALID_CONTEXT_ID GUC_MAX_CONTEXT_ID 37 37 38 - #define GUC_RENDER_ENGINE 0 39 - #define GUC_VIDEO_ENGINE 1 40 - #define GUC_BLITTER_ENGINE 2 41 - #define GUC_VIDEOENHANCE_ENGINE 3 42 - #define GUC_VIDEO_ENGINE2 4 43 - #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) 44 - 45 38 #define GUC_RENDER_CLASS 0 46 39 #define GUC_VIDEO_CLASS 1 47 40 #define GUC_VIDEOENHANCE_CLASS 2 ··· 490 497 u32 flags; 491 498 }; 492 499 u32 version; 493 - } __packed; 494 - 495 - struct guc_ctx_report { 496 - u32 report_return_status; 497 - u32 reserved1[64]; 498 - u32 affected_count; 499 - u32 reserved2[2]; 500 - } __packed; 501 - 502 - /* GuC Shared Context Data Struct */ 503 - struct guc_shared_ctx_data { 504 - u32 addr_of_last_preempted_data_low; 505 - u32 addr_of_last_preempted_data_high; 506 - u32 addr_of_last_preempted_data_high_tmp; 507 - u32 padding; 508 - u32 is_mapped_to_proxy; 509 - u32 proxy_ctx_id; 510 - u32 engine_reset_ctx_id; 511 - u32 media_reset_count; 512 - u32 reserved1[8]; 513 - u32 uk_last_ctx_switch_reason; 514 - u32 was_reset; 515 - u32 lrca_gpu_addr; 516 - u64 execlist_ctx; 517 - u32 reserved2[66]; 518 - struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM]; 519 500 } __packed; 520 501 521 502 /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
+163 -61
drivers/gpu/drm/i915/gt/uc/intel_huc.c
··· 6 6 #include <linux/types.h> 7 7 8 8 #include "gt/intel_gt.h" 9 - #include "gt/intel_gt_print.h" 10 9 #include "intel_guc_reg.h" 11 10 #include "intel_huc.h" 11 + #include "intel_huc_print.h" 12 12 #include "i915_drv.h" 13 + #include "i915_reg.h" 14 + #include "pxp/intel_pxp_cmd_interface_43.h" 13 15 14 16 #include <linux/device/bus.h> 15 17 #include <linux/mei_aux.h> 16 - 17 - #define huc_printk(_huc, _level, _fmt, ...) \ 18 - gt_##_level(huc_to_gt(_huc), "HuC: " _fmt, ##__VA_ARGS__) 19 - #define huc_err(_huc, _fmt, ...) huc_printk((_huc), err, _fmt, ##__VA_ARGS__) 20 - #define huc_warn(_huc, _fmt, ...) huc_printk((_huc), warn, _fmt, ##__VA_ARGS__) 21 - #define huc_notice(_huc, _fmt, ...) huc_printk((_huc), notice, _fmt, ##__VA_ARGS__) 22 - #define huc_info(_huc, _fmt, ...) huc_printk((_huc), info, _fmt, ##__VA_ARGS__) 23 - #define huc_dbg(_huc, _fmt, ...) huc_printk((_huc), dbg, _fmt, ##__VA_ARGS__) 24 - #define huc_probe_error(_huc, _fmt, ...) huc_printk((_huc), probe_error, _fmt, ##__VA_ARGS__) 25 18 26 19 /** 27 20 * DOC: HuC ··· 24 31 * capabilities by adding HuC specific commands to batch buffers. 25 32 * 26 33 * The kernel driver is only responsible for loading the HuC firmware and 27 - * triggering its security authentication, which is performed by the GuC on 28 - * older platforms and by the GSC on newer ones. For the GuC to correctly 29 - * perform the authentication, the HuC binary must be loaded before the GuC one. 34 + * triggering its security authentication. This is done differently depending 35 + * on the platform: 36 + * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA 37 + * and the authentication via GuC 38 + * - DG2: load and authentication are both performed via GSC. 
39 + * - MTL and newer platforms: the load is performed via DMA (same as with 40 + * not-DG2 older platforms), while the authentication is done in 2-steps, 41 + * a first auth for clear-media workloads via GuC and a second one for all 42 + * workloads via GSC. 43 + * On platforms where the GuC does the authentication, to correctly do so the 44 + * HuC binary must be loaded before the GuC one. 30 45 * Loading the HuC is optional; however, not using the HuC might negatively 31 46 * impact power usage and/or performance of media workloads, depending on the 32 47 * use-cases. 33 48 * HuC must be reloaded on events that cause the WOPCM to lose its contents 34 - * (S3/S4, FLR); GuC-authenticated HuC must also be reloaded on GuC/GT reset, 35 - * while GSC-managed HuC will survive that. 49 + * (S3/S4, FLR); on older platforms the HuC must also be reloaded on GuC/GT 50 + * reset, while on newer ones it will survive that. 36 51 * 37 52 * See https://github.com/intel/media-driver for the latest details on HuC 38 53 * functionality. 
··· 116 115 { 117 116 struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer); 118 117 119 - if (!intel_huc_is_authenticated(huc)) { 118 + if (!intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) { 120 119 if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC) 121 120 huc_notice(huc, "timed out waiting for MEI GSC\n"); 122 121 else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP) ··· 134 133 { 135 134 ktime_t delay; 136 135 137 - GEM_BUG_ON(intel_huc_is_authenticated(huc)); 136 + GEM_BUG_ON(intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)); 138 137 139 138 /* 140 139 * On resume we don't have to wait for MEI-GSC to be re-probed, but we ··· 277 276 struct drm_i915_private *i915 = huc_to_gt(huc)->i915; 278 277 struct intel_gt *gt = huc_to_gt(huc); 279 278 280 - intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC); 279 + intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, true); 281 280 282 281 /* 283 282 * we always init the fence as already completed, even if HuC is not ··· 294 293 } 295 294 296 295 if (GRAPHICS_VER(i915) >= 11) { 297 - huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; 298 - huc->status.mask = HUC_LOAD_SUCCESSFUL; 299 - huc->status.value = HUC_LOAD_SUCCESSFUL; 296 + huc->status[INTEL_HUC_AUTH_BY_GUC].reg = GEN11_HUC_KERNEL_LOAD_INFO; 297 + huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_LOAD_SUCCESSFUL; 298 + huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_LOAD_SUCCESSFUL; 300 299 } else { 301 - huc->status.reg = HUC_STATUS2; 302 - huc->status.mask = HUC_FW_VERIFIED; 303 - huc->status.value = HUC_FW_VERIFIED; 300 + huc->status[INTEL_HUC_AUTH_BY_GUC].reg = HUC_STATUS2; 301 + huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_FW_VERIFIED; 302 + huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_FW_VERIFIED; 303 + } 304 + 305 + if (IS_DG2(i915)) { 306 + huc->status[INTEL_HUC_AUTH_BY_GSC].reg = GEN11_HUC_KERNEL_LOAD_INFO; 307 + huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL; 308 + 
huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL; 309 + } else { 310 + huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS5(MTL_GSC_HECI1_BASE); 311 + huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI_FWSTS5_HUC_AUTH_DONE; 312 + huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI_FWSTS5_HUC_AUTH_DONE; 304 313 } 305 314 } 306 315 ··· 318 307 static int check_huc_loading_mode(struct intel_huc *huc) 319 308 { 320 309 struct intel_gt *gt = huc_to_gt(huc); 321 - bool fw_needs_gsc = intel_huc_is_loaded_by_gsc(huc); 322 - bool hw_uses_gsc = false; 310 + bool gsc_enabled = huc->fw.has_gsc_headers; 323 311 324 312 /* 325 313 * The fuse for HuC load via GSC is only valid on platforms that have 326 314 * GuC deprivilege. 327 315 */ 328 316 if (HAS_GUC_DEPRIVILEGE(gt->i915)) 329 - hw_uses_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) & 330 - GSC_LOADS_HUC; 317 + huc->loaded_via_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) & 318 + GSC_LOADS_HUC; 331 319 332 - if (fw_needs_gsc != hw_uses_gsc) { 333 - huc_err(huc, "mismatch between FW (%s) and HW (%s) load modes\n", 334 - HUC_LOAD_MODE_STRING(fw_needs_gsc), HUC_LOAD_MODE_STRING(hw_uses_gsc)); 320 + if (huc->loaded_via_gsc && !gsc_enabled) { 321 + huc_err(huc, "HW requires a GSC-enabled blob, but we found a legacy one\n"); 335 322 return -ENOEXEC; 336 323 } 337 324 338 - /* make sure we can access the GSC via the mei driver if we need it */ 339 - if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) && 340 - fw_needs_gsc) { 341 - huc_info(huc, "can't load due to missing MEI modules\n"); 342 - return -EIO; 325 + /* 326 + * On newer platforms we have GSC-enabled binaries but we load the HuC 327 + * via DMA. To do so we need to find the location of the legacy-style 328 + * binary inside the GSC-enabled one, which we do at fetch time. Make 329 + * sure that we were able to do so if the fuse says we need to load via 330 + * DMA and the binary is GSC-enabled. 
331 + */ 332 + if (!huc->loaded_via_gsc && gsc_enabled && !huc->fw.dma_start_offset) { 333 + huc_err(huc, "HW in DMA mode, but we have an incompatible GSC-enabled blob\n"); 334 + return -ENOEXEC; 343 335 } 344 336 345 - huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(fw_needs_gsc)); 337 + /* 338 + * If the HuC is loaded via GSC, we need to be able to access the GSC. 339 + * On DG2 this is done via the mei components, while on newer platforms 340 + * it is done via the GSCCS, 341 + */ 342 + if (huc->loaded_via_gsc) { 343 + if (IS_DG2(gt->i915)) { 344 + if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP) || 345 + !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) { 346 + huc_info(huc, "can't load due to missing mei modules\n"); 347 + return -EIO; 348 + } 349 + } else { 350 + if (!HAS_ENGINE(gt, GSC0)) { 351 + huc_info(huc, "can't load due to missing GSCCS\n"); 352 + return -EIO; 353 + } 354 + } 355 + } 356 + 357 + huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(huc->loaded_via_gsc)); 346 358 347 359 return 0; 348 360 } 349 361 350 362 int intel_huc_init(struct intel_huc *huc) 351 363 { 364 + struct intel_gt *gt = huc_to_gt(huc); 352 365 int err; 353 366 354 367 err = check_huc_loading_mode(huc); 355 368 if (err) 356 369 goto out; 357 370 371 + if (HAS_ENGINE(gt, GSC0)) { 372 + struct i915_vma *vma; 373 + 374 + vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2); 375 + if (IS_ERR(vma)) { 376 + huc_info(huc, "Failed to allocate heci pkt\n"); 377 + goto out; 378 + } 379 + 380 + huc->heci_pkt = vma; 381 + } 382 + 358 383 err = intel_uc_fw_init(&huc->fw); 359 384 if (err) 360 - goto out; 385 + goto out_pkt; 361 386 362 387 intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE); 363 388 364 389 return 0; 365 390 391 + out_pkt: 392 + if (huc->heci_pkt) 393 + i915_vma_unpin_and_release(&huc->heci_pkt, 0); 366 394 out: 367 395 intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL); 368 396 huc_info(huc, "initialization failed %pe\n", ERR_PTR(err)); ··· 415 365 * 
even if HuC loading is off. 416 366 */ 417 367 delayed_huc_load_fini(huc); 368 + 369 + if (huc->heci_pkt) 370 + i915_vma_unpin_and_release(&huc->heci_pkt, 0); 418 371 419 372 if (intel_uc_fw_is_loadable(&huc->fw)) 420 373 intel_uc_fw_fini(&huc->fw); ··· 436 383 delayed_huc_load_complete(huc); 437 384 } 438 385 439 - int intel_huc_wait_for_auth_complete(struct intel_huc *huc) 386 + static const char *auth_mode_string(struct intel_huc *huc, 387 + enum intel_huc_authentication_type type) 388 + { 389 + bool partial = huc->fw.has_gsc_headers && type == INTEL_HUC_AUTH_BY_GUC; 390 + 391 + return partial ? "clear media" : "all workloads"; 392 + } 393 + 394 + int intel_huc_wait_for_auth_complete(struct intel_huc *huc, 395 + enum intel_huc_authentication_type type) 440 396 { 441 397 struct intel_gt *gt = huc_to_gt(huc); 442 398 int ret; 443 399 444 400 ret = __intel_wait_for_register(gt->uncore, 445 - huc->status.reg, 446 - huc->status.mask, 447 - huc->status.value, 401 + huc->status[type].reg, 402 + huc->status[type].mask, 403 + huc->status[type].value, 448 404 2, 50, NULL); 449 405 450 406 /* mark the load process as complete even if the wait failed */ 451 407 delayed_huc_load_complete(huc); 452 408 453 409 if (ret) { 454 - huc_err(huc, "firmware not verified %pe\n", ERR_PTR(ret)); 410 + huc_err(huc, "firmware not verified for %s: %pe\n", 411 + auth_mode_string(huc, type), ERR_PTR(ret)); 455 412 intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); 456 413 return ret; 457 414 } 458 415 459 416 intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING); 460 - huc_info(huc, "authenticated!\n"); 417 + huc_info(huc, "authenticated for %s\n", auth_mode_string(huc, type)); 461 418 return 0; 462 419 } 463 420 464 421 /** 465 422 * intel_huc_auth() - Authenticate HuC uCode 466 423 * @huc: intel_huc structure 424 + * @type: authentication type (via GuC or via GSC) 467 425 * 468 426 * Called after HuC and GuC firmware loading during intel_uc_init_hw(). 
469 427 * ··· 482 418 * passing the offset of the RSA signature to intel_guc_auth_huc(). It then 483 419 * waits for up to 50ms for firmware verification ACK. 484 420 */ 485 - int intel_huc_auth(struct intel_huc *huc) 421 + int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type) 486 422 { 487 423 struct intel_gt *gt = huc_to_gt(huc); 488 424 struct intel_guc *guc = &gt->uc.guc; ··· 491 427 if (!intel_uc_fw_is_loaded(&huc->fw)) 492 428 return -ENOEXEC; 493 429 494 - /* GSC will do the auth */ 430 + /* GSC will do the auth with the load */ 495 431 if (intel_huc_is_loaded_by_gsc(huc)) 496 432 return -ENODEV; 433 + 434 + if (intel_huc_is_authenticated(huc, type)) 435 + return -EEXIST; 497 436 498 437 ret = i915_inject_probe_error(gt->i915, -ENXIO); 499 438 if (ret) 500 439 goto fail; 501 440 502 - GEM_BUG_ON(intel_uc_fw_is_running(&huc->fw)); 503 - 504 - ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data)); 505 - if (ret) { 506 - huc_err(huc, "authentication by GuC failed %pe\n", ERR_PTR(ret)); 507 - goto fail; 441 + switch (type) { 442 + case INTEL_HUC_AUTH_BY_GUC: 443 + ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data)); 444 + break; 445 + case INTEL_HUC_AUTH_BY_GSC: 446 + ret = intel_huc_fw_auth_via_gsccs(huc); 447 + break; 448 + default: 449 + MISSING_CASE(type); 450 + ret = -EINVAL; 508 451 } 452 + if (ret) 453 + goto fail; 509 454 510 455 /* Check authentication status, it should be done by now */ 511 - ret = intel_huc_wait_for_auth_complete(huc); 456 + ret = intel_huc_wait_for_auth_complete(huc, type); 512 457 if (ret) 513 458 goto fail; 514 459 515 460 return 0; 516 461 517 462 fail: 518 - huc_probe_error(huc, "authentication failed %pe\n", ERR_PTR(ret)); 463 + huc_probe_error(huc, "%s authentication failed %pe\n", 464 + auth_mode_string(huc, type), ERR_PTR(ret)); 519 465 return ret; 520 466 } 521 467 522 - bool intel_huc_is_authenticated(struct intel_huc *huc) 468 + bool 
intel_huc_is_authenticated(struct intel_huc *huc, 469 + enum intel_huc_authentication_type type) 523 470 { 524 471 struct intel_gt *gt = huc_to_gt(huc); 525 472 intel_wakeref_t wakeref; 526 473 u32 status = 0; 527 474 528 475 with_intel_runtime_pm(gt->uncore->rpm, wakeref) 529 - status = intel_uncore_read(gt->uncore, huc->status.reg); 476 + status = intel_uncore_read(gt->uncore, huc->status[type].reg); 530 477 531 - return (status & huc->status.mask) == huc->status.value; 478 + return (status & huc->status[type].mask) == huc->status[type].value; 479 + } 480 + 481 + static bool huc_is_fully_authenticated(struct intel_huc *huc) 482 + { 483 + struct intel_uc_fw *huc_fw = &huc->fw; 484 + 485 + if (!huc_fw->has_gsc_headers) 486 + return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC); 487 + else if (intel_huc_is_loaded_by_gsc(huc) || HAS_ENGINE(huc_to_gt(huc), GSC0)) 488 + return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC); 489 + else 490 + return false; 532 491 } 533 492 534 493 /** ··· 566 479 */ 567 480 int intel_huc_check_status(struct intel_huc *huc) 568 481 { 569 - switch (__intel_uc_fw_status(&huc->fw)) { 482 + struct intel_uc_fw *huc_fw = &huc->fw; 483 + 484 + switch (__intel_uc_fw_status(huc_fw)) { 570 485 case INTEL_UC_FIRMWARE_NOT_SUPPORTED: 571 486 return -ENODEV; 572 487 case INTEL_UC_FIRMWARE_DISABLED: ··· 585 496 break; 586 497 } 587 498 588 - return intel_huc_is_authenticated(huc); 499 + /* 500 + * GSC-enabled binaries loaded via DMA are first partially 501 + * authenticated by GuC and then fully authenticated by GSC 502 + */ 503 + if (huc_is_fully_authenticated(huc)) 504 + return 1; /* full auth */ 505 + else if (huc_fw->has_gsc_headers && !intel_huc_is_loaded_by_gsc(huc) && 506 + intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC)) 507 + return 2; /* clear media only */ 508 + else 509 + return 0; 589 510 } 590 511 591 512 static bool huc_has_delayed_load(struct intel_huc *huc) ··· 609 510 if (!intel_uc_fw_is_loadable(&huc->fw)) 610 
511 return; 611 512 612 - if (intel_huc_is_authenticated(huc)) 513 + if (!huc->fw.has_gsc_headers) 514 + return; 515 + 516 + if (huc_is_fully_authenticated(huc)) 613 517 intel_uc_fw_change_status(&huc->fw, 614 518 INTEL_UC_FIRMWARE_RUNNING); 615 519 else if (huc_has_delayed_load(huc)) ··· 645 543 646 544 with_intel_runtime_pm(gt->uncore->rpm, wakeref) 647 545 drm_printf(p, "HuC status: 0x%08x\n", 648 - intel_uncore_read(gt->uncore, huc->status.reg)); 546 + intel_uncore_read(gt->uncore, huc->status[INTEL_HUC_AUTH_BY_GUC].reg)); 649 547 }
+20 -6
drivers/gpu/drm/i915/gt/uc/intel_huc.h
··· 15 15 #include <linux/hrtimer.h> 16 16 17 17 struct bus_type; 18 + struct i915_vma; 18 19 19 20 enum intel_huc_delayed_load_status { 20 21 INTEL_HUC_WAITING_ON_GSC = 0, 21 22 INTEL_HUC_WAITING_ON_PXP, 22 23 INTEL_HUC_DELAYED_LOAD_ERROR, 24 + }; 25 + 26 + enum intel_huc_authentication_type { 27 + INTEL_HUC_AUTH_BY_GUC = 0, 28 + INTEL_HUC_AUTH_BY_GSC, 29 + INTEL_HUC_AUTH_MAX_MODES 23 30 }; 24 31 25 32 struct intel_huc { ··· 38 31 i915_reg_t reg; 39 32 u32 mask; 40 33 u32 value; 41 - } status; 34 + } status[INTEL_HUC_AUTH_MAX_MODES]; 42 35 43 36 struct { 44 37 struct i915_sw_fence fence; ··· 46 39 struct notifier_block nb; 47 40 enum intel_huc_delayed_load_status status; 48 41 } delayed_load; 42 + 43 + /* for load via GSCCS */ 44 + struct i915_vma *heci_pkt; 45 + 46 + bool loaded_via_gsc; 49 47 }; 50 48 51 49 int intel_huc_sanitize(struct intel_huc *huc); ··· 58 46 int intel_huc_init(struct intel_huc *huc); 59 47 void intel_huc_fini(struct intel_huc *huc); 60 48 void intel_huc_suspend(struct intel_huc *huc); 61 - int intel_huc_auth(struct intel_huc *huc); 62 - int intel_huc_wait_for_auth_complete(struct intel_huc *huc); 49 + int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type); 50 + int intel_huc_wait_for_auth_complete(struct intel_huc *huc, 51 + enum intel_huc_authentication_type type); 52 + bool intel_huc_is_authenticated(struct intel_huc *huc, 53 + enum intel_huc_authentication_type type); 63 54 int intel_huc_check_status(struct intel_huc *huc); 64 55 void intel_huc_update_auth_status(struct intel_huc *huc); 65 - bool intel_huc_is_authenticated(struct intel_huc *huc); 66 56 67 57 void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus); 68 58 void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus); ··· 87 73 88 74 static inline bool intel_huc_is_loaded_by_gsc(const struct intel_huc *huc) 89 75 { 90 - return huc->fw.loaded_via_gsc; 76 + return huc->loaded_via_gsc; 91 
77 } 92 78 93 79 static inline bool intel_huc_wait_required(struct intel_huc *huc) 94 80 { 95 81 return intel_huc_is_used(huc) && intel_huc_is_loaded_by_gsc(huc) && 96 - !intel_huc_is_authenticated(huc); 82 + !intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC); 97 83 } 98 84 99 85 void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
+233 -2
drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
··· 5 5 6 6 #include "gt/intel_gsc.h" 7 7 #include "gt/intel_gt.h" 8 + #include "intel_gsc_binary_headers.h" 9 + #include "intel_gsc_uc_heci_cmd_submit.h" 8 10 #include "intel_huc.h" 9 11 #include "intel_huc_fw.h" 12 + #include "intel_huc_print.h" 10 13 #include "i915_drv.h" 11 14 #include "pxp/intel_pxp_huc.h" 15 + #include "pxp/intel_pxp_cmd_interface_43.h" 16 + 17 + struct mtl_huc_auth_msg_in { 18 + struct intel_gsc_mtl_header header; 19 + struct pxp43_new_huc_auth_in huc_in; 20 + } __packed; 21 + 22 + struct mtl_huc_auth_msg_out { 23 + struct intel_gsc_mtl_header header; 24 + struct pxp43_huc_auth_out huc_out; 25 + } __packed; 26 + 27 + int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc) 28 + { 29 + struct intel_gt *gt = huc_to_gt(huc); 30 + struct drm_i915_private *i915 = gt->i915; 31 + struct drm_i915_gem_object *obj; 32 + struct mtl_huc_auth_msg_in *msg_in; 33 + struct mtl_huc_auth_msg_out *msg_out; 34 + void *pkt_vaddr; 35 + u64 pkt_offset; 36 + int retry = 5; 37 + int err = 0; 38 + 39 + if (!huc->heci_pkt) 40 + return -ENODEV; 41 + 42 + obj = huc->heci_pkt->obj; 43 + pkt_offset = i915_ggtt_offset(huc->heci_pkt); 44 + 45 + pkt_vaddr = i915_gem_object_pin_map_unlocked(obj, 46 + i915_coherent_map_type(i915, obj, true)); 47 + if (IS_ERR(pkt_vaddr)) 48 + return PTR_ERR(pkt_vaddr); 49 + 50 + msg_in = pkt_vaddr; 51 + msg_out = pkt_vaddr + PXP43_HUC_AUTH_INOUT_SIZE; 52 + 53 + intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header, 54 + HECI_MEADDRESS_PXP, 55 + sizeof(*msg_in), 0); 56 + 57 + msg_in->huc_in.header.api_version = PXP_APIVER(4, 3); 58 + msg_in->huc_in.header.command_id = PXP43_CMDID_NEW_HUC_AUTH; 59 + msg_in->huc_in.header.status = 0; 60 + msg_in->huc_in.header.buffer_len = sizeof(msg_in->huc_in) - 61 + sizeof(msg_in->huc_in.header); 62 + msg_in->huc_in.huc_base_address = huc->fw.vma_res.start; 63 + msg_in->huc_in.huc_size = huc->fw.obj->base.size; 64 + 65 + do { 66 + err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, 67 + pkt_offset, 
sizeof(*msg_in), 68 + pkt_offset + PXP43_HUC_AUTH_INOUT_SIZE, 69 + PXP43_HUC_AUTH_INOUT_SIZE); 70 + if (err) { 71 + huc_err(huc, "failed to submit GSC request to auth: %d\n", err); 72 + goto out_unpin; 73 + } 74 + 75 + if (msg_out->header.flags & GSC_OUTFLAG_MSG_PENDING) { 76 + msg_in->header.gsc_message_handle = msg_out->header.gsc_message_handle; 77 + err = -EBUSY; 78 + msleep(50); 79 + } 80 + } while (--retry && err == -EBUSY); 81 + 82 + if (err) 83 + goto out_unpin; 84 + 85 + if (msg_out->header.message_size != sizeof(*msg_out)) { 86 + huc_err(huc, "invalid GSC reply length %u [expected %zu]\n", 87 + msg_out->header.message_size, sizeof(*msg_out)); 88 + err = -EPROTO; 89 + goto out_unpin; 90 + } 91 + 92 + /* 93 + * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already 94 + * loaded. If the same error is ever returned with HuC not loaded we'll 95 + * still catch it when we check the authentication bit later. 96 + */ 97 + if (msg_out->huc_out.header.status != PXP_STATUS_SUCCESS && 98 + msg_out->huc_out.header.status != PXP_STATUS_OP_NOT_PERMITTED) { 99 + huc_err(huc, "auth failed with GSC error = 0x%x\n", 100 + msg_out->huc_out.header.status); 101 + err = -EIO; 102 + goto out_unpin; 103 + } 104 + 105 + out_unpin: 106 + i915_gem_object_unpin_map(obj); 107 + return err; 108 + } 109 + 110 + static void get_version_from_gsc_manifest(struct intel_uc_fw_ver *ver, const void *data) 111 + { 112 + const struct intel_gsc_manifest_header *manifest = data; 113 + 114 + ver->major = manifest->fw_version.major; 115 + ver->minor = manifest->fw_version.minor; 116 + ver->patch = manifest->fw_version.hotfix; 117 + } 118 + 119 + static bool css_valid(const void *data, size_t size) 120 + { 121 + const struct uc_css_header *css = data; 122 + 123 + if (unlikely(size < sizeof(struct uc_css_header))) 124 + return false; 125 + 126 + if (css->module_type != 0x6) 127 + return false; 128 + 129 + if (css->module_vendor != PCI_VENDOR_ID_INTEL) 130 + return false; 131 + 132 + 
return true; 133 + } 134 + 135 + static inline u32 entry_offset(const struct intel_gsc_cpd_entry *entry) 136 + { 137 + return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK; 138 + } 139 + 140 + int intel_huc_fw_get_binary_info(struct intel_uc_fw *huc_fw, const void *data, size_t size) 141 + { 142 + struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); 143 + const struct intel_gsc_cpd_header_v2 *header = data; 144 + const struct intel_gsc_cpd_entry *entry; 145 + size_t min_size = sizeof(*header); 146 + int i; 147 + 148 + if (!huc_fw->has_gsc_headers) { 149 + huc_err(huc, "Invalid FW type for GSC header parsing!\n"); 150 + return -EINVAL; 151 + } 152 + 153 + if (size < sizeof(*header)) { 154 + huc_err(huc, "FW too small! %zu < %zu\n", size, min_size); 155 + return -ENODATA; 156 + } 157 + 158 + /* 159 + * The GSC-enabled HuC binary starts with a directory header, followed 160 + * by a series of entries. Each entry is identified by a name and 161 + * points to a specific section of the binary containing the relevant 162 + * data. The entries we're interested in are: 163 + * - "HUCP.man": points to the GSC manifest header for the HuC, which 164 + * contains the version info. 165 + * - "huc_fw": points to the legacy-style binary that can be used for 166 + * load via the DMA. This entry only contains a valid CSS 167 + * on binaries for platforms that support 2-step HuC load 168 + * via dma and auth via GSC (like MTL). 169 + * 170 + * -------------------------------------------------- 171 + * [ intel_gsc_cpd_header_v2 ] 172 + * -------------------------------------------------- 173 + * [ intel_gsc_cpd_entry[] ] 174 + * [ entry1 ] 175 + * [ ... ] 176 + * [ entryX ] 177 + * [ "HUCP.man" ] 178 + * [ ... ] 179 + * [ offset >----------------------------]------o 180 + * [ ... ] | 181 + * [ entryY ] | 182 + * [ "huc_fw" ] | 183 + * [ ... 
] | 184 + * [ offset >----------------------------]----------o 185 + * -------------------------------------------------- | | 186 + * | | 187 + * -------------------------------------------------- | | 188 + * [ intel_gsc_manifest_header ]<-----o | 189 + * [ ... ] | 190 + * [ intel_gsc_version fw_version ] | 191 + * [ ... ] | 192 + * -------------------------------------------------- | 193 + * | 194 + * -------------------------------------------------- | 195 + * [ data[] ]<---------o 196 + * [ ... ] 197 + * [ ... ] 198 + * -------------------------------------------------- 199 + */ 200 + 201 + if (header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) { 202 + huc_err(huc, "invalid marker for CPD header: 0x%08x!\n", 203 + header->header_marker); 204 + return -EINVAL; 205 + } 206 + 207 + /* we only have binaries with header v2 and entry v1 for now */ 208 + if (header->header_version != 2 || header->entry_version != 1) { 209 + huc_err(huc, "invalid CPD header/entry version %u:%u!\n", 210 + header->header_version, header->entry_version); 211 + return -EINVAL; 212 + } 213 + 214 + if (header->header_length < sizeof(struct intel_gsc_cpd_header_v2)) { 215 + huc_err(huc, "invalid CPD header length %u!\n", 216 + header->header_length); 217 + return -EINVAL; 218 + } 219 + 220 + min_size = header->header_length + sizeof(*entry) * header->num_of_entries; 221 + if (size < min_size) { 222 + huc_err(huc, "FW too small! 
%zu < %zu\n", size, min_size); 223 + return -ENODATA; 224 + } 225 + 226 + entry = data + header->header_length; 227 + 228 + for (i = 0; i < header->num_of_entries; i++, entry++) { 229 + if (strcmp(entry->name, "HUCP.man") == 0) 230 + get_version_from_gsc_manifest(&huc_fw->file_selected.ver, 231 + data + entry_offset(entry)); 232 + 233 + if (strcmp(entry->name, "huc_fw") == 0) { 234 + u32 offset = entry_offset(entry); 235 + 236 + if (offset < size && css_valid(data + offset, size - offset)) 237 + huc_fw->dma_start_offset = offset; 238 + } 239 + } 240 + 241 + return 0; 242 + } 12 243 13 244 int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc) 14 245 { ··· 256 25 * component gets re-bound and this function called again. If so, just 257 26 * mark the HuC as loaded. 258 27 */ 259 - if (intel_huc_is_authenticated(huc)) { 28 + if (intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) { 260 29 intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING); 261 30 return 0; 262 31 } ··· 269 38 270 39 intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_TRANSFERRED); 271 40 272 - return intel_huc_wait_for_auth_complete(huc); 41 + return intel_huc_wait_for_auth_complete(huc, INTEL_HUC_AUTH_BY_GSC); 273 42 } 274 43 275 44 /**
+5 -1
drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
··· 7 7 #define _INTEL_HUC_FW_H_ 8 8 9 9 struct intel_huc; 10 + struct intel_uc_fw; 11 + 12 + #include <linux/types.h> 10 13 11 14 int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc); 15 + int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc); 12 16 int intel_huc_fw_upload(struct intel_huc *huc); 13 - 17 + int intel_huc_fw_get_binary_info(struct intel_uc_fw *huc_fw, const void *data, size_t size); 14 18 #endif
+21
drivers/gpu/drm/i915/gt/uc/intel_huc_print.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_HUC_PRINT__ 7 + #define __INTEL_HUC_PRINT__ 8 + 9 + #include "gt/intel_gt.h" 10 + #include "gt/intel_gt_print.h" 11 + 12 + #define huc_printk(_huc, _level, _fmt, ...) \ 13 + gt_##_level(huc_to_gt(_huc), "HuC: " _fmt, ##__VA_ARGS__) 14 + #define huc_err(_huc, _fmt, ...) huc_printk((_huc), err, _fmt, ##__VA_ARGS__) 15 + #define huc_warn(_huc, _fmt, ...) huc_printk((_huc), warn, _fmt, ##__VA_ARGS__) 16 + #define huc_notice(_huc, _fmt, ...) huc_printk((_huc), notice, _fmt, ##__VA_ARGS__) 17 + #define huc_info(_huc, _fmt, ...) huc_printk((_huc), info, _fmt, ##__VA_ARGS__) 18 + #define huc_dbg(_huc, _fmt, ...) huc_printk((_huc), dbg, _fmt, ##__VA_ARGS__) 19 + #define huc_probe_error(_huc, _fmt, ...) huc_printk((_huc), probe_error, _fmt, ##__VA_ARGS__) 20 + 21 + #endif /* __INTEL_HUC_PRINT__ */
+9 -1
drivers/gpu/drm/i915/gt/uc/intel_uc.c
··· 538 538 if (intel_huc_is_loaded_by_gsc(huc)) 539 539 intel_huc_update_auth_status(huc); 540 540 else 541 - intel_huc_auth(huc); 541 + intel_huc_auth(huc, INTEL_HUC_AUTH_BY_GUC); 542 542 543 543 if (intel_uc_uses_guc_submission(uc)) { 544 544 ret = intel_guc_submission_enable(guc); ··· 700 700 } 701 701 } 702 702 703 + static void __uc_resume_mappings(struct intel_uc *uc) 704 + { 705 + intel_uc_fw_resume_mapping(&uc->guc.fw); 706 + intel_uc_fw_resume_mapping(&uc->huc.fw); 707 + } 708 + 703 709 static int __uc_resume(struct intel_uc *uc, bool enable_communication) 704 710 { 705 711 struct intel_guc *guc = &uc->guc; ··· 773 767 774 768 .init_hw = __uc_init_hw, 775 769 .fini_hw = __uc_fini_hw, 770 + 771 + .resume_mappings = __uc_resume_mappings, 776 772 };
+2
drivers/gpu/drm/i915/gt/uc/intel_uc.h
··· 24 24 void (*fini)(struct intel_uc *uc); 25 25 int (*init_hw)(struct intel_uc *uc); 26 26 void (*fini_hw)(struct intel_uc *uc); 27 + void (*resume_mappings)(struct intel_uc *uc); 27 28 }; 28 29 29 30 struct intel_uc { ··· 115 114 intel_uc_ops_function(fini, fini, void, ); 116 115 intel_uc_ops_function(init_hw, init_hw, int, 0); 117 116 intel_uc_ops_function(fini_hw, fini_hw, void, ); 117 + intel_uc_ops_function(resume_mappings, resume_mappings, void, ); 118 118 #undef intel_uc_ops_function 119 119 120 120 #endif
+77 -56
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
··· 108 108 fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1)) 109 109 110 110 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp, huc_gsc) \ 111 + fw_def(METEORLAKE, 0, huc_gsc(mtl)) \ 111 112 fw_def(DG2, 0, huc_gsc(dg2)) \ 112 113 fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \ 113 114 fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \ ··· 187 186 u8 major; 188 187 u8 minor; 189 188 u8 patch; 190 - bool loaded_via_gsc; 189 + bool has_gsc_headers; 191 190 }; 192 191 193 192 #define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \ ··· 198 197 199 198 #define UC_FW_BLOB_NEW(major_, minor_, patch_, gsc_, path_) \ 200 199 { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \ 201 - .legacy = false, .loaded_via_gsc = gsc_ } 200 + .legacy = false, .has_gsc_headers = gsc_ } 202 201 203 202 #define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \ 204 203 { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \ ··· 311 310 uc_fw->file_wanted.ver.major = blob->major; 312 311 uc_fw->file_wanted.ver.minor = blob->minor; 313 312 uc_fw->file_wanted.ver.patch = blob->patch; 314 - uc_fw->loaded_via_gsc = blob->loaded_via_gsc; 313 + uc_fw->has_gsc_headers = blob->has_gsc_headers; 315 314 found = true; 316 315 break; 317 316 } ··· 472 471 * intel_uc_fw_init_early - initialize the uC object and select the firmware 473 472 * @uc_fw: uC firmware 474 473 * @type: type of uC 474 + * @needs_ggtt_mapping: whether the FW needs to be GGTT mapped for loading 475 475 * 476 476 * Initialize the state of our uC object and relevant tracking and select the 477 477 * firmware to fetch and load. 
478 478 */ 479 479 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, 480 - enum intel_uc_fw_type type) 480 + enum intel_uc_fw_type type, 481 + bool needs_ggtt_mapping) 481 482 { 482 483 struct intel_gt *gt = ____uc_fw_to_gt(uc_fw, type); 483 484 struct drm_i915_private *i915 = gt->i915; ··· 493 490 GEM_BUG_ON(uc_fw->file_selected.path); 494 491 495 492 uc_fw->type = type; 493 + uc_fw->needs_ggtt_mapping = needs_ggtt_mapping; 496 494 497 495 if (HAS_GT_UC(i915)) { 498 496 if (!validate_fw_table_type(i915, type)) { ··· 547 543 uc_fw->file_wanted.ver.minor = 0; 548 544 uc_fw->user_overridden = true; 549 545 } 550 - } 551 - 552 - static int check_gsc_manifest(struct intel_gt *gt, 553 - const struct firmware *fw, 554 - struct intel_uc_fw *uc_fw) 555 - { 556 - u32 *dw = (u32 *)fw->data; 557 - u32 version_hi, version_lo; 558 - size_t min_size; 559 - 560 - /* Check the size of the blob before examining buffer contents */ 561 - min_size = sizeof(u32) * (HUC_GSC_VERSION_LO_DW + 1); 562 - if (unlikely(fw->size < min_size)) { 563 - gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n", 564 - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, 565 - fw->size, min_size); 566 - return -ENODATA; 567 - } 568 - 569 - version_hi = dw[HUC_GSC_VERSION_HI_DW]; 570 - version_lo = dw[HUC_GSC_VERSION_LO_DW]; 571 - 572 - uc_fw->file_selected.ver.major = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi); 573 - uc_fw->file_selected.ver.minor = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi); 574 - uc_fw->file_selected.ver.patch = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo); 575 - 576 - return 0; 577 546 } 578 547 579 548 static void uc_unpack_css_version(struct intel_uc_fw_ver *ver, u32 css_value) ··· 605 628 uc_fw->private_data_size = css->private_data_size; 606 629 } 607 630 608 - static int check_ccs_header(struct intel_gt *gt, 609 - const struct firmware *fw, 610 - struct intel_uc_fw *uc_fw) 631 + static int __check_ccs_header(struct intel_gt *gt, 632 + const void 
*fw_data, size_t fw_size, 633 + struct intel_uc_fw *uc_fw) 611 634 { 612 635 struct uc_css_header *css; 613 636 size_t size; 614 637 615 638 /* Check the size of the blob before examining buffer contents */ 616 - if (unlikely(fw->size < sizeof(struct uc_css_header))) { 639 + if (unlikely(fw_size < sizeof(struct uc_css_header))) { 617 640 gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n", 618 641 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, 619 - fw->size, sizeof(struct uc_css_header)); 642 + fw_size, sizeof(struct uc_css_header)); 620 643 return -ENODATA; 621 644 } 622 645 623 - css = (struct uc_css_header *)fw->data; 646 + css = (struct uc_css_header *)fw_data; 624 647 625 648 /* Check integrity of size values inside CSS header */ 626 649 size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw - ··· 628 651 if (unlikely(size != sizeof(struct uc_css_header))) { 629 652 gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n", 630 653 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, 631 - fw->size, sizeof(struct uc_css_header)); 654 + fw_size, sizeof(struct uc_css_header)); 632 655 return -EPROTO; 633 656 } 634 657 ··· 640 663 641 664 /* At least, it should have header, uCode and RSA. Size of all three. 
*/ 642 665 size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size; 643 - if (unlikely(fw->size < size)) { 666 + if (unlikely(fw_size < size)) { 644 667 gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n", 645 668 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, 646 - fw->size, size); 669 + fw_size, size); 647 670 return -ENOEXEC; 648 671 } 649 672 ··· 662 685 guc_read_css_info(uc_fw, css); 663 686 664 687 return 0; 688 + } 689 + 690 + static int check_gsc_manifest(struct intel_gt *gt, 691 + const struct firmware *fw, 692 + struct intel_uc_fw *uc_fw) 693 + { 694 + if (uc_fw->type != INTEL_UC_FW_TYPE_HUC) { 695 + gt_err(gt, "trying to GSC-parse a non-HuC binary"); 696 + return -EINVAL; 697 + } 698 + 699 + intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size); 700 + 701 + if (uc_fw->dma_start_offset) { 702 + u32 delta = uc_fw->dma_start_offset; 703 + 704 + __check_ccs_header(gt, fw->data + delta, fw->size - delta, uc_fw); 705 + } 706 + 707 + return 0; 708 + } 709 + 710 + static int check_ccs_header(struct intel_gt *gt, 711 + const struct firmware *fw, 712 + struct intel_uc_fw *uc_fw) 713 + { 714 + return __check_ccs_header(gt, fw->data, fw->size, uc_fw); 665 715 } 666 716 667 717 static bool is_ver_8bit(struct intel_uc_fw_ver *ver) ··· 738 734 if (uc_fw->type == INTEL_UC_FW_TYPE_GSC) 739 735 return 0; 740 736 741 - if (uc_fw->loaded_via_gsc) 737 + if (uc_fw->has_gsc_headers) 742 738 err = check_gsc_manifest(gt, fw, uc_fw); 743 739 else 744 740 err = check_ccs_header(gt, fw, uc_fw); ··· 759 755 if (err) 760 756 return err; 761 757 762 - if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) { 758 + if (uc_fw->needs_ggtt_mapping && (*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) { 763 759 gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n", 764 760 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, 765 761 (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K); ··· 944 940 { 945 941 struct drm_i915_gem_object 
*obj = uc_fw->obj; 946 942 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt; 947 - struct i915_vma_resource *dummy = &uc_fw->dummy; 943 + struct i915_vma_resource *vma_res = &uc_fw->vma_res; 948 944 u32 pte_flags = 0; 949 945 950 - dummy->start = uc_fw_ggtt_offset(uc_fw); 951 - dummy->node_size = obj->base.size; 952 - dummy->bi.pages = obj->mm.pages; 946 + if (!uc_fw->needs_ggtt_mapping) 947 + return; 948 + 949 + vma_res->start = uc_fw_ggtt_offset(uc_fw); 950 + vma_res->node_size = obj->base.size; 951 + vma_res->bi.pages = obj->mm.pages; 953 952 954 953 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 955 954 956 955 /* uc_fw->obj cache domains were not controlled across suspend */ 957 956 if (i915_gem_object_has_struct_page(obj)) 958 - drm_clflush_sg(dummy->bi.pages); 957 + drm_clflush_sg(vma_res->bi.pages); 959 958 960 959 if (i915_gem_object_is_lmem(obj)) 961 960 pte_flags |= PTE_LM; 962 961 963 962 if (ggtt->vm.raw_insert_entries) 964 - ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, 963 + ggtt->vm.raw_insert_entries(&ggtt->vm, vma_res, 965 964 i915_gem_get_pat_index(ggtt->vm.i915, 966 965 I915_CACHE_NONE), 967 966 pte_flags); 968 967 else 969 - ggtt->vm.insert_entries(&ggtt->vm, dummy, 968 + ggtt->vm.insert_entries(&ggtt->vm, vma_res, 970 969 i915_gem_get_pat_index(ggtt->vm.i915, 971 970 I915_CACHE_NONE), 972 971 pte_flags); ··· 977 970 978 971 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw) 979 972 { 980 - struct drm_i915_gem_object *obj = uc_fw->obj; 981 973 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt; 982 - u64 start = uc_fw_ggtt_offset(uc_fw); 974 + struct i915_vma_resource *vma_res = &uc_fw->vma_res; 983 975 984 - ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); 976 + if (!vma_res->node_size) 977 + return; 978 + 979 + ggtt->vm.clear_range(&ggtt->vm, vma_res->start, vma_res->node_size); 985 980 } 986 981 987 982 static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags) ··· 1000 991 
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1001 992 1002 993 /* Set the source address for the uCode */ 1003 - offset = uc_fw_ggtt_offset(uc_fw); 994 + offset = uc_fw->vma_res.start + uc_fw->dma_start_offset; 1004 995 GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000); 1005 996 intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); 1006 997 intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset)); ··· 1074 1065 return -ENOEXEC; 1075 1066 1076 1067 /* Call custom loader */ 1077 - uc_fw_bind_ggtt(uc_fw); 1078 1068 err = uc_fw_xfer(uc_fw, dst_offset, dma_flags); 1079 - uc_fw_unbind_ggtt(uc_fw); 1080 1069 if (err) 1081 1070 goto fail; 1082 1071 ··· 1178 1171 goto out_unpin; 1179 1172 } 1180 1173 1174 + uc_fw_bind_ggtt(uc_fw); 1175 + 1181 1176 return 0; 1182 1177 1183 1178 out_unpin: ··· 1190 1181 1191 1182 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) 1192 1183 { 1184 + uc_fw_unbind_ggtt(uc_fw); 1193 1185 uc_fw_rsa_data_destroy(uc_fw); 1194 1186 1195 1187 if (i915_gem_object_has_pinned_pages(uc_fw->obj)) 1196 1188 i915_gem_object_unpin_pages(uc_fw->obj); 1197 1189 1198 1190 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE); 1191 + } 1192 + 1193 + void intel_uc_fw_resume_mapping(struct intel_uc_fw *uc_fw) 1194 + { 1195 + if (!intel_uc_fw_is_available(uc_fw)) 1196 + return; 1197 + 1198 + if (!i915_gem_object_has_pinned_pages(uc_fw->obj)) 1199 + return; 1200 + 1201 + uc_fw_bind_ggtt(uc_fw); 1199 1202 } 1200 1203 1201 1204 /** ··· 1239 1218 { 1240 1219 struct intel_memory_region *mr = uc_fw->obj->mm.region; 1241 1220 u32 size = min_t(u32, uc_fw->rsa_size, max_len); 1242 - u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size; 1221 + u32 offset = uc_fw->dma_start_offset + sizeof(struct uc_css_header) + uc_fw->ucode_size; 1243 1222 struct sgt_iter iter; 1244 1223 size_t count = 0; 1245 1224 int idx;
+18 -8
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
··· 99 99 struct drm_i915_gem_object *obj; 100 100 101 101 /** 102 - * @dummy: A vma used in binding the uc fw to ggtt. We can't define this 103 - * vma on the stack as it can lead to a stack overflow, so we define it 104 - * here. Safe to have 1 copy per uc fw because the binding is single 105 - * threaded as it done during driver load (inherently single threaded) 106 - * or during a GT reset (mutex guarantees single threaded). 102 + * @needs_ggtt_mapping: indicates whether the fw object needs to be 103 + * pinned to ggtt. If true, the fw is pinned at init time and unpinned 104 + * during driver unload. 107 105 */ 108 - struct i915_vma_resource dummy; 106 + bool needs_ggtt_mapping; 107 + 108 + /** 109 + * @vma_res: A vma resource used in binding the uc fw to ggtt. The fw is 110 + * pinned in a reserved area of the ggtt (above the maximum address 111 + * usable by GuC); therefore, we can't use the normal vma functions to 112 + * do the pinning and we instead use this resource to do so. 113 + */ 114 + struct i915_vma_resource vma_res; 109 115 struct i915_vma *rsa_data; 110 116 111 117 u32 rsa_size; 112 118 u32 ucode_size; 113 119 u32 private_data_size; 114 120 115 - bool loaded_via_gsc; 121 + u32 dma_start_offset; 122 + 123 + bool has_gsc_headers; 116 124 }; 117 125 118 126 /* ··· 290 282 } 291 283 292 284 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, 293 - enum intel_uc_fw_type type); 285 + enum intel_uc_fw_type type, 286 + bool needs_ggtt_mapping); 294 287 int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw); 295 288 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); 296 289 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags); 297 290 int intel_uc_fw_init(struct intel_uc_fw *uc_fw); 298 291 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); 292 + void intel_uc_fw_resume_mapping(struct intel_uc_fw *uc_fw); 299 293 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len); 300 294 int 
intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err); 301 295 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
-6
drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
··· 84 84 } __packed; 85 85 static_assert(sizeof(struct uc_css_header) == 128); 86 86 87 - #define HUC_GSC_VERSION_HI_DW 44 88 - #define HUC_GSC_MAJOR_VER_HI_MASK (0xFF << 0) 89 - #define HUC_GSC_MINOR_VER_HI_MASK (0xFF << 16) 90 - #define HUC_GSC_VERSION_LO_DW 45 91 - #define HUC_GSC_PATCH_VER_LO_MASK (0xFF << 0) 92 - 93 87 #endif /* _INTEL_UC_FW_ABI_H */
+2 -4
drivers/gpu/drm/i915/i915_driver.c
··· 256 256 if (ret < 0) 257 257 goto err_rootgt; 258 258 259 - i915_drm_clients_init(&dev_priv->clients, dev_priv); 260 - 261 259 i915_gem_init_early(dev_priv); 262 260 263 261 /* This must be called before any calls to HAS_PCH_* */ ··· 289 291 intel_power_domains_cleanup(dev_priv); 290 292 i915_gem_cleanup_early(dev_priv); 291 293 intel_gt_driver_late_release_all(dev_priv); 292 - i915_drm_clients_fini(&dev_priv->clients); 293 294 intel_region_ttm_device_fini(dev_priv); 294 295 vlv_suspend_cleanup(dev_priv); 295 296 i915_workqueues_cleanup(dev_priv); ··· 1716 1719 .compat_ioctl = i915_ioc32_compat_ioctl, 1717 1720 .llseek = noop_llseek, 1718 1721 #ifdef CONFIG_PROC_FS 1719 - .show_fdinfo = i915_drm_client_fdinfo, 1722 + .show_fdinfo = drm_show_fdinfo, 1720 1723 #endif 1721 1724 }; 1722 1725 ··· 1816 1819 .open = i915_driver_open, 1817 1820 .lastclose = i915_driver_lastclose, 1818 1821 .postclose = i915_driver_postclose, 1822 + .show_fdinfo = i915_drm_client_fdinfo, 1819 1823 1820 1824 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 1821 1825 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+10 -55
drivers/gpu/drm/i915/i915_drm_client.c
··· 17 17 #include "i915_gem.h" 18 18 #include "i915_utils.h" 19 19 20 - void i915_drm_clients_init(struct i915_drm_clients *clients, 21 - struct drm_i915_private *i915) 22 - { 23 - clients->i915 = i915; 24 - clients->next_id = 0; 25 - 26 - xa_init_flags(&clients->xarray, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); 27 - } 28 - 29 - struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients) 20 + struct i915_drm_client *i915_drm_client_alloc(void) 30 21 { 31 22 struct i915_drm_client *client; 32 - struct xarray *xa = &clients->xarray; 33 - int ret; 34 23 35 24 client = kzalloc(sizeof(*client), GFP_KERNEL); 36 25 if (!client) 37 - return ERR_PTR(-ENOMEM); 38 - 39 - xa_lock_irq(xa); 40 - ret = __xa_alloc_cyclic(xa, &client->id, client, xa_limit_32b, 41 - &clients->next_id, GFP_KERNEL); 42 - xa_unlock_irq(xa); 43 - if (ret < 0) 44 - goto err; 26 + return NULL; 45 27 46 28 kref_init(&client->kref); 47 29 spin_lock_init(&client->ctx_lock); 48 30 INIT_LIST_HEAD(&client->ctx_list); 49 - client->clients = clients; 50 31 51 32 return client; 52 - 53 - err: 54 - kfree(client); 55 - 56 - return ERR_PTR(ret); 57 33 } 58 34 59 35 void __i915_drm_client_free(struct kref *kref) 60 36 { 61 37 struct i915_drm_client *client = 62 38 container_of(kref, typeof(*client), kref); 63 - struct xarray *xa = &client->clients->xarray; 64 - unsigned long flags; 65 39 66 - xa_lock_irqsave(xa, flags); 67 - __xa_erase(xa, client->id); 68 - xa_unlock_irqrestore(xa, flags); 69 40 kfree(client); 70 - } 71 - 72 - void i915_drm_clients_fini(struct i915_drm_clients *clients) 73 - { 74 - GEM_BUG_ON(!xa_empty(&clients->xarray)); 75 - xa_destroy(&clients->xarray); 76 41 } 77 42 78 43 #ifdef CONFIG_PROC_FS ··· 66 101 } 67 102 68 103 static void 69 - show_client_class(struct seq_file *m, 104 + show_client_class(struct drm_printer *p, 105 + struct drm_i915_private *i915, 70 106 struct i915_drm_client *client, 71 107 unsigned int class) 72 108 { 73 - const struct list_head *list = &client->ctx_list; 
109 + const unsigned int capacity = i915->engine_uabi_class_count[class]; 74 110 u64 total = atomic64_read(&client->past_runtime[class]); 75 - const unsigned int capacity = 76 - client->clients->i915->engine_uabi_class_count[class]; 77 111 struct i915_gem_context *ctx; 78 112 79 113 rcu_read_lock(); 80 - list_for_each_entry_rcu(ctx, list, client_link) 114 + list_for_each_entry_rcu(ctx, &client->ctx_list, client_link) 81 115 total += busy_add(ctx, class); 82 116 rcu_read_unlock(); 83 117 84 118 if (capacity) 85 - seq_printf(m, "drm-engine-%s:\t%llu ns\n", 119 + drm_printf(p, "drm-engine-%s:\t%llu ns\n", 86 120 uabi_class_names[class], total); 87 121 88 122 if (capacity > 1) 89 - seq_printf(m, "drm-engine-capacity-%s:\t%u\n", 123 + drm_printf(p, "drm-engine-capacity-%s:\t%u\n", 90 124 uabi_class_names[class], 91 125 capacity); 92 126 } 93 127 94 - void i915_drm_client_fdinfo(struct seq_file *m, struct file *f) 128 + void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file) 95 129 { 96 - struct drm_file *file = f->private_data; 97 130 struct drm_i915_file_private *file_priv = file->driver_priv; 98 131 struct drm_i915_private *i915 = file_priv->i915; 99 - struct i915_drm_client *client = file_priv->client; 100 - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 101 132 unsigned int i; 102 133 103 134 /* ··· 102 141 * ****************************************************************** 103 142 */ 104 143 105 - seq_printf(m, "drm-driver:\t%s\n", i915->drm.driver->name); 106 - seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", 107 - pci_domain_nr(pdev->bus), pdev->bus->number, 108 - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 109 - seq_printf(m, "drm-client-id:\t%u\n", client->id); 110 - 111 144 if (GRAPHICS_VER(i915) < 8) 112 145 return; 113 146 114 147 for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++) 115 - show_client_class(m, client, i); 148 + show_client_class(p, i915, file_priv->client, i); 116 149 } 117 150 #endif
+4 -18
drivers/gpu/drm/i915/i915_drm_client.h
··· 9 9 #include <linux/kref.h> 10 10 #include <linux/list.h> 11 11 #include <linux/spinlock.h> 12 - #include <linux/xarray.h> 13 12 14 13 #include <uapi/drm/i915_drm.h> 15 14 16 15 #define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE 17 16 18 - struct drm_i915_private; 19 - 20 - struct i915_drm_clients { 21 - struct drm_i915_private *i915; 22 - 23 - struct xarray xarray; 24 - u32 next_id; 25 - }; 17 + struct drm_file; 18 + struct drm_printer; 26 19 27 20 struct i915_drm_client { 28 21 struct kref kref; ··· 25 32 spinlock_t ctx_lock; /* For add/remove from ctx_list. */ 26 33 struct list_head ctx_list; /* List of contexts belonging to client. */ 27 34 28 - struct i915_drm_clients *clients; 29 - 30 35 /** 31 36 * @past_runtime: Accumulation of pphwsp runtimes from closed contexts. 32 37 */ 33 38 atomic64_t past_runtime[I915_LAST_UABI_ENGINE_CLASS + 1]; 34 39 }; 35 - 36 - void i915_drm_clients_init(struct i915_drm_clients *clients, 37 - struct drm_i915_private *i915); 38 40 39 41 static inline struct i915_drm_client * 40 42 i915_drm_client_get(struct i915_drm_client *client) ··· 45 57 kref_put(&client->kref, __i915_drm_client_free); 46 58 } 47 59 48 - struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients); 60 + struct i915_drm_client *i915_drm_client_alloc(void); 49 61 50 62 #ifdef CONFIG_PROC_FS 51 - void i915_drm_client_fdinfo(struct seq_file *m, struct file *f); 63 + void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file); 52 64 #endif 53 - 54 - void i915_drm_clients_fini(struct i915_drm_clients *clients); 55 65 56 66 #endif /* !__I915_DRM_CLIENT_H__ */
+1 -3
drivers/gpu/drm/i915/i915_drv.h
··· 324 324 /* 325 325 * i915->gt[0] == &i915->gt0 326 326 */ 327 - #define I915_MAX_GT 4 327 + #define I915_MAX_GT 2 328 328 struct intel_gt *gt[I915_MAX_GT]; 329 329 330 330 struct kobject *sysfs_gt; ··· 357 357 bool irq_enabled; 358 358 359 359 struct i915_pmu pmu; 360 - 361 - struct i915_drm_clients clients; 362 360 363 361 /* The TTM device structure. */ 364 362 struct ttm_device bdev;
+2 -4
drivers/gpu/drm/i915/i915_gem.c
··· 1325 1325 if (!file_priv) 1326 1326 goto err_alloc; 1327 1327 1328 - client = i915_drm_client_add(&i915->clients); 1329 - if (IS_ERR(client)) { 1330 - ret = PTR_ERR(client); 1328 + client = i915_drm_client_alloc(); 1329 + if (!client) 1331 1330 goto err_client; 1332 - } 1333 1331 1334 1332 file->driver_priv = file_priv; 1335 1333 file_priv->i915 = i915;
+5 -1
drivers/gpu/drm/i915/i915_getparam.c
··· 100 100 value = sseu->min_eu_in_pool; 101 101 break; 102 102 case I915_PARAM_HUC_STATUS: 103 - value = intel_huc_check_status(&to_gt(i915)->uc.huc); 103 + /* On platform with a media GT, the HuC is on that GT */ 104 + if (i915->media_gt) 105 + value = intel_huc_check_status(&i915->media_gt->uc.huc); 106 + else 107 + value = intel_huc_check_status(&to_gt(i915)->uc.huc); 104 108 if (value < 0) 105 109 return value; 106 110 break;
+48 -63
drivers/gpu/drm/i915/i915_perf.c
··· 531 531 * (See description of OA_TAIL_MARGIN_NSEC above for further details.) 532 532 * 533 533 * Besides returning true when there is data available to read() this function 534 - * also updates the tail, aging_tail and aging_timestamp in the oa_buffer 535 - * object. 534 + * also updates the tail in the oa_buffer object. 536 535 * 537 536 * Note: It's safe to read OA config state here unlocked, assuming that this is 538 537 * only called while the stream is enabled, while the global OA configuration ··· 543 544 { 544 545 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 545 546 int report_size = stream->oa_buffer.format->size; 547 + u32 head, tail, read_tail; 546 548 unsigned long flags; 547 549 bool pollin; 548 550 u32 hw_tail; 549 - u64 now; 550 551 u32 partial_report_size; 551 552 552 553 /* We have to consider the (unlikely) possibility that read() errors ··· 565 566 partial_report_size %= report_size; 566 567 567 568 /* Subtract partial amount off the tail */ 568 - hw_tail = gtt_offset + OA_TAKEN(hw_tail, partial_report_size); 569 + hw_tail = OA_TAKEN(hw_tail, partial_report_size); 569 570 570 - now = ktime_get_mono_fast_ns(); 571 + /* NB: The head we observe here might effectively be a little 572 + * out of date. If a read() is in progress, the head could be 573 + * anywhere between this head and stream->oa_buffer.tail. 574 + */ 575 + head = stream->oa_buffer.head - gtt_offset; 576 + read_tail = stream->oa_buffer.tail - gtt_offset; 571 577 572 - if (hw_tail == stream->oa_buffer.aging_tail && 573 - (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) { 574 - /* If the HW tail hasn't move since the last check and the HW 575 - * tail has been aging for long enough, declare it the new 576 - * tail. 577 - */ 578 - stream->oa_buffer.tail = stream->oa_buffer.aging_tail; 579 - } else { 580 - u32 head, tail, aged_tail; 578 + tail = hw_tail; 581 579 582 - /* NB: The head we observe here might effectively be a little 583 - * out of date. 
If a read() is in progress, the head could be 584 - * anywhere between this head and stream->oa_buffer.tail. 585 - */ 586 - head = stream->oa_buffer.head - gtt_offset; 587 - aged_tail = stream->oa_buffer.tail - gtt_offset; 580 + /* Walk the stream backward until we find a report with report 581 + * id and timestmap not at 0. Since the circular buffer pointers 582 + * progress by increments of 64 bytes and that reports can be up 583 + * to 256 bytes long, we can't tell whether a report has fully 584 + * landed in memory before the report id and timestamp of the 585 + * following report have effectively landed. 586 + * 587 + * This is assuming that the writes of the OA unit land in 588 + * memory in the order they were written to. 589 + * If not : (╯°□°)╯︵ ┻━┻ 590 + */ 591 + while (OA_TAKEN(tail, read_tail) >= report_size) { 592 + void *report = stream->oa_buffer.vaddr + tail; 588 593 589 - hw_tail -= gtt_offset; 590 - tail = hw_tail; 594 + if (oa_report_id(stream, report) || 595 + oa_timestamp(stream, report)) 596 + break; 591 597 592 - /* Walk the stream backward until we find a report with report 593 - * id and timestmap not at 0. Since the circular buffer pointers 594 - * progress by increments of 64 bytes and that reports can be up 595 - * to 256 bytes long, we can't tell whether a report has fully 596 - * landed in memory before the report id and timestamp of the 597 - * following report have effectively landed. 598 - * 599 - * This is assuming that the writes of the OA unit land in 600 - * memory in the order they were written to. 
601 - * If not : (╯°□°)╯︵ ┻━┻ 602 - */ 603 - while (OA_TAKEN(tail, aged_tail) >= report_size) { 604 - void *report = stream->oa_buffer.vaddr + tail; 605 - 606 - if (oa_report_id(stream, report) || 607 - oa_timestamp(stream, report)) 608 - break; 609 - 610 - tail = (tail - report_size) & (OA_BUFFER_SIZE - 1); 611 - } 612 - 613 - if (OA_TAKEN(hw_tail, tail) > report_size && 614 - __ratelimit(&stream->perf->tail_pointer_race)) 615 - drm_notice(&stream->uncore->i915->drm, 616 - "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n", 617 - head, tail, hw_tail); 618 - 619 - stream->oa_buffer.tail = gtt_offset + tail; 620 - stream->oa_buffer.aging_tail = gtt_offset + hw_tail; 621 - stream->oa_buffer.aging_timestamp = now; 598 + tail = (tail - report_size) & (OA_BUFFER_SIZE - 1); 622 599 } 623 600 624 - pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset, 625 - stream->oa_buffer.head - gtt_offset) >= report_size; 601 + if (OA_TAKEN(hw_tail, tail) > report_size && 602 + __ratelimit(&stream->perf->tail_pointer_race)) 603 + drm_notice(&stream->uncore->i915->drm, 604 + "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n", 605 + head, tail, hw_tail); 606 + 607 + stream->oa_buffer.tail = gtt_offset + tail; 608 + 609 + pollin = OA_TAKEN(stream->oa_buffer.tail, 610 + stream->oa_buffer.head) >= report_size; 626 611 627 612 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 628 613 ··· 860 877 stream->oa_buffer.last_ctx_id = ctx_id; 861 878 } 862 879 863 - /* 864 - * Clear out the report id and timestamp as a means to detect unlanded 865 - * reports. 866 - */ 867 - oa_report_id_clear(stream, report32); 868 - oa_timestamp_clear(stream, report32); 880 + if (is_power_of_2(report_size)) { 881 + /* 882 + * Clear out the report id and timestamp as a means 883 + * to detect unlanded reports. 
884 + */ 885 + oa_report_id_clear(stream, report32); 886 + oa_timestamp_clear(stream, report32); 887 + } else { 888 + /* Zero out the entire report */ 889 + memset(report32, 0, report_size); 890 + } 869 891 } 870 892 871 893 if (start_offset != *offset) { ··· 1710 1722 gtt_offset | OABUFFER_SIZE_16M); 1711 1723 1712 1724 /* Mark that we need updated tail pointers to read from... */ 1713 - stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1714 1725 stream->oa_buffer.tail = gtt_offset; 1715 1726 1716 1727 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); ··· 1761 1774 intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); 1762 1775 1763 1776 /* Mark that we need updated tail pointers to read from... */ 1764 - stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1765 1777 stream->oa_buffer.tail = gtt_offset; 1766 1778 1767 1779 /* ··· 1814 1828 gtt_offset & GEN12_OAG_OATAILPTR_MASK); 1815 1829 1816 1830 /* Mark that we need updated tail pointers to read from... */ 1817 - stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1818 1831 stream->oa_buffer.tail = gtt_offset; 1819 1832 1820 1833 /*
-12
drivers/gpu/drm/i915/i915_perf_types.h
··· 313 313 spinlock_t ptr_lock; 314 314 315 315 /** 316 - * @aging_tail: The last HW tail reported by HW. The data 317 - * might not have made it to memory yet though. 318 - */ 319 - u32 aging_tail; 320 - 321 - /** 322 - * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer 323 - * was read; used to determine when it is old enough to trust. 324 - */ 325 - u64 aging_timestamp; 326 - 327 - /** 328 316 * @head: Although we can always read back the head pointer register, 329 317 * we prefer to avoid trusting the HW state, just to avoid any 330 318 * risk that some hardware condition could * somehow bump the
+9 -25
drivers/gpu/drm/i915/i915_pmu.c
··· 132 132 unsigned int i; 133 133 u32 mask = 0; 134 134 135 - for (i = 0; i < I915_PMU_MAX_GTS; i++) 135 + for (i = 0; i < I915_PMU_MAX_GT; i++) 136 136 mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) | 137 137 config_mask(__I915_PMU_REQUESTED_FREQUENCY(i)); 138 138 139 139 return mask; 140 140 } 141 141 142 - static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) 142 + static bool pmu_needs_timer(struct i915_pmu *pmu) 143 143 { 144 144 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); 145 145 u32 enable; ··· 158 158 enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK; 159 159 160 160 /* 161 - * When the GPU is idle per-engine counters do not need to be 162 - * running so clear those bits out. 163 - */ 164 - if (!gpu_active) 165 - enable &= ~ENGINE_SAMPLE_MASK; 166 - /* 167 161 * Also there is software busyness tracking available we do not 168 162 * need the timer for I915_SAMPLE_BUSY counter. 169 163 */ 170 - else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS) 164 + if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS) 171 165 enable &= ~BIT(I915_SAMPLE_BUSY); 172 166 173 167 /* ··· 191 197 return ktime_to_ns(ktime_sub(ktime_get_raw(), kt)); 192 198 } 193 199 194 - static unsigned int 195 - __sample_idx(struct i915_pmu *pmu, unsigned int gt_id, int sample) 196 - { 197 - unsigned int idx = gt_id * __I915_NUM_PMU_SAMPLERS + sample; 198 - 199 - GEM_BUG_ON(idx >= ARRAY_SIZE(pmu->sample)); 200 - 201 - return idx; 202 - } 203 - 204 200 static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample) 205 201 { 206 - return pmu->sample[__sample_idx(pmu, gt_id, sample)].cur; 202 + return pmu->sample[gt_id][sample].cur; 207 203 } 208 204 209 205 static void 210 206 store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val) 211 207 { 212 - pmu->sample[__sample_idx(pmu, gt_id, sample)].cur = val; 208 + pmu->sample[gt_id][sample].cur = val; 213 209 } 214 210 215 211 static void 
216 212 add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul) 217 213 { 218 - pmu->sample[__sample_idx(pmu, gt_id, sample)].cur += mul_u32_u32(val, mul); 214 + pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul); 219 215 } 220 216 221 217 static u64 get_rc6(struct intel_gt *gt) ··· 279 295 280 296 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) 281 297 { 282 - if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { 298 + if (!pmu->timer_enabled && pmu_needs_timer(pmu)) { 283 299 pmu->timer_enabled = true; 284 300 pmu->timer_last = ktime_get(); 285 301 hrtimer_start_range_ns(&pmu->timer, ··· 305 321 */ 306 322 pmu->unparked &= ~BIT(gt->info.id); 307 323 if (pmu->unparked == 0) 308 - pmu->timer_enabled = pmu_needs_timer(pmu, false); 324 + pmu->timer_enabled = false; 309 325 310 326 spin_unlock_irq(&pmu->lock); 311 327 } ··· 811 827 */ 812 828 if (--pmu->enable_count[bit] == 0) { 813 829 pmu->enable &= ~BIT(bit); 814 - pmu->timer_enabled &= pmu_needs_timer(pmu, true); 830 + pmu->timer_enabled &= pmu_needs_timer(pmu); 815 831 } 816 832 817 833 spin_unlock_irqrestore(&pmu->lock, flags);
+4 -4
drivers/gpu/drm/i915/i915_pmu.h
··· 38 38 __I915_NUM_PMU_SAMPLERS 39 39 }; 40 40 41 - #define I915_PMU_MAX_GTS 2 41 + #define I915_PMU_MAX_GT 2 42 42 43 43 /* 44 44 * How many different events we track in the global PMU mask. ··· 47 47 */ 48 48 #define I915_PMU_MASK_BITS \ 49 49 (I915_ENGINE_SAMPLE_COUNT + \ 50 - I915_PMU_MAX_GTS * __I915_PMU_TRACKED_EVENT_COUNT) 50 + I915_PMU_MAX_GT * __I915_PMU_TRACKED_EVENT_COUNT) 51 51 52 52 #define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) 53 53 ··· 127 127 * Only global counters are held here, while the per-engine ones are in 128 128 * struct intel_engine_cs. 129 129 */ 130 - struct i915_pmu_sample sample[I915_PMU_MAX_GTS * __I915_NUM_PMU_SAMPLERS]; 130 + struct i915_pmu_sample sample[I915_PMU_MAX_GT][__I915_NUM_PMU_SAMPLERS]; 131 131 /** 132 132 * @sleep_last: Last time GT parked for RC6 estimation. 133 133 */ 134 - ktime_t sleep_last[I915_PMU_MAX_GTS]; 134 + ktime_t sleep_last[I915_PMU_MAX_GT]; 135 135 /** 136 136 * @irq_count: Number of interrupts 137 137 *
+3
drivers/gpu/drm/i915/i915_reg.h
··· 941 941 #define HECI_H_GS1(base) _MMIO((base) + 0xc4c) 942 942 #define HECI_H_GS1_ER_PREP REG_BIT(0) 943 943 944 + #define HECI_FWSTS5(base) _MMIO((base) + 0xc68) 945 + #define HECI_FWSTS5_HUC_AUTH_DONE (1 << 19) 946 + 944 947 #define HSW_GTT_CACHE_EN _MMIO(0x4024) 945 948 #define GTT_CACHE_EN_ALL 0xF0007FFF 946 949 #define GEN7_WR_WATERMARK _MMIO(0x4028)
+14 -3
drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
··· 11 11 12 12 /* PXP-Cmd-Op definitions */ 13 13 #define PXP43_CMDID_START_HUC_AUTH 0x0000003A 14 + #define PXP43_CMDID_NEW_HUC_AUTH 0x0000003F /* MTL+ */ 14 15 #define PXP43_CMDID_INIT_SESSION 0x00000036 15 16 16 17 /* PXP-Packet sizes for MTL's GSCCS-HECI instruction */ 17 18 #define PXP43_MAX_HECI_INOUT_SIZE (SZ_32K) 18 19 19 - /* PXP-Input-Packet: HUC-Authentication */ 20 + /* PXP-Packet size for MTL's NEW_HUC_AUTH instruction */ 21 + #define PXP43_HUC_AUTH_INOUT_SIZE (SZ_4K) 22 + 23 + /* PXP-Input-Packet: HUC Load and Authentication */ 20 24 struct pxp43_start_huc_auth_in { 21 25 struct pxp_cmd_header header; 22 26 __le64 huc_base_address; 23 27 } __packed; 24 28 25 - /* PXP-Output-Packet: HUC-Authentication */ 26 - struct pxp43_start_huc_auth_out { 29 + /* PXP-Input-Packet: HUC Auth-only */ 30 + struct pxp43_new_huc_auth_in { 31 + struct pxp_cmd_header header; 32 + u64 huc_base_address; 33 + u32 huc_size; 34 + } __packed; 35 + 36 + /* PXP-Output-Packet: HUC Load and Authentication or Auth-only */ 37 + struct pxp43_huc_auth_out { 27 38 struct pxp_cmd_header header; 28 39 } __packed; 29 40
+2 -2
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
··· 143 143 144 144 reply_size = header->message_size - sizeof(*header); 145 145 if (reply_size > msg_out_size_max) { 146 - drm_warn(&i915->drm, "caller with insufficient PXP reply size %u (%ld)\n", 146 + drm_warn(&i915->drm, "caller with insufficient PXP reply size %u (%zu)\n", 147 147 reply_size, msg_out_size_max); 148 148 reply_size = msg_out_size_max; 149 149 } ··· 196 196 * gsc-proxy init flow (the last set of dependencies that 197 197 * are out of order) will suffice. 198 198 */ 199 - if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc) && 199 + if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc, INTEL_HUC_AUTH_BY_GSC) && 200 200 intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc)) 201 201 return true; 202 202
+1 -1
drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
··· 19 19 struct intel_gt *gt; 20 20 struct intel_huc *huc; 21 21 struct pxp43_start_huc_auth_in huc_in = {0}; 22 - struct pxp43_start_huc_auth_out huc_out = {0}; 22 + struct pxp43_huc_auth_out huc_out = {0}; 23 23 dma_addr_t huc_phys_addr; 24 24 u8 client_id = 0; 25 25 u8 fence_id = 0;
+7
drivers/gpu/drm/meson/Kconfig
··· 17 17 default y if DRM_MESON 18 18 select DRM_DW_HDMI 19 19 imply DRM_DW_HDMI_I2S_AUDIO 20 + 21 + config DRM_MESON_DW_MIPI_DSI 22 + tristate "MIPI DSI Synopsys Controller support for Amlogic Meson Display" 23 + depends on DRM_MESON 24 + default y if DRM_MESON 25 + select DRM_DW_MIPI_DSI 26 + select GENERIC_PHY_MIPI_DPHY
+2 -1
drivers/gpu/drm/meson/Makefile
··· 2 2 meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_encoder_cvbs.o 3 3 meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o 4 4 meson-drm-y += meson_rdma.o meson_osd_afbcd.o 5 - meson-drm-y += meson_encoder_hdmi.o 5 + meson-drm-y += meson_encoder_hdmi.o meson_encoder_dsi.o 6 6 7 7 obj-$(CONFIG_DRM_MESON) += meson-drm.o 8 8 obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o 9 + obj-$(CONFIG_DRM_MESON_DW_MIPI_DSI) += meson_dw_mipi_dsi.o
+39 -23
drivers/gpu/drm/meson/meson_drv.c
··· 34 34 #include "meson_registers.h" 35 35 #include "meson_encoder_cvbs.h" 36 36 #include "meson_encoder_hdmi.h" 37 + #include "meson_encoder_dsi.h" 37 38 #include "meson_viu.h" 38 39 #include "meson_vpp.h" 39 40 #include "meson_rdma.h" ··· 317 316 goto exit_afbcd; 318 317 319 318 if (has_components) { 320 - ret = component_bind_all(drm->dev, drm); 319 + ret = component_bind_all(dev, drm); 321 320 if (ret) { 322 321 dev_err(drm->dev, "Couldn't bind all components\n"); 322 + /* Do not try to unbind */ 323 + has_components = false; 323 324 goto exit_afbcd; 324 325 } 325 326 } 326 327 327 328 ret = meson_encoder_hdmi_init(priv); 328 329 if (ret) 329 - goto unbind_all; 330 + goto exit_afbcd; 331 + 332 + if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { 333 + ret = meson_encoder_dsi_init(priv); 334 + if (ret) 335 + goto exit_afbcd; 336 + } 330 337 331 338 ret = meson_plane_create(priv); 332 339 if (ret) 333 - goto unbind_all; 340 + goto exit_afbcd; 334 341 335 342 ret = meson_overlay_create(priv); 336 343 if (ret) 337 - goto unbind_all; 344 + goto exit_afbcd; 338 345 339 346 ret = meson_crtc_create(priv); 340 347 if (ret) 341 - goto unbind_all; 348 + goto exit_afbcd; 342 349 343 350 ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm); 344 351 if (ret) 345 - goto unbind_all; 352 + goto exit_afbcd; 346 353 347 354 drm_mode_config_reset(drm); 348 355 ··· 368 359 369 360 uninstall_irq: 370 361 free_irq(priv->vsync_irq, drm); 371 - unbind_all: 372 - if (has_components) 373 - component_unbind_all(drm->dev, drm); 374 362 exit_afbcd: 375 363 if (priv->afbcd.ops) 376 364 priv->afbcd.ops->exit(priv); 377 365 free_drm: 378 366 drm_dev_put(drm); 367 + 368 + meson_encoder_dsi_remove(priv); 369 + meson_encoder_hdmi_remove(priv); 370 + meson_encoder_cvbs_remove(priv); 371 + 372 + if (has_components) 373 + component_unbind_all(dev, drm); 379 374 380 375 return ret; 381 376 } ··· 407 394 free_irq(priv->vsync_irq, drm); 408 395 drm_dev_put(drm); 409 396 397 
+ meson_encoder_dsi_remove(priv); 410 398 meson_encoder_hdmi_remove(priv); 411 399 meson_encoder_cvbs_remove(priv); 412 400 ··· 460 446 drm_atomic_helper_shutdown(priv->drm); 461 447 } 462 448 463 - /* Possible connectors nodes to ignore */ 464 - static const struct of_device_id connectors_match[] = { 465 - { .compatible = "composite-video-connector" }, 466 - { .compatible = "svideo-connector" }, 449 + /* 450 + * Only devices to use as components 451 + * TOFIX: get rid of components when we can finally 452 + * get meson_dx_hdmi to stop using the meson_drm 453 + * private structure for HHI registers. 454 + */ 455 + static const struct of_device_id components_dev_match[] = { 456 + { .compatible = "amlogic,meson-gxbb-dw-hdmi" }, 457 + { .compatible = "amlogic,meson-gxl-dw-hdmi" }, 458 + { .compatible = "amlogic,meson-gxm-dw-hdmi" }, 459 + { .compatible = "amlogic,meson-g12a-dw-hdmi" }, 467 460 {} 468 461 }; 469 462 ··· 488 467 continue; 489 468 } 490 469 491 - /* If an analog connector is detected, count it as an output */ 492 - if (of_match_node(connectors_match, remote)) { 493 - ++count; 494 - of_node_put(remote); 495 - continue; 470 + if (of_match_node(components_dev_match, remote)) { 471 + component_match_add(&pdev->dev, &match, component_compare_of, remote); 472 + 473 + dev_dbg(&pdev->dev, "parent %pOF remote match add %pOF parent %s\n", 474 + np, remote, dev_name(&pdev->dev)); 496 475 } 497 - 498 - dev_dbg(&pdev->dev, "parent %pOF remote match add %pOF parent %s\n", 499 - np, remote, dev_name(&pdev->dev)); 500 - 501 - component_match_add(&pdev->dev, &match, component_compare_of, remote); 502 476 503 477 of_node_put(remote); 504 478
+1
drivers/gpu/drm/meson/meson_drv.h
··· 28 28 enum { 29 29 MESON_ENC_CVBS = 0, 30 30 MESON_ENC_HDMI, 31 + MESON_ENC_DSI, 31 32 MESON_ENC_LAST, 32 33 }; 33 34
+352
drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * Copyright (C) 2021 BayLibre, SAS 4 + * Author: Neil Armstrong <narmstrong@baylibre.com> 5 + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 6 + */ 7 + 8 + #include <linux/clk.h> 9 + #include <linux/kernel.h> 10 + #include <linux/module.h> 11 + #include <linux/of_device.h> 12 + #include <linux/of_graph.h> 13 + #include <linux/reset.h> 14 + #include <linux/phy/phy.h> 15 + #include <linux/bitfield.h> 16 + 17 + #include <video/mipi_display.h> 18 + 19 + #include <drm/bridge/dw_mipi_dsi.h> 20 + #include <drm/drm_mipi_dsi.h> 21 + 22 + #include <drm/drm_atomic_helper.h> 23 + #include <drm/drm_device.h> 24 + #include <drm/drm_probe_helper.h> 25 + #include <drm/drm_print.h> 26 + 27 + #include "meson_drv.h" 28 + #include "meson_dw_mipi_dsi.h" 29 + #include "meson_registers.h" 30 + #include "meson_venc.h" 31 + 32 + #define DRIVER_NAME "meson-dw-mipi-dsi" 33 + #define DRIVER_DESC "Amlogic Meson MIPI-DSI DRM driver" 34 + 35 + struct meson_dw_mipi_dsi { 36 + struct meson_drm *priv; 37 + struct device *dev; 38 + void __iomem *base; 39 + struct phy *phy; 40 + union phy_configure_opts phy_opts; 41 + struct dw_mipi_dsi *dmd; 42 + struct dw_mipi_dsi_plat_data pdata; 43 + struct mipi_dsi_device *dsi_device; 44 + const struct drm_display_mode *mode; 45 + struct clk *bit_clk; 46 + struct clk *px_clk; 47 + struct reset_control *top_rst; 48 + }; 49 + 50 + #define encoder_to_meson_dw_mipi_dsi(x) \ 51 + container_of(x, struct meson_dw_mipi_dsi, encoder) 52 + 53 + static void meson_dw_mipi_dsi_hw_init(struct meson_dw_mipi_dsi *mipi_dsi) 54 + { 55 + /* Software reset */ 56 + writel_bits_relaxed(MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR | 57 + MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING, 58 + MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR | 59 + MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING, 60 + mipi_dsi->base + MIPI_DSI_TOP_SW_RESET); 61 + 
writel_bits_relaxed(MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR | 62 + MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING, 63 + 0, mipi_dsi->base + MIPI_DSI_TOP_SW_RESET); 64 + 65 + /* Enable clocks */ 66 + writel_bits_relaxed(MIPI_DSI_TOP_CLK_SYSCLK_EN | MIPI_DSI_TOP_CLK_PIXCLK_EN, 67 + MIPI_DSI_TOP_CLK_SYSCLK_EN | MIPI_DSI_TOP_CLK_PIXCLK_EN, 68 + mipi_dsi->base + MIPI_DSI_TOP_CLK_CNTL); 69 + 70 + /* Take memory out of power down */ 71 + writel_relaxed(0, mipi_dsi->base + MIPI_DSI_TOP_MEM_PD); 72 + } 73 + 74 + static int dw_mipi_dsi_phy_init(void *priv_data) 75 + { 76 + struct meson_dw_mipi_dsi *mipi_dsi = priv_data; 77 + unsigned int dpi_data_format, venc_data_width; 78 + int ret; 79 + 80 + /* Set the bit clock rate to hs_clk_rate */ 81 + ret = clk_set_rate(mipi_dsi->bit_clk, 82 + mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate); 83 + if (ret) { 84 + dev_err(mipi_dsi->dev, "Failed to set DSI Bit clock rate %lu (ret %d)\n", 85 + mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate, ret); 86 + return ret; 87 + } 88 + 89 + /* Make sure the rate of the bit clock is not modified by someone else */ 90 + ret = clk_rate_exclusive_get(mipi_dsi->bit_clk); 91 + if (ret) { 92 + dev_err(mipi_dsi->dev, 93 + "Failed to set the exclusivity on the bit clock rate (ret %d)\n", ret); 94 + return ret; 95 + } 96 + 97 + ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->mode->clock * 1000); 98 + 99 + if (ret) { 100 + dev_err(mipi_dsi->dev, "Failed to set DSI Pixel clock rate %u (%d)\n", 101 + mipi_dsi->mode->clock * 1000, ret); 102 + return ret; 103 + } 104 + 105 + switch (mipi_dsi->dsi_device->format) { 106 + case MIPI_DSI_FMT_RGB888: 107 + dpi_data_format = DPI_COLOR_24BIT; 108 + venc_data_width = VENC_IN_COLOR_24B; 109 + break; 110 + case MIPI_DSI_FMT_RGB666: 111 + dpi_data_format = DPI_COLOR_18BIT_CFG_2; 112 + venc_data_width = VENC_IN_COLOR_18B; 113 + break; 114 + case MIPI_DSI_FMT_RGB666_PACKED: 115 + case MIPI_DSI_FMT_RGB565: 116 + return -EINVAL; 117 + } 118 + 119 + /* Configure 
color format for DPI register */ 120 + writel_relaxed(FIELD_PREP(MIPI_DSI_TOP_DPI_COLOR_MODE, dpi_data_format) | 121 + FIELD_PREP(MIPI_DSI_TOP_IN_COLOR_MODE, venc_data_width) | 122 + FIELD_PREP(MIPI_DSI_TOP_COMP2_SEL, 2) | 123 + FIELD_PREP(MIPI_DSI_TOP_COMP1_SEL, 1) | 124 + FIELD_PREP(MIPI_DSI_TOP_COMP0_SEL, 0), 125 + mipi_dsi->base + MIPI_DSI_TOP_CNTL); 126 + 127 + return phy_configure(mipi_dsi->phy, &mipi_dsi->phy_opts); 128 + } 129 + 130 + static void dw_mipi_dsi_phy_power_on(void *priv_data) 131 + { 132 + struct meson_dw_mipi_dsi *mipi_dsi = priv_data; 133 + 134 + if (phy_power_on(mipi_dsi->phy)) 135 + dev_warn(mipi_dsi->dev, "Failed to power on PHY\n"); 136 + } 137 + 138 + static void dw_mipi_dsi_phy_power_off(void *priv_data) 139 + { 140 + struct meson_dw_mipi_dsi *mipi_dsi = priv_data; 141 + 142 + if (phy_power_off(mipi_dsi->phy)) 143 + dev_warn(mipi_dsi->dev, "Failed to power off PHY\n"); 144 + 145 + /* Remove the exclusivity on the bit clock rate */ 146 + clk_rate_exclusive_put(mipi_dsi->bit_clk); 147 + } 148 + 149 + static int 150 + dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode, 151 + unsigned long mode_flags, u32 lanes, u32 format, 152 + unsigned int *lane_mbps) 153 + { 154 + struct meson_dw_mipi_dsi *mipi_dsi = priv_data; 155 + int bpp; 156 + 157 + mipi_dsi->mode = mode; 158 + 159 + bpp = mipi_dsi_pixel_format_to_bpp(mipi_dsi->dsi_device->format); 160 + 161 + phy_mipi_dphy_get_default_config(mode->clock * 1000, 162 + bpp, mipi_dsi->dsi_device->lanes, 163 + &mipi_dsi->phy_opts.mipi_dphy); 164 + 165 + *lane_mbps = DIV_ROUND_UP(mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate, USEC_PER_SEC); 166 + 167 + return 0; 168 + } 169 + 170 + static int 171 + dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps, 172 + struct dw_mipi_dsi_dphy_timing *timing) 173 + { 174 + struct meson_dw_mipi_dsi *mipi_dsi = priv_data; 175 + 176 + switch (mipi_dsi->mode->hdisplay) { 177 + case 240: 178 + case 768: 179 + case 1920: 180 + case 
2560: 181 + timing->clk_lp2hs = 23; 182 + timing->clk_hs2lp = 38; 183 + timing->data_lp2hs = 15; 184 + timing->data_hs2lp = 9; 185 + break; 186 + 187 + default: 188 + timing->clk_lp2hs = 37; 189 + timing->clk_hs2lp = 135; 190 + timing->data_lp2hs = 50; 191 + timing->data_hs2lp = 3; 192 + } 193 + 194 + return 0; 195 + } 196 + 197 + static int 198 + dw_mipi_dsi_get_esc_clk_rate(void *priv_data, unsigned int *esc_clk_rate) 199 + { 200 + *esc_clk_rate = 4; /* Mhz */ 201 + 202 + return 0; 203 + } 204 + 205 + static const struct dw_mipi_dsi_phy_ops meson_dw_mipi_dsi_phy_ops = { 206 + .init = dw_mipi_dsi_phy_init, 207 + .power_on = dw_mipi_dsi_phy_power_on, 208 + .power_off = dw_mipi_dsi_phy_power_off, 209 + .get_lane_mbps = dw_mipi_dsi_get_lane_mbps, 210 + .get_timing = dw_mipi_dsi_phy_get_timing, 211 + .get_esc_clk_rate = dw_mipi_dsi_get_esc_clk_rate, 212 + }; 213 + 214 + static int meson_dw_mipi_dsi_host_attach(void *priv_data, 215 + struct mipi_dsi_device *device) 216 + { 217 + struct meson_dw_mipi_dsi *mipi_dsi = priv_data; 218 + int ret; 219 + 220 + mipi_dsi->dsi_device = device; 221 + 222 + switch (device->format) { 223 + case MIPI_DSI_FMT_RGB888: 224 + break; 225 + case MIPI_DSI_FMT_RGB666: 226 + break; 227 + case MIPI_DSI_FMT_RGB666_PACKED: 228 + case MIPI_DSI_FMT_RGB565: 229 + dev_err(mipi_dsi->dev, "invalid pixel format %d\n", device->format); 230 + return -EINVAL; 231 + } 232 + 233 + ret = phy_init(mipi_dsi->phy); 234 + if (ret) 235 + return ret; 236 + 237 + meson_dw_mipi_dsi_hw_init(mipi_dsi); 238 + 239 + return 0; 240 + } 241 + 242 + static int meson_dw_mipi_dsi_host_detach(void *priv_data, 243 + struct mipi_dsi_device *device) 244 + { 245 + struct meson_dw_mipi_dsi *mipi_dsi = priv_data; 246 + 247 + if (device == mipi_dsi->dsi_device) 248 + mipi_dsi->dsi_device = NULL; 249 + else 250 + return -EINVAL; 251 + 252 + return phy_exit(mipi_dsi->phy); 253 + } 254 + 255 + static const struct dw_mipi_dsi_host_ops meson_dw_mipi_dsi_host_ops = { 256 + .attach = 
meson_dw_mipi_dsi_host_attach, 257 + .detach = meson_dw_mipi_dsi_host_detach, 258 + }; 259 + 260 + static int meson_dw_mipi_dsi_probe(struct platform_device *pdev) 261 + { 262 + struct meson_dw_mipi_dsi *mipi_dsi; 263 + struct device *dev = &pdev->dev; 264 + 265 + mipi_dsi = devm_kzalloc(dev, sizeof(*mipi_dsi), GFP_KERNEL); 266 + if (!mipi_dsi) 267 + return -ENOMEM; 268 + 269 + mipi_dsi->base = devm_platform_ioremap_resource(pdev, 0); 270 + if (IS_ERR(mipi_dsi->base)) 271 + return PTR_ERR(mipi_dsi->base); 272 + 273 + mipi_dsi->phy = devm_phy_get(dev, "dphy"); 274 + if (IS_ERR(mipi_dsi->phy)) 275 + return dev_err_probe(dev, PTR_ERR(mipi_dsi->phy), 276 + "failed to get mipi dphy\n"); 277 + 278 + mipi_dsi->bit_clk = devm_clk_get_enabled(dev, "bit"); 279 + if (IS_ERR(mipi_dsi->bit_clk)) { 280 + int ret = PTR_ERR(mipi_dsi->bit_clk); 281 + 282 + /* TOFIX GP0 on some platforms fails to lock in early boot, defer probe */ 283 + if (ret == -EIO) 284 + ret = -EPROBE_DEFER; 285 + 286 + return dev_err_probe(dev, ret, "Unable to get enabled bit_clk\n"); 287 + } 288 + 289 + mipi_dsi->px_clk = devm_clk_get_enabled(dev, "px"); 290 + if (IS_ERR(mipi_dsi->px_clk)) 291 + return dev_err_probe(dev, PTR_ERR(mipi_dsi->px_clk), 292 + "Unable to get enabled px_clk\n"); 293 + 294 + /* 295 + * We use a TOP reset signal because the APB reset signal 296 + * is handled by the TOP control registers. 
297 + */ 298 + mipi_dsi->top_rst = devm_reset_control_get_exclusive(dev, "top"); 299 + if (IS_ERR(mipi_dsi->top_rst)) 300 + return dev_err_probe(dev, PTR_ERR(mipi_dsi->top_rst), 301 + "Unable to get reset control\n"); 302 + 303 + reset_control_assert(mipi_dsi->top_rst); 304 + usleep_range(10, 20); 305 + reset_control_deassert(mipi_dsi->top_rst); 306 + 307 + /* MIPI DSI Controller */ 308 + 309 + mipi_dsi->dev = dev; 310 + mipi_dsi->pdata.base = mipi_dsi->base; 311 + mipi_dsi->pdata.max_data_lanes = 4; 312 + mipi_dsi->pdata.phy_ops = &meson_dw_mipi_dsi_phy_ops; 313 + mipi_dsi->pdata.host_ops = &meson_dw_mipi_dsi_host_ops; 314 + mipi_dsi->pdata.priv_data = mipi_dsi; 315 + platform_set_drvdata(pdev, mipi_dsi); 316 + 317 + mipi_dsi->dmd = dw_mipi_dsi_probe(pdev, &mipi_dsi->pdata); 318 + if (IS_ERR(mipi_dsi->dmd)) 319 + return dev_err_probe(dev, PTR_ERR(mipi_dsi->dmd), 320 + "Failed to probe dw_mipi_dsi\n"); 321 + 322 + return 0; 323 + } 324 + 325 + static int meson_dw_mipi_dsi_remove(struct platform_device *pdev) 326 + { 327 + struct meson_dw_mipi_dsi *mipi_dsi = platform_get_drvdata(pdev); 328 + 329 + dw_mipi_dsi_remove(mipi_dsi->dmd); 330 + 331 + return 0; 332 + } 333 + 334 + static const struct of_device_id meson_dw_mipi_dsi_of_table[] = { 335 + { .compatible = "amlogic,meson-g12a-dw-mipi-dsi", }, 336 + { } 337 + }; 338 + MODULE_DEVICE_TABLE(of, meson_dw_mipi_dsi_of_table); 339 + 340 + static struct platform_driver meson_dw_mipi_dsi_platform_driver = { 341 + .probe = meson_dw_mipi_dsi_probe, 342 + .remove = meson_dw_mipi_dsi_remove, 343 + .driver = { 344 + .name = DRIVER_NAME, 345 + .of_match_table = meson_dw_mipi_dsi_of_table, 346 + }, 347 + }; 348 + module_platform_driver(meson_dw_mipi_dsi_platform_driver); 349 + 350 + MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); 351 + MODULE_DESCRIPTION(DRIVER_DESC); 352 + MODULE_LICENSE("GPL");
+160
drivers/gpu/drm/meson/meson_dw_mipi_dsi.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Copyright (C) 2020 BayLibre, SAS 4 + * Author: Neil Armstrong <narmstrong@baylibre.com> 5 + * Copyright (C) 2018 Amlogic, Inc. All rights reserved. 6 + */ 7 + 8 + #ifndef __MESON_DW_MIPI_DSI_H 9 + #define __MESON_DW_MIPI_DSI_H 10 + 11 + /* Top-level registers */ 12 + /* [31: 4] Reserved. Default 0. 13 + * [3] RW timing_rst_n: Default 1. 14 + * 1=Assert SW reset of timing feature. 0=Release reset. 15 + * [2] RW dpi_rst_n: Default 1. 16 + * 1=Assert SW reset on mipi_dsi_host_dpi block. 0=Release reset. 17 + * [1] RW intr_rst_n: Default 1. 18 + * 1=Assert SW reset on mipi_dsi_host_intr block. 0=Release reset. 19 + * [0] RW dwc_rst_n: Default 1. 20 + * 1=Assert SW reset on IP core. 0=Release reset. 21 + */ 22 + #define MIPI_DSI_TOP_SW_RESET 0x3c0 23 + 24 + #define MIPI_DSI_TOP_SW_RESET_DWC BIT(0) 25 + #define MIPI_DSI_TOP_SW_RESET_INTR BIT(1) 26 + #define MIPI_DSI_TOP_SW_RESET_DPI BIT(2) 27 + #define MIPI_DSI_TOP_SW_RESET_TIMING BIT(3) 28 + 29 + /* [31: 5] Reserved. Default 0. 30 + * [4] RW manual_edpihalt: Default 0. 31 + * 1=Manual suspend VencL; 0=do not suspend VencL. 32 + * [3] RW auto_edpihalt_en: Default 0. 33 + * 1=Enable IP's edpihalt signal to suspend VencL; 34 + * 0=IP's edpihalt signal does not affect VencL. 35 + * [2] RW clock_freerun: Apply to auto-clock gate only. Default 0. 36 + * 0=Default, use auto-clock gating to save power; 37 + * 1=use free-run clock, disable auto-clock gating, for debug mode. 38 + * [1] RW enable_pixclk: A manual clock gate option, due to DWC IP does not 39 + * have auto-clock gating. 1=Enable pixclk. Default 0. 40 + * [0] RW enable_sysclk: A manual clock gate option, due to DWC IP does not 41 + * have auto-clock gating. 1=Enable sysclk. Default 0. 42 + */ 43 + #define MIPI_DSI_TOP_CLK_CNTL 0x3c4 44 + 45 + #define MIPI_DSI_TOP_CLK_SYSCLK_EN BIT(0) 46 + #define MIPI_DSI_TOP_CLK_PIXCLK_EN BIT(1) 47 + 48 + /* [31:24] Reserved. Default 0. 
49 + * [23:20] RW dpi_color_mode: Define DPI pixel format. Default 0. 50 + * 0=16-bit RGB565 config 1; 51 + * 1=16-bit RGB565 config 2; 52 + * 2=16-bit RGB565 config 3; 53 + * 3=18-bit RGB666 config 1; 54 + * 4=18-bit RGB666 config 2; 55 + * 5=24-bit RGB888; 56 + * 6=20-bit YCbCr 4:2:2; 57 + * 7=24-bit YCbCr 4:2:2; 58 + * 8=16-bit YCbCr 4:2:2; 59 + * 9=30-bit RGB; 60 + * 10=36-bit RGB; 61 + * 11=12-bit YCbCr 4:2:0. 62 + * [19] Reserved. Default 0. 63 + * [18:16] RW in_color_mode: Define VENC data width. Default 0. 64 + * 0=30-bit pixel; 65 + * 1=24-bit pixel; 66 + * 2=18-bit pixel, RGB666; 67 + * 3=16-bit pixel, RGB565. 68 + * [15:14] RW chroma_subsample: Define method of chroma subsampling. Default 0. 69 + * Applicable to YUV422 or YUV420 only. 70 + * 0=Use even pixel's chroma; 71 + * 1=Use odd pixel's chroma; 72 + * 2=Use averaged value between even and odd pair. 73 + * [13:12] RW comp2_sel: Select which component to be Cr or B: Default 2. 74 + * 0=comp0; 1=comp1; 2=comp2. 75 + * [11:10] RW comp1_sel: Select which component to be Cb or G: Default 1. 76 + * 0=comp0; 1=comp1; 2=comp2. 77 + * [9: 8] RW comp0_sel: Select which component to be Y or R: Default 0. 78 + * 0=comp0; 1=comp1; 2=comp2. 79 + * [7] Reserved. Default 0. 80 + * [6] RW de_pol: Default 0. 81 + * If DE input is active low, set to 1 to invert to active high. 82 + * [5] RW hsync_pol: Default 0. 83 + * If HS input is active low, set to 1 to invert to active high. 84 + * [4] RW vsync_pol: Default 0. 85 + * If VS input is active low, set to 1 to invert to active high. 86 + * [3] RW dpicolorm: Signal to IP. Default 0. 87 + * [2] RW dpishutdn: Signal to IP. Default 0. 88 + * [1] Reserved. Default 0. 89 + * [0] Reserved. Default 0. 
90 + */ 91 + #define MIPI_DSI_TOP_CNTL 0x3c8 92 + 93 + /* VENC data width */ 94 + #define VENC_IN_COLOR_30B 0x0 95 + #define VENC_IN_COLOR_24B 0x1 96 + #define VENC_IN_COLOR_18B 0x2 97 + #define VENC_IN_COLOR_16B 0x3 98 + 99 + /* DPI pixel format */ 100 + #define DPI_COLOR_16BIT_CFG_1 0 101 + #define DPI_COLOR_16BIT_CFG_2 1 102 + #define DPI_COLOR_16BIT_CFG_3 2 103 + #define DPI_COLOR_18BIT_CFG_1 3 104 + #define DPI_COLOR_18BIT_CFG_2 4 105 + #define DPI_COLOR_24BIT 5 106 + #define DPI_COLOR_20BIT_YCBCR_422 6 107 + #define DPI_COLOR_24BIT_YCBCR_422 7 108 + #define DPI_COLOR_16BIT_YCBCR_422 8 109 + #define DPI_COLOR_30BIT 9 110 + #define DPI_COLOR_36BIT 10 111 + #define DPI_COLOR_12BIT_YCBCR_420 11 112 + 113 + #define MIPI_DSI_TOP_DPI_COLOR_MODE GENMASK(23, 20) 114 + #define MIPI_DSI_TOP_IN_COLOR_MODE GENMASK(18, 16) 115 + #define MIPI_DSI_TOP_CHROMA_SUBSAMPLE GENMASK(15, 14) 116 + #define MIPI_DSI_TOP_COMP2_SEL GENMASK(13, 12) 117 + #define MIPI_DSI_TOP_COMP1_SEL GENMASK(11, 10) 118 + #define MIPI_DSI_TOP_COMP0_SEL GENMASK(9, 8) 119 + #define MIPI_DSI_TOP_DE_INVERT BIT(6) 120 + #define MIPI_DSI_TOP_HSYNC_INVERT BIT(5) 121 + #define MIPI_DSI_TOP_VSYNC_INVERT BIT(4) 122 + #define MIPI_DSI_TOP_DPICOLORM BIT(3) 123 + #define MIPI_DSI_TOP_DPISHUTDN BIT(2) 124 + 125 + #define MIPI_DSI_TOP_SUSPEND_CNTL 0x3cc 126 + #define MIPI_DSI_TOP_SUSPEND_LINE 0x3d0 127 + #define MIPI_DSI_TOP_SUSPEND_PIX 0x3d4 128 + #define MIPI_DSI_TOP_MEAS_CNTL 0x3d8 129 + /* [0] R stat_edpihalt: edpihalt signal from IP. Default 0. */ 130 + #define MIPI_DSI_TOP_STAT 0x3dc 131 + #define MIPI_DSI_TOP_MEAS_STAT_TE0 0x3e0 132 + #define MIPI_DSI_TOP_MEAS_STAT_TE1 0x3e4 133 + #define MIPI_DSI_TOP_MEAS_STAT_VS0 0x3e8 134 + #define MIPI_DSI_TOP_MEAS_STAT_VS1 0x3ec 135 + /* [31:16] RW intr_stat/clr. Default 0. 136 + * For each bit, read as this interrupt level status, 137 + * write 1 to clear. 
138 + * [31:22] Reserved 139 + * [ 21] stat/clr of eof interrupt 140 + * [ 20] stat/clr of vde_fall interrupt 141 + * [ 19] stat/clr of de_rise interrupt 142 + * [ 18] stat/clr of vs_fall interrupt 143 + * [ 17] stat/clr of vs_rise interrupt 144 + * [ 16] stat/clr of dwc_edpite interrupt 145 + * [15: 0] RW intr_enable. Default 0. 146 + * For each bit, 1=enable this interrupt, 0=disable. 147 + * [15: 6] Reserved 148 + * [ 5] eof interrupt 149 + * [ 4] de_fall interrupt 150 + * [ 3] de_rise interrupt 151 + * [ 2] vs_fall interrupt 152 + * [ 1] vs_rise interrupt 153 + * [ 0] dwc_edpite interrupt 154 + */ 155 + #define MIPI_DSI_TOP_INTR_CNTL_STAT 0x3f0 156 + // 31: 2 Reserved. Default 0. 157 + // 1: 0 RW mem_pd. Default 3. 158 + #define MIPI_DSI_TOP_MEM_PD 0x3f4 159 + 160 + #endif /* __MESON_DW_MIPI_DSI_H */
+174
drivers/gpu/drm/meson/meson_encoder_dsi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * Copyright (C) 2016 BayLibre, SAS 4 + * Author: Neil Armstrong <narmstrong@baylibre.com> 5 + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 6 + */ 7 + 8 + #include <linux/kernel.h> 9 + #include <linux/module.h> 10 + #include <linux/of_device.h> 11 + #include <linux/of_graph.h> 12 + 13 + #include <drm/drm_atomic_helper.h> 14 + #include <drm/drm_simple_kms_helper.h> 15 + #include <drm/drm_bridge.h> 16 + #include <drm/drm_bridge_connector.h> 17 + #include <drm/drm_device.h> 18 + #include <drm/drm_probe_helper.h> 19 + 20 + #include "meson_drv.h" 21 + #include "meson_encoder_dsi.h" 22 + #include "meson_registers.h" 23 + #include "meson_venc.h" 24 + #include "meson_vclk.h" 25 + 26 + struct meson_encoder_dsi { 27 + struct drm_encoder encoder; 28 + struct drm_bridge bridge; 29 + struct drm_bridge *next_bridge; 30 + struct meson_drm *priv; 31 + }; 32 + 33 + #define bridge_to_meson_encoder_dsi(x) \ 34 + container_of(x, struct meson_encoder_dsi, bridge) 35 + 36 + static int meson_encoder_dsi_attach(struct drm_bridge *bridge, 37 + enum drm_bridge_attach_flags flags) 38 + { 39 + struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge); 40 + 41 + return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge, 42 + &encoder_dsi->bridge, flags); 43 + } 44 + 45 + static void meson_encoder_dsi_atomic_enable(struct drm_bridge *bridge, 46 + struct drm_bridge_state *bridge_state) 47 + { 48 + struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge); 49 + struct drm_atomic_state *state = bridge_state->base.state; 50 + struct meson_drm *priv = encoder_dsi->priv; 51 + struct drm_connector_state *conn_state; 52 + struct drm_crtc_state *crtc_state; 53 + struct drm_connector *connector; 54 + 55 + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); 56 + if (WARN_ON(!connector)) 57 + return; 58 + 59 + conn_state = drm_atomic_get_new_connector_state(state, 
connector); 60 + if (WARN_ON(!conn_state)) 61 + return; 62 + 63 + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); 64 + if (WARN_ON(!crtc_state)) 65 + return; 66 + 67 + /* ENCL clock setup is handled by CCF */ 68 + 69 + meson_venc_mipi_dsi_mode_set(priv, &crtc_state->adjusted_mode); 70 + meson_encl_load_gamma(priv); 71 + 72 + writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN)); 73 + 74 + writel_bits_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN, ENCL_VIDEO_MODE_ADV_VFIFO_EN, 75 + priv->io_base + _REG(ENCL_VIDEO_MODE_ADV)); 76 + writel_relaxed(0, priv->io_base + _REG(ENCL_TST_EN)); 77 + 78 + writel_bits_relaxed(BIT(0), 0, priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL)); 79 + 80 + writel_relaxed(1, priv->io_base + _REG(ENCL_VIDEO_EN)); 81 + } 82 + 83 + static void meson_encoder_dsi_atomic_disable(struct drm_bridge *bridge, 84 + struct drm_bridge_state *bridge_state) 85 + { 86 + struct meson_encoder_dsi *meson_encoder_dsi = 87 + bridge_to_meson_encoder_dsi(bridge); 88 + struct meson_drm *priv = meson_encoder_dsi->priv; 89 + 90 + writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN)); 91 + 92 + writel_bits_relaxed(BIT(0), BIT(0), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL)); 93 + } 94 + 95 + static const struct drm_bridge_funcs meson_encoder_dsi_bridge_funcs = { 96 + .attach = meson_encoder_dsi_attach, 97 + .atomic_enable = meson_encoder_dsi_atomic_enable, 98 + .atomic_disable = meson_encoder_dsi_atomic_disable, 99 + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, 100 + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, 101 + .atomic_reset = drm_atomic_helper_bridge_reset, 102 + }; 103 + 104 + int meson_encoder_dsi_init(struct meson_drm *priv) 105 + { 106 + struct meson_encoder_dsi *meson_encoder_dsi; 107 + struct device_node *remote; 108 + int ret; 109 + 110 + meson_encoder_dsi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_dsi), GFP_KERNEL); 111 + if (!meson_encoder_dsi) 112 + return -ENOMEM; 113 + 114 + /* 
DSI Transceiver Bridge */ 115 + remote = of_graph_get_remote_node(priv->dev->of_node, 2, 0); 116 + if (!remote) { 117 + dev_err(priv->dev, "DSI transceiver device is disabled"); 118 + return 0; 119 + } 120 + 121 + meson_encoder_dsi->next_bridge = of_drm_find_bridge(remote); 122 + if (!meson_encoder_dsi->next_bridge) { 123 + dev_dbg(priv->dev, "Failed to find DSI transceiver bridge\n"); 124 + return -EPROBE_DEFER; 125 + } 126 + 127 + /* DSI Encoder Bridge */ 128 + meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs; 129 + meson_encoder_dsi->bridge.of_node = priv->dev->of_node; 130 + meson_encoder_dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; 131 + 132 + drm_bridge_add(&meson_encoder_dsi->bridge); 133 + 134 + meson_encoder_dsi->priv = priv; 135 + 136 + /* Encoder */ 137 + ret = drm_simple_encoder_init(priv->drm, &meson_encoder_dsi->encoder, 138 + DRM_MODE_ENCODER_DSI); 139 + if (ret) { 140 + dev_err(priv->dev, "Failed to init DSI encoder: %d\n", ret); 141 + return ret; 142 + } 143 + 144 + meson_encoder_dsi->encoder.possible_crtcs = BIT(0); 145 + 146 + /* Attach DSI Encoder Bridge to Encoder */ 147 + ret = drm_bridge_attach(&meson_encoder_dsi->encoder, &meson_encoder_dsi->bridge, NULL, 0); 148 + if (ret) { 149 + dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); 150 + return ret; 151 + } 152 + 153 + /* 154 + * We should have now in place: 155 + * encoder->[dsi encoder bridge]->[dw-mipi-dsi bridge]->[panel bridge]->[panel] 156 + */ 157 + 158 + priv->encoders[MESON_ENC_DSI] = meson_encoder_dsi; 159 + 160 + dev_dbg(priv->dev, "DSI encoder initialized\n"); 161 + 162 + return 0; 163 + } 164 + 165 + void meson_encoder_dsi_remove(struct meson_drm *priv) 166 + { 167 + struct meson_encoder_dsi *meson_encoder_dsi; 168 + 169 + if (priv->encoders[MESON_ENC_DSI]) { 170 + meson_encoder_dsi = priv->encoders[MESON_ENC_DSI]; 171 + drm_bridge_remove(&meson_encoder_dsi->bridge); 172 + drm_bridge_remove(meson_encoder_dsi->next_bridge); 173 + } 174 + }
+13
drivers/gpu/drm/meson/meson_encoder_dsi.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Copyright (C) 2021 BayLibre, SAS 4 + * Author: Neil Armstrong <narmstrong@baylibre.com> 5 + */ 6 + 7 + #ifndef __MESON_ENCODER_DSI_H 8 + #define __MESON_ENCODER_DSI_H 9 + 10 + int meson_encoder_dsi_init(struct meson_drm *priv); 11 + void meson_encoder_dsi_remove(struct meson_drm *priv); 12 + 13 + #endif /* __MESON_ENCODER_DSI_H */
+25
drivers/gpu/drm/meson/meson_registers.h
··· 812 812 #define VENC_STATA 0x1b6d 813 813 #define VENC_INTCTRL 0x1b6e 814 814 #define VENC_INTCTRL_ENCI_LNRST_INT_EN BIT(1) 815 + #define VENC_INTCTRL_ENCP_LNRST_INT_EN BIT(9) 815 816 #define VENC_INTFLAG 0x1b6f 816 817 #define VENC_VIDEO_TST_EN 0x1b70 817 818 #define VENC_VIDEO_TST_MDSEL 0x1b71 ··· 1193 1192 #define ENCL_VIDEO_PB_OFFST 0x1ca5 1194 1193 #define ENCL_VIDEO_PR_OFFST 0x1ca6 1195 1194 #define ENCL_VIDEO_MODE 0x1ca7 1195 + #define ENCL_PX_LN_CNT_SHADOW_EN BIT(15) 1196 1196 #define ENCL_VIDEO_MODE_ADV 0x1ca8 1197 + #define ENCL_VIDEO_MODE_ADV_VFIFO_EN BIT(3) 1198 + #define ENCL_VIDEO_MODE_ADV_GAIN_HDTV BIT(4) 1199 + #define ENCL_SEL_GAMMA_RGB_IN BIT(10) 1197 1200 #define ENCL_DBG_PX_RST 0x1ca9 1198 1201 #define ENCL_DBG_LN_RST 0x1caa 1199 1202 #define ENCL_DBG_PX_INT 0x1cab ··· 1224 1219 #define ENCL_VIDEO_VOFFST 0x1cc0 1225 1220 #define ENCL_VIDEO_RGB_CTRL 0x1cc1 1226 1221 #define ENCL_VIDEO_FILT_CTRL 0x1cc2 1222 + #define ENCL_VIDEO_FILT_CTRL_BYPASS_FILTER BIT(12) 1227 1223 #define ENCL_VIDEO_OFLD_VPEQ_OFST 0x1cc3 1228 1224 #define ENCL_VIDEO_OFLD_VOAV_OFST 0x1cc4 1229 1225 #define ENCL_VIDEO_MATRIX_CB 0x1cc5 1230 1226 #define ENCL_VIDEO_MATRIX_CR 0x1cc6 1231 1227 #define ENCL_VIDEO_RGBIN_CTRL 0x1cc7 1228 + #define ENCL_VIDEO_RGBIN_RGB BIT(0) 1229 + #define ENCL_VIDEO_RGBIN_ZBLK BIT(1) 1232 1230 #define ENCL_MAX_LINE_SWITCH_POINT 0x1cc8 1233 1231 #define ENCL_DACSEL_0 0x1cc9 1234 1232 #define ENCL_DACSEL_1 0x1cca ··· 1308 1300 #define RDMA_STATUS2 0x1116 1309 1301 #define RDMA_STATUS3 0x1117 1310 1302 #define L_GAMMA_CNTL_PORT 0x1400 1303 + #define L_GAMMA_CNTL_PORT_VCOM_POL BIT(7) /* RW */ 1304 + #define L_GAMMA_CNTL_PORT_RVS_OUT BIT(6) /* RW */ 1305 + #define L_GAMMA_CNTL_PORT_ADR_RDY BIT(5) /* Read Only */ 1306 + #define L_GAMMA_CNTL_PORT_WR_RDY BIT(4) /* Read Only */ 1307 + #define L_GAMMA_CNTL_PORT_RD_RDY BIT(3) /* Read Only */ 1308 + #define L_GAMMA_CNTL_PORT_TR BIT(2) /* RW */ 1309 + #define L_GAMMA_CNTL_PORT_SET BIT(1) /* RW */ 1310 + 
#define L_GAMMA_CNTL_PORT_EN BIT(0) /* RW */ 1311 1311 #define L_GAMMA_DATA_PORT 0x1401 1312 1312 #define L_GAMMA_ADDR_PORT 0x1402 1313 + #define L_GAMMA_ADDR_PORT_RD BIT(12) 1314 + #define L_GAMMA_ADDR_PORT_AUTO_INC BIT(11) 1315 + #define L_GAMMA_ADDR_PORT_SEL_R BIT(10) 1316 + #define L_GAMMA_ADDR_PORT_SEL_G BIT(9) 1317 + #define L_GAMMA_ADDR_PORT_SEL_B BIT(8) 1318 + #define L_GAMMA_ADDR_PORT_ADDR GENMASK(7, 0) 1313 1319 #define L_GAMMA_VCOM_HSWITCH_ADDR 0x1403 1314 1320 #define L_RGB_BASE_ADDR 0x1405 1315 1321 #define L_RGB_COEFF_ADDR 0x1406 1316 1322 #define L_POL_CNTL_ADDR 0x1407 1317 1323 #define L_DITH_CNTL_ADDR 0x1408 1324 + #define L_DITH_CNTL_DITH10_EN BIT(10) 1318 1325 #define L_GAMMA_PROBE_CTRL 0x1409 1319 1326 #define L_GAMMA_PROBE_COLOR_L 0x140a 1320 1327 #define L_GAMMA_PROBE_COLOR_H 0x140b ··· 1386 1363 #define L_LCD_PWM1_HI_ADDR 0x143f 1387 1364 #define L_INV_CNT_ADDR 0x1440 1388 1365 #define L_TCON_MISC_SEL_ADDR 0x1441 1366 + #define L_TCON_MISC_SEL_STV1 BIT(4) 1367 + #define L_TCON_MISC_SEL_STV2 BIT(5) 1389 1368 #define L_DUAL_PORT_CNTL_ADDR 0x1442 1390 1369 #define MLVDS_CLK_CTL1_HI 0x1443 1391 1370 #define MLVDS_CLK_CTL1_LO 0x1444
+210 -2
drivers/gpu/drm/meson/meson_venc.c
··· 5 5 * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 6 6 */ 7 7 8 + #include <linux/bitfield.h> 8 9 #include <linux/export.h> 10 + #include <linux/iopoll.h> 9 11 10 12 #include <drm/drm_modes.h> 11 13 ··· 1559 1557 } 1560 1558 EXPORT_SYMBOL_GPL(meson_venc_hdmi_mode_set); 1561 1559 1560 + static unsigned short meson_encl_gamma_table[256] = { 1561 + 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 1562 + 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 1563 + 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176, 180, 184, 188, 1564 + 192, 196, 200, 204, 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 248, 252, 1565 + 256, 260, 264, 268, 272, 276, 280, 284, 288, 292, 296, 300, 304, 308, 312, 316, 1566 + 320, 324, 328, 332, 336, 340, 344, 348, 352, 356, 360, 364, 368, 372, 376, 380, 1567 + 384, 388, 392, 396, 400, 404, 408, 412, 416, 420, 424, 428, 432, 436, 440, 444, 1568 + 448, 452, 456, 460, 464, 468, 472, 476, 480, 484, 488, 492, 496, 500, 504, 508, 1569 + 512, 516, 520, 524, 528, 532, 536, 540, 544, 548, 552, 556, 560, 564, 568, 572, 1570 + 576, 580, 584, 588, 592, 596, 600, 604, 608, 612, 616, 620, 624, 628, 632, 636, 1571 + 640, 644, 648, 652, 656, 660, 664, 668, 672, 676, 680, 684, 688, 692, 696, 700, 1572 + 704, 708, 712, 716, 720, 724, 728, 732, 736, 740, 744, 748, 752, 756, 760, 764, 1573 + 768, 772, 776, 780, 784, 788, 792, 796, 800, 804, 808, 812, 816, 820, 824, 828, 1574 + 832, 836, 840, 844, 848, 852, 856, 860, 864, 868, 872, 876, 880, 884, 888, 892, 1575 + 896, 900, 904, 908, 912, 916, 920, 924, 928, 932, 936, 940, 944, 948, 952, 956, 1576 + 960, 964, 968, 972, 976, 980, 984, 988, 992, 996, 1000, 1004, 1008, 1012, 1016, 1020, 1577 + }; 1578 + 1579 + static void meson_encl_set_gamma_table(struct meson_drm *priv, u16 *data, 1580 + u32 rgb_mask) 1581 + { 1582 + int i, ret; 1583 + u32 reg; 1584 + 1585 + writel_bits_relaxed(L_GAMMA_CNTL_PORT_EN, 0, 1586 + priv->io_base + _REG(L_GAMMA_CNTL_PORT)); 1587 
+ 1588 + ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT), 1589 + reg, reg & L_GAMMA_CNTL_PORT_ADR_RDY, 10, 10000); 1590 + if (ret) 1591 + pr_warn("%s: GAMMA ADR_RDY timeout\n", __func__); 1592 + 1593 + writel_relaxed(L_GAMMA_ADDR_PORT_AUTO_INC | rgb_mask | 1594 + FIELD_PREP(L_GAMMA_ADDR_PORT_ADDR, 0), 1595 + priv->io_base + _REG(L_GAMMA_ADDR_PORT)); 1596 + 1597 + for (i = 0; i < 256; i++) { 1598 + ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT), 1599 + reg, reg & L_GAMMA_CNTL_PORT_WR_RDY, 1600 + 10, 10000); 1601 + if (ret) 1602 + pr_warn_once("%s: GAMMA WR_RDY timeout\n", __func__); 1603 + 1604 + writel_relaxed(data[i], priv->io_base + _REG(L_GAMMA_DATA_PORT)); 1605 + } 1606 + 1607 + ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT), 1608 + reg, reg & L_GAMMA_CNTL_PORT_ADR_RDY, 10, 10000); 1609 + if (ret) 1610 + pr_warn("%s: GAMMA ADR_RDY timeout\n", __func__); 1611 + 1612 + writel_relaxed(L_GAMMA_ADDR_PORT_AUTO_INC | rgb_mask | 1613 + FIELD_PREP(L_GAMMA_ADDR_PORT_ADDR, 0x23), 1614 + priv->io_base + _REG(L_GAMMA_ADDR_PORT)); 1615 + } 1616 + 1617 + void meson_encl_load_gamma(struct meson_drm *priv) 1618 + { 1619 + meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_R); 1620 + meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_G); 1621 + meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_B); 1622 + 1623 + writel_bits_relaxed(L_GAMMA_CNTL_PORT_EN, L_GAMMA_CNTL_PORT_EN, 1624 + priv->io_base + _REG(L_GAMMA_CNTL_PORT)); 1625 + } 1626 + 1627 + void meson_venc_mipi_dsi_mode_set(struct meson_drm *priv, 1628 + const struct drm_display_mode *mode) 1629 + { 1630 + unsigned int max_pxcnt; 1631 + unsigned int max_lncnt; 1632 + unsigned int havon_begin; 1633 + unsigned int havon_end; 1634 + unsigned int vavon_bline; 1635 + unsigned int vavon_eline; 1636 + unsigned int hso_begin; 1637 + unsigned int hso_end; 1638 + unsigned int 
vso_begin; 1639 + unsigned int vso_end; 1640 + unsigned int vso_bline; 1641 + unsigned int vso_eline; 1642 + 1643 + max_pxcnt = mode->htotal - 1; 1644 + max_lncnt = mode->vtotal - 1; 1645 + havon_begin = mode->htotal - mode->hsync_start; 1646 + havon_end = havon_begin + mode->hdisplay - 1; 1647 + vavon_bline = mode->vtotal - mode->vsync_start; 1648 + vavon_eline = vavon_bline + mode->vdisplay - 1; 1649 + hso_begin = 0; 1650 + hso_end = mode->hsync_end - mode->hsync_start; 1651 + vso_begin = 0; 1652 + vso_end = 0; 1653 + vso_bline = 0; 1654 + vso_eline = mode->vsync_end - mode->vsync_start; 1655 + 1656 + meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCL); 1657 + 1658 + writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN)); 1659 + 1660 + writel_relaxed(ENCL_PX_LN_CNT_SHADOW_EN, priv->io_base + _REG(ENCL_VIDEO_MODE)); 1661 + writel_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN | 1662 + ENCL_VIDEO_MODE_ADV_GAIN_HDTV | 1663 + ENCL_SEL_GAMMA_RGB_IN, priv->io_base + _REG(ENCL_VIDEO_MODE_ADV)); 1664 + 1665 + writel_relaxed(ENCL_VIDEO_FILT_CTRL_BYPASS_FILTER, 1666 + priv->io_base + _REG(ENCL_VIDEO_FILT_CTRL)); 1667 + writel_relaxed(max_pxcnt, priv->io_base + _REG(ENCL_VIDEO_MAX_PXCNT)); 1668 + writel_relaxed(max_lncnt, priv->io_base + _REG(ENCL_VIDEO_MAX_LNCNT)); 1669 + writel_relaxed(havon_begin, priv->io_base + _REG(ENCL_VIDEO_HAVON_BEGIN)); 1670 + writel_relaxed(havon_end, priv->io_base + _REG(ENCL_VIDEO_HAVON_END)); 1671 + writel_relaxed(vavon_bline, priv->io_base + _REG(ENCL_VIDEO_VAVON_BLINE)); 1672 + writel_relaxed(vavon_eline, priv->io_base + _REG(ENCL_VIDEO_VAVON_ELINE)); 1673 + 1674 + writel_relaxed(hso_begin, priv->io_base + _REG(ENCL_VIDEO_HSO_BEGIN)); 1675 + writel_relaxed(hso_end, priv->io_base + _REG(ENCL_VIDEO_HSO_END)); 1676 + writel_relaxed(vso_begin, priv->io_base + _REG(ENCL_VIDEO_VSO_BEGIN)); 1677 + writel_relaxed(vso_end, priv->io_base + _REG(ENCL_VIDEO_VSO_END)); 1678 + writel_relaxed(vso_bline, priv->io_base + _REG(ENCL_VIDEO_VSO_BLINE)); 1679 + 
writel_relaxed(vso_eline, priv->io_base + _REG(ENCL_VIDEO_VSO_ELINE)); 1680 + writel_relaxed(ENCL_VIDEO_RGBIN_RGB | ENCL_VIDEO_RGBIN_ZBLK, 1681 + priv->io_base + _REG(ENCL_VIDEO_RGBIN_CTRL)); 1682 + 1683 + /* default black pattern */ 1684 + writel_relaxed(0, priv->io_base + _REG(ENCL_TST_MDSEL)); 1685 + writel_relaxed(0, priv->io_base + _REG(ENCL_TST_Y)); 1686 + writel_relaxed(0, priv->io_base + _REG(ENCL_TST_CB)); 1687 + writel_relaxed(0, priv->io_base + _REG(ENCL_TST_CR)); 1688 + writel_relaxed(1, priv->io_base + _REG(ENCL_TST_EN)); 1689 + writel_bits_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN, 0, 1690 + priv->io_base + _REG(ENCL_VIDEO_MODE_ADV)); 1691 + 1692 + writel_relaxed(1, priv->io_base + _REG(ENCL_VIDEO_EN)); 1693 + 1694 + writel_relaxed(0, priv->io_base + _REG(L_RGB_BASE_ADDR)); 1695 + writel_relaxed(0x400, priv->io_base + _REG(L_RGB_COEFF_ADDR)); /* Magic value */ 1696 + 1697 + writel_relaxed(L_DITH_CNTL_DITH10_EN, priv->io_base + _REG(L_DITH_CNTL_ADDR)); 1698 + 1699 + /* DE signal for TTL */ 1700 + writel_relaxed(havon_begin, priv->io_base + _REG(L_OEH_HS_ADDR)); 1701 + writel_relaxed(havon_end + 1, priv->io_base + _REG(L_OEH_HE_ADDR)); 1702 + writel_relaxed(vavon_bline, priv->io_base + _REG(L_OEH_VS_ADDR)); 1703 + writel_relaxed(vavon_eline, priv->io_base + _REG(L_OEH_VE_ADDR)); 1704 + 1705 + /* DE signal for TTL */ 1706 + writel_relaxed(havon_begin, priv->io_base + _REG(L_OEV1_HS_ADDR)); 1707 + writel_relaxed(havon_end + 1, priv->io_base + _REG(L_OEV1_HE_ADDR)); 1708 + writel_relaxed(vavon_bline, priv->io_base + _REG(L_OEV1_VS_ADDR)); 1709 + writel_relaxed(vavon_eline, priv->io_base + _REG(L_OEV1_VE_ADDR)); 1710 + 1711 + /* Hsync signal for TTL */ 1712 + if (mode->flags & DRM_MODE_FLAG_PHSYNC) { 1713 + writel_relaxed(hso_begin, priv->io_base + _REG(L_STH1_HS_ADDR)); 1714 + writel_relaxed(hso_end, priv->io_base + _REG(L_STH1_HE_ADDR)); 1715 + } else { 1716 + writel_relaxed(hso_end, priv->io_base + _REG(L_STH1_HS_ADDR)); 1717 + writel_relaxed(hso_begin, 
priv->io_base + _REG(L_STH1_HE_ADDR)); 1718 + } 1719 + writel_relaxed(0, priv->io_base + _REG(L_STH1_VS_ADDR)); 1720 + writel_relaxed(max_lncnt, priv->io_base + _REG(L_STH1_VE_ADDR)); 1721 + 1722 + /* Vsync signal for TTL */ 1723 + writel_relaxed(vso_begin, priv->io_base + _REG(L_STV1_HS_ADDR)); 1724 + writel_relaxed(vso_end, priv->io_base + _REG(L_STV1_HE_ADDR)); 1725 + if (mode->flags & DRM_MODE_FLAG_PVSYNC) { 1726 + writel_relaxed(vso_bline, priv->io_base + _REG(L_STV1_VS_ADDR)); 1727 + writel_relaxed(vso_eline, priv->io_base + _REG(L_STV1_VE_ADDR)); 1728 + } else { 1729 + writel_relaxed(vso_eline, priv->io_base + _REG(L_STV1_VS_ADDR)); 1730 + writel_relaxed(vso_bline, priv->io_base + _REG(L_STV1_VE_ADDR)); 1731 + } 1732 + 1733 + /* DE signal */ 1734 + writel_relaxed(havon_begin, priv->io_base + _REG(L_DE_HS_ADDR)); 1735 + writel_relaxed(havon_end + 1, priv->io_base + _REG(L_DE_HE_ADDR)); 1736 + writel_relaxed(vavon_bline, priv->io_base + _REG(L_DE_VS_ADDR)); 1737 + writel_relaxed(vavon_eline, priv->io_base + _REG(L_DE_VE_ADDR)); 1738 + 1739 + /* Hsync signal */ 1740 + writel_relaxed(hso_begin, priv->io_base + _REG(L_HSYNC_HS_ADDR)); 1741 + writel_relaxed(hso_end, priv->io_base + _REG(L_HSYNC_HE_ADDR)); 1742 + writel_relaxed(0, priv->io_base + _REG(L_HSYNC_VS_ADDR)); 1743 + writel_relaxed(max_lncnt, priv->io_base + _REG(L_HSYNC_VE_ADDR)); 1744 + 1745 + /* Vsync signal */ 1746 + writel_relaxed(vso_begin, priv->io_base + _REG(L_VSYNC_HS_ADDR)); 1747 + writel_relaxed(vso_end, priv->io_base + _REG(L_VSYNC_HE_ADDR)); 1748 + writel_relaxed(vso_bline, priv->io_base + _REG(L_VSYNC_VS_ADDR)); 1749 + writel_relaxed(vso_eline, priv->io_base + _REG(L_VSYNC_VE_ADDR)); 1750 + 1751 + writel_relaxed(0, priv->io_base + _REG(L_INV_CNT_ADDR)); 1752 + writel_relaxed(L_TCON_MISC_SEL_STV1 | L_TCON_MISC_SEL_STV2, 1753 + priv->io_base + _REG(L_TCON_MISC_SEL_ADDR)); 1754 + 1755 + priv->venc.current_mode = MESON_VENC_MODE_MIPI_DSI; 1756 + } 1757 + 
EXPORT_SYMBOL_GPL(meson_venc_mipi_dsi_mode_set); 1758 + 1562 1759 void meson_venci_cvbs_mode_set(struct meson_drm *priv, 1563 1760 struct meson_cvbs_enci_mode *mode) 1564 1761 { ··· 1948 1747 1949 1748 void meson_venc_enable_vsync(struct meson_drm *priv) 1950 1749 { 1951 - writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN, 1952 - priv->io_base + _REG(VENC_INTCTRL)); 1750 + switch (priv->venc.current_mode) { 1751 + case MESON_VENC_MODE_MIPI_DSI: 1752 + writel_relaxed(VENC_INTCTRL_ENCP_LNRST_INT_EN, 1753 + priv->io_base + _REG(VENC_INTCTRL)); 1754 + break; 1755 + default: 1756 + writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN, 1757 + priv->io_base + _REG(VENC_INTCTRL)); 1758 + } 1953 1759 regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25)); 1954 1760 } 1955 1761
+6
drivers/gpu/drm/meson/meson_venc.h
··· 21 21 MESON_VENC_MODE_CVBS_PAL, 22 22 MESON_VENC_MODE_CVBS_NTSC, 23 23 MESON_VENC_MODE_HDMI, 24 + MESON_VENC_MODE_MIPI_DSI, 24 25 }; 25 26 26 27 struct meson_cvbs_enci_mode { ··· 48 47 unsigned int analog_sync_adj; 49 48 }; 50 49 50 + /* LCD Encoder gamma setup */ 51 + void meson_encl_load_gamma(struct meson_drm *priv); 52 + 51 53 /* HDMI Clock parameters */ 52 54 enum drm_mode_status 53 55 meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode); ··· 67 63 unsigned int ycrcb_map, 68 64 bool yuv420_mode, 69 65 const struct drm_display_mode *mode); 66 + void meson_venc_mipi_dsi_mode_set(struct meson_drm *priv, 67 + const struct drm_display_mode *mode); 70 68 unsigned int meson_venci_get_field(struct meson_drm *priv); 71 69 72 70 void meson_venc_enable_vsync(struct meson_drm *priv);
+2
drivers/gpu/drm/meson/meson_vpp.h
··· 12 12 struct drm_rect; 13 13 struct meson_drm; 14 14 15 + /* Mux VIU/VPP to ENCL */ 16 + #define MESON_VIU_VPP_MUX_ENCL 0x0 15 17 /* Mux VIU/VPP to ENCI */ 16 18 #define MESON_VIU_VPP_MUX_ENCI 0x5 17 19 /* Mux VIU/VPP to ENCP */
+1
drivers/gpu/drm/msm/Kconfig
··· 21 21 select DRM_BRIDGE 22 22 select DRM_PANEL_BRIDGE 23 23 select DRM_SCHED 24 + select FB_SYS_HELPERS if DRM_FBDEV_EMULATION 24 25 select SHMEM 25 26 select TMPFS 26 27 select QCOM_SCM
+7 -6
drivers/gpu/drm/msm/msm_drv.c
··· 1057 1057 DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW), 1058 1058 }; 1059 1059 1060 - static void msm_fop_show_fdinfo(struct seq_file *m, struct file *f) 1060 + static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file) 1061 1061 { 1062 - struct drm_file *file = f->private_data; 1063 1062 struct drm_device *dev = file->minor->dev; 1064 1063 struct msm_drm_private *priv = dev->dev_private; 1065 - struct drm_printer p = drm_seq_file_printer(m); 1066 1064 1067 1065 if (!priv->gpu) 1068 1066 return; 1069 1067 1070 - msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, &p); 1068 + msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p); 1069 + 1070 + drm_show_memory_stats(p, file); 1071 1071 } 1072 1072 1073 1073 static const struct file_operations fops = { 1074 1074 .owner = THIS_MODULE, 1075 1075 DRM_GEM_FOPS, 1076 - .show_fdinfo = msm_fop_show_fdinfo, 1076 + .show_fdinfo = drm_show_fdinfo, 1077 1077 }; 1078 1078 1079 1079 static const struct drm_driver msm_driver = { ··· 1083 1083 DRIVER_MODESET | 1084 1084 DRIVER_SYNCOBJ, 1085 1085 .open = msm_open, 1086 - .postclose = msm_postclose, 1086 + .postclose = msm_postclose, 1087 1087 .dumb_create = msm_gem_dumb_create, 1088 1088 .dumb_map_offset = msm_gem_dumb_map_offset, 1089 1089 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, ··· 1093 1093 #ifdef CONFIG_DEBUG_FS 1094 1094 .debugfs_init = msm_debugfs_init, 1095 1095 #endif 1096 + .show_fdinfo = msm_show_fdinfo, 1096 1097 .ioctls = msm_ioctls, 1097 1098 .num_ioctls = ARRAY_SIZE(msm_ioctls), 1098 1099 .fops = &fops,
+8 -9
drivers/gpu/drm/msm/msm_fbdev.c
··· 4 4 * Author: Rob Clark <robdclark@gmail.com> 5 5 */ 6 6 7 + #include <linux/fb.h> 8 + 7 9 #include <drm/drm_drv.h> 8 10 #include <drm/drm_crtc_helper.h> 9 11 #include <drm/drm_fb_helper.h> ··· 24 22 /* 25 23 * fbdev funcs, to implement legacy fbdev interface on top of drm driver 26 24 */ 25 + 26 + FB_GEN_DEFAULT_DEFERRED_SYS_OPS(msm_fbdev, 27 + drm_fb_helper_damage_range, 28 + drm_fb_helper_damage_area) 27 29 28 30 static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) 29 31 { ··· 58 52 59 53 static const struct fb_ops msm_fb_ops = { 60 54 .owner = THIS_MODULE, 55 + __FB_DEFAULT_DEFERRED_OPS_RDWR(msm_fbdev), 61 56 DRM_FB_HELPER_DEFAULT_OPS, 62 - 63 - /* Note: to properly handle manual update displays, we wrap the 64 - * basic fbdev ops which write to the framebuffer 65 - */ 66 - .fb_read = drm_fb_helper_sys_read, 67 - .fb_write = drm_fb_helper_sys_write, 68 - .fb_fillrect = drm_fb_helper_sys_fillrect, 69 - .fb_copyarea = drm_fb_helper_sys_copyarea, 70 - .fb_imageblit = drm_fb_helper_sys_imageblit, 57 + __FB_DEFAULT_DEFERRED_OPS_DRAW(msm_fbdev), 71 58 .fb_mmap = msm_fbdev_mmap, 72 59 .fb_destroy = msm_fbdev_fb_destroy, 73 60 };
+15
drivers/gpu/drm/msm/msm_gem.c
··· 1096 1096 return ret; 1097 1097 } 1098 1098 1099 + static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj) 1100 + { 1101 + struct msm_gem_object *msm_obj = to_msm_bo(obj); 1102 + enum drm_gem_object_status status = 0; 1103 + 1104 + if (msm_obj->pages) 1105 + status |= DRM_GEM_OBJECT_RESIDENT; 1106 + 1107 + if (msm_obj->madv == MSM_MADV_DONTNEED) 1108 + status |= DRM_GEM_OBJECT_PURGEABLE; 1109 + 1110 + return status; 1111 + } 1112 + 1099 1113 static const struct vm_operations_struct vm_ops = { 1100 1114 .fault = msm_gem_fault, 1101 1115 .open = drm_gem_vm_open, ··· 1124 1110 .vmap = msm_gem_prime_vmap, 1125 1111 .vunmap = msm_gem_prime_vunmap, 1126 1112 .mmap = msm_gem_object_mmap, 1113 + .status = msm_gem_status, 1127 1114 .vm_ops = &vm_ops, 1128 1115 }; 1129 1116
-2
drivers/gpu/drm/msm/msm_gpu.c
··· 151 151 void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx, 152 152 struct drm_printer *p) 153 153 { 154 - drm_printf(p, "drm-driver:\t%s\n", gpu->dev->driver->name); 155 - drm_printf(p, "drm-client-id:\t%u\n", ctx->seqno); 156 154 drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns); 157 155 drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles); 158 156 drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
+61 -8
drivers/gpu/drm/mxsfb/lcdif_drv.c
··· 9 9 #include <linux/dma-mapping.h> 10 10 #include <linux/io.h> 11 11 #include <linux/module.h> 12 + #include <linux/of.h> 12 13 #include <linux/of_device.h> 14 + #include <linux/of_graph.h> 13 15 #include <linux/platform_device.h> 14 16 #include <linux/pm_runtime.h> 15 17 16 18 #include <drm/drm_atomic_helper.h> 17 19 #include <drm/drm_bridge.h> 18 20 #include <drm/drm_drv.h> 21 + #include <drm/drm_encoder.h> 19 22 #include <drm/drm_fbdev_dma.h> 20 23 #include <drm/drm_gem_dma_helper.h> 21 24 #include <drm/drm_gem_framebuffer_helper.h> ··· 41 38 .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, 42 39 }; 43 40 41 + static const struct drm_encoder_funcs lcdif_encoder_funcs = { 42 + .destroy = drm_encoder_cleanup, 43 + }; 44 + 44 45 static int lcdif_attach_bridge(struct lcdif_drm_private *lcdif) 45 46 { 46 - struct drm_device *drm = lcdif->drm; 47 + struct device *dev = lcdif->drm->dev; 48 + struct device_node *ep; 47 49 struct drm_bridge *bridge; 48 50 int ret; 49 51 50 - bridge = devm_drm_of_get_bridge(drm->dev, drm->dev->of_node, 0, 0); 51 - if (IS_ERR(bridge)) 52 - return PTR_ERR(bridge); 52 + for_each_endpoint_of_node(dev->of_node, ep) { 53 + struct device_node *remote; 54 + struct of_endpoint of_ep; 55 + struct drm_encoder *encoder; 53 56 54 - ret = drm_bridge_attach(&lcdif->encoder, bridge, NULL, 0); 55 - if (ret) 56 - return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n"); 57 + remote = of_graph_get_remote_port_parent(ep); 58 + if (!of_device_is_available(remote)) { 59 + of_node_put(remote); 60 + continue; 61 + } 62 + of_node_put(remote); 57 63 58 - lcdif->bridge = bridge; 64 + ret = of_graph_parse_endpoint(ep, &of_ep); 65 + if (ret < 0) { 66 + dev_err(dev, "Failed to parse endpoint %pOF\n", ep); 67 + of_node_put(ep); 68 + return ret; 69 + } 70 + 71 + bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, of_ep.id); 72 + if (IS_ERR(bridge)) { 73 + of_node_put(ep); 74 + return dev_err_probe(dev, PTR_ERR(bridge), 75 + "Failed to get bridge 
for endpoint%u\n", 76 + of_ep.id); 77 + } 78 + 79 + encoder = devm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL); 80 + if (!encoder) { 81 + dev_err(dev, "Failed to allocate encoder for endpoint%u\n", 82 + of_ep.id); 83 + of_node_put(ep); 84 + return -ENOMEM; 85 + } 86 + 87 + encoder->possible_crtcs = drm_crtc_mask(&lcdif->crtc); 88 + ret = drm_encoder_init(lcdif->drm, encoder, &lcdif_encoder_funcs, 89 + DRM_MODE_ENCODER_NONE, NULL); 90 + if (ret) { 91 + dev_err(dev, "Failed to initialize encoder for endpoint%u: %d\n", 92 + of_ep.id, ret); 93 + of_node_put(ep); 94 + return ret; 95 + } 96 + 97 + ret = drm_bridge_attach(encoder, bridge, NULL, 0); 98 + if (ret) { 99 + of_node_put(ep); 100 + return dev_err_probe(dev, ret, 101 + "Failed to attach bridge for endpoint%u\n", 102 + of_ep.id); 103 + } 104 + } 59 105 60 106 return 0; 61 107 } ··· 251 199 252 200 static const struct of_device_id lcdif_dt_ids[] = { 253 201 { .compatible = "fsl,imx8mp-lcdif" }, 202 + { .compatible = "fsl,imx93-lcdif" }, 254 203 { /* sentinel */ } 255 204 }; 256 205 MODULE_DEVICE_TABLE(of, lcdif_dt_ids);
-3
drivers/gpu/drm/mxsfb/lcdif_drv.h
··· 11 11 #include <drm/drm_bridge.h> 12 12 #include <drm/drm_crtc.h> 13 13 #include <drm/drm_device.h> 14 - #include <drm/drm_encoder.h> 15 14 #include <drm/drm_plane.h> 16 15 17 16 struct clk; ··· 29 30 /* i.MXRT does support overlay planes, add them here. */ 30 31 } planes; 31 32 struct drm_crtc crtc; 32 - struct drm_encoder encoder; 33 - struct drm_bridge *bridge; 34 33 }; 35 34 36 35 static inline struct lcdif_drm_private *
+139 -62
drivers/gpu/drm/mxsfb/lcdif_kms.c
··· 17 17 #include <drm/drm_atomic_helper.h> 18 18 #include <drm/drm_bridge.h> 19 19 #include <drm/drm_color_mgmt.h> 20 + #include <drm/drm_connector.h> 20 21 #include <drm/drm_crtc.h> 21 22 #include <drm/drm_encoder.h> 22 23 #include <drm/drm_fb_dma_helper.h> ··· 30 29 31 30 #include "lcdif_drv.h" 32 31 #include "lcdif_regs.h" 32 + 33 + struct lcdif_crtc_state { 34 + struct drm_crtc_state base; /* always be the first member */ 35 + u32 bus_format; 36 + u32 bus_flags; 37 + }; 38 + 39 + static inline struct lcdif_crtc_state * 40 + to_lcdif_crtc_state(struct drm_crtc_state *s) 41 + { 42 + return container_of(s, struct lcdif_crtc_state, base); 43 + } 33 44 34 45 /* ----------------------------------------------------------------------------- 35 46 * CRTC ··· 398 385 readl(lcdif->base + LCDC_V8_CTRL); 399 386 } 400 387 401 - static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif, 402 - struct drm_plane_state *plane_state, 403 - struct drm_bridge_state *bridge_state, 404 - const u32 bus_format) 388 + static void lcdif_crtc_mode_set_nofb(struct drm_crtc_state *crtc_state, 389 + struct drm_plane_state *plane_state) 405 390 { 406 - struct drm_device *drm = lcdif->crtc.dev; 407 - struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode; 408 - u32 bus_flags = 0; 409 - 410 - if (lcdif->bridge && lcdif->bridge->timings) 411 - bus_flags = lcdif->bridge->timings->input_bus_flags; 412 - else if (bridge_state) 413 - bus_flags = bridge_state->input_bus_cfg.flags; 391 + struct lcdif_crtc_state *lcdif_crtc_state = to_lcdif_crtc_state(crtc_state); 392 + struct drm_device *drm = crtc_state->crtc->dev; 393 + struct lcdif_drm_private *lcdif = to_lcdif_drm_private(drm); 394 + struct drm_display_mode *m = &crtc_state->adjusted_mode; 414 395 415 396 DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n", 416 397 m->crtc_clock, 417 398 (int)(clk_get_rate(lcdif->clk) / 1000)); 418 399 DRM_DEV_DEBUG_DRIVER(drm->dev, "Bridge bus_flags: 0x%08X\n", 419 - 
bus_flags); 400 + lcdif_crtc_state->bus_flags); 420 401 DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags); 421 402 422 403 /* Mandatory eLCDIF reset as per the Reference Manual */ 423 404 lcdif_reset_block(lcdif); 424 405 425 - lcdif_set_formats(lcdif, plane_state, bus_format); 406 + lcdif_set_formats(lcdif, plane_state, lcdif_crtc_state->bus_format); 426 407 427 - lcdif_set_mode(lcdif, bus_flags); 408 + lcdif_set_mode(lcdif, lcdif_crtc_state->bus_flags); 428 409 } 429 410 430 411 static int lcdif_crtc_atomic_check(struct drm_crtc *crtc, 431 412 struct drm_atomic_state *state) 432 413 { 414 + struct drm_device *drm = crtc->dev; 433 415 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 434 416 crtc); 417 + struct lcdif_crtc_state *lcdif_crtc_state = to_lcdif_crtc_state(crtc_state); 435 418 bool has_primary = crtc_state->plane_mask & 436 419 drm_plane_mask(crtc->primary); 420 + struct drm_connector_state *connector_state; 421 + struct drm_connector *connector; 422 + struct drm_encoder *encoder; 423 + struct drm_bridge_state *bridge_state; 424 + struct drm_bridge *bridge; 425 + u32 bus_format, bus_flags; 426 + bool format_set = false, flags_set = false; 427 + int ret, i; 437 428 438 429 /* The primary plane has to be enabled when the CRTC is active. */ 439 430 if (crtc_state->active && !has_primary) 440 431 return -EINVAL; 441 432 442 - return drm_atomic_add_affected_planes(state, crtc); 433 + ret = drm_atomic_add_affected_planes(state, crtc); 434 + if (ret) 435 + return ret; 436 + 437 + /* Try to find consistent bus format and flags across first bridges. 
*/ 438 + for_each_new_connector_in_state(state, connector, connector_state, i) { 439 + if (!connector_state->crtc) 440 + continue; 441 + 442 + encoder = connector_state->best_encoder; 443 + 444 + bridge = drm_bridge_chain_get_first_bridge(encoder); 445 + if (!bridge) 446 + continue; 447 + 448 + bridge_state = drm_atomic_get_new_bridge_state(state, bridge); 449 + if (!bridge_state) 450 + bus_format = MEDIA_BUS_FMT_FIXED; 451 + else 452 + bus_format = bridge_state->input_bus_cfg.format; 453 + 454 + if (bus_format == MEDIA_BUS_FMT_FIXED) { 455 + dev_warn(drm->dev, 456 + "[ENCODER:%d:%s]'s bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n" 457 + "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n", 458 + encoder->base.id, encoder->name); 459 + bus_format = MEDIA_BUS_FMT_RGB888_1X24; 460 + } else if (!bus_format) { 461 + /* If all else fails, default to RGB888_1X24 */ 462 + bus_format = MEDIA_BUS_FMT_RGB888_1X24; 463 + } 464 + 465 + if (!format_set) { 466 + lcdif_crtc_state->bus_format = bus_format; 467 + format_set = true; 468 + } else if (lcdif_crtc_state->bus_format != bus_format) { 469 + DRM_DEV_DEBUG_DRIVER(drm->dev, "inconsistent bus format\n"); 470 + return -EINVAL; 471 + } 472 + 473 + if (bridge->timings) 474 + bus_flags = bridge->timings->input_bus_flags; 475 + else if (bridge_state) 476 + bus_flags = bridge_state->input_bus_cfg.flags; 477 + else 478 + bus_flags = 0; 479 + 480 + if (!flags_set) { 481 + lcdif_crtc_state->bus_flags = bus_flags; 482 + flags_set = true; 483 + } else if (lcdif_crtc_state->bus_flags != bus_flags) { 484 + DRM_DEV_DEBUG_DRIVER(drm->dev, "inconsistent bus flags\n"); 485 + return -EINVAL; 486 + } 487 + } 488 + 489 + return 0; 443 490 } 444 491 445 492 static void lcdif_crtc_atomic_flush(struct drm_crtc *crtc, ··· 531 458 struct drm_atomic_state *state) 532 459 { 533 460 struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev); 461 + struct drm_crtc_state *new_cstate = 
drm_atomic_get_new_crtc_state(state, crtc); 534 462 struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state, 535 463 crtc->primary); 536 464 struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode; 537 - struct drm_bridge_state *bridge_state = NULL; 538 465 struct drm_device *drm = lcdif->drm; 539 - u32 bus_format = 0; 540 466 dma_addr_t paddr; 541 - 542 - /* If there is a bridge attached to the LCDIF, use its bus format */ 543 - if (lcdif->bridge) { 544 - bridge_state = 545 - drm_atomic_get_new_bridge_state(state, 546 - lcdif->bridge); 547 - if (!bridge_state) 548 - bus_format = MEDIA_BUS_FMT_FIXED; 549 - else 550 - bus_format = bridge_state->input_bus_cfg.format; 551 - 552 - if (bus_format == MEDIA_BUS_FMT_FIXED) { 553 - dev_warn_once(drm->dev, 554 - "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n" 555 - "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n"); 556 - bus_format = MEDIA_BUS_FMT_RGB888_1X24; 557 - } 558 - } 559 - 560 - /* If all else fails, default to RGB888_1X24 */ 561 - if (!bus_format) 562 - bus_format = MEDIA_BUS_FMT_RGB888_1X24; 563 467 564 468 clk_set_rate(lcdif->clk, m->crtc_clock * 1000); 565 469 566 470 pm_runtime_get_sync(drm->dev); 567 471 568 - lcdif_crtc_mode_set_nofb(lcdif, new_pstate, bridge_state, bus_format); 472 + lcdif_crtc_mode_set_nofb(new_cstate, new_pstate); 569 473 570 474 /* Write cur_buf as well to avoid an initial corrupt frame */ 571 475 paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0); ··· 579 529 pm_runtime_put_sync(drm->dev); 580 530 } 581 531 532 + static void lcdif_crtc_atomic_destroy_state(struct drm_crtc *crtc, 533 + struct drm_crtc_state *state) 534 + { 535 + __drm_atomic_helper_crtc_destroy_state(state); 536 + kfree(to_lcdif_crtc_state(state)); 537 + } 538 + 539 + static void lcdif_crtc_reset(struct drm_crtc *crtc) 540 + { 541 + struct lcdif_crtc_state *state; 542 + 543 + if (crtc->state) 544 + lcdif_crtc_atomic_destroy_state(crtc, 
crtc->state); 545 + 546 + crtc->state = NULL; 547 + 548 + state = kzalloc(sizeof(*state), GFP_KERNEL); 549 + if (state) 550 + __drm_atomic_helper_crtc_reset(crtc, &state->base); 551 + } 552 + 553 + static struct drm_crtc_state * 554 + lcdif_crtc_atomic_duplicate_state(struct drm_crtc *crtc) 555 + { 556 + struct lcdif_crtc_state *old = to_lcdif_crtc_state(crtc->state); 557 + struct lcdif_crtc_state *new; 558 + 559 + if (WARN_ON(!crtc->state)) 560 + return NULL; 561 + 562 + new = kzalloc(sizeof(*new), GFP_KERNEL); 563 + if (!new) 564 + return NULL; 565 + 566 + __drm_atomic_helper_crtc_duplicate_state(crtc, &new->base); 567 + 568 + new->bus_format = old->bus_format; 569 + new->bus_flags = old->bus_flags; 570 + 571 + return &new->base; 572 + } 573 + 582 574 static int lcdif_crtc_enable_vblank(struct drm_crtc *crtc) 583 575 { 584 576 struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev); ··· 649 557 }; 650 558 651 559 static const struct drm_crtc_funcs lcdif_crtc_funcs = { 652 - .reset = drm_atomic_helper_crtc_reset, 560 + .reset = lcdif_crtc_reset, 653 561 .destroy = drm_crtc_cleanup, 654 562 .set_config = drm_atomic_helper_set_config, 655 563 .page_flip = drm_atomic_helper_page_flip, 656 - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 657 - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 564 + .atomic_duplicate_state = lcdif_crtc_atomic_duplicate_state, 565 + .atomic_destroy_state = lcdif_crtc_atomic_destroy_state, 658 566 .enable_vblank = lcdif_crtc_enable_vblank, 659 567 .disable_vblank = lcdif_crtc_disable_vblank, 660 - }; 661 - 662 - /* ----------------------------------------------------------------------------- 663 - * Encoder 664 - */ 665 - 666 - static const struct drm_encoder_funcs lcdif_encoder_funcs = { 667 - .destroy = drm_encoder_cleanup, 668 568 }; 669 569 670 570 /* ----------------------------------------------------------------------------- ··· 751 667 BIT(DRM_COLOR_YCBCR_BT2020); 752 668 const u32 
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 753 669 BIT(DRM_COLOR_YCBCR_FULL_RANGE); 754 - struct drm_encoder *encoder = &lcdif->encoder; 755 670 struct drm_crtc *crtc = &lcdif->crtc; 756 671 int ret; 757 672 ··· 774 691 return ret; 775 692 776 693 drm_crtc_helper_add(crtc, &lcdif_crtc_helper_funcs); 777 - ret = drm_crtc_init_with_planes(lcdif->drm, crtc, 778 - &lcdif->planes.primary, NULL, 779 - &lcdif_crtc_funcs, NULL); 780 - if (ret) 781 - return ret; 782 - 783 - encoder->possible_crtcs = drm_crtc_mask(crtc); 784 - return drm_encoder_init(lcdif->drm, encoder, &lcdif_encoder_funcs, 785 - DRM_MODE_ENCODER_NONE, NULL); 694 + return drm_crtc_init_with_planes(lcdif->drm, crtc, 695 + &lcdif->planes.primary, NULL, 696 + &lcdif_crtc_funcs, NULL); 786 697 }
+1
drivers/gpu/drm/omapdrm/Kconfig
··· 4 4 depends on DRM && OF 5 5 depends on ARCH_OMAP2PLUS 6 6 select DRM_KMS_HELPER 7 + select FB_SYS_HELPERS if DRM_FBDEV_EMULATION 7 8 select VIDEOMODE_HELPERS 8 9 select HDMI 9 10 default n
+3 -8
drivers/gpu/drm/omapdrm/omap_fbdev.c
··· 4 4 * Author: Rob Clark <rob@ti.com> 5 5 */ 6 6 7 + #include <linux/fb.h> 8 + 7 9 #include <drm/drm_drv.h> 8 10 #include <drm/drm_crtc_helper.h> 9 11 #include <drm/drm_fb_helper.h> ··· 97 95 98 96 static const struct fb_ops omap_fb_ops = { 99 97 .owner = THIS_MODULE, 100 - 98 + FB_DEFAULT_SYS_OPS, 101 99 .fb_check_var = drm_fb_helper_check_var, 102 100 .fb_set_par = drm_fb_helper_set_par, 103 101 .fb_setcmap = drm_fb_helper_setcmap, 104 102 .fb_blank = drm_fb_helper_blank, 105 103 .fb_pan_display = omap_fbdev_pan_display, 106 104 .fb_ioctl = drm_fb_helper_ioctl, 107 - 108 - .fb_read = drm_fb_helper_sys_read, 109 - .fb_write = drm_fb_helper_sys_write, 110 - .fb_fillrect = drm_fb_helper_sys_fillrect, 111 - .fb_copyarea = drm_fb_helper_sys_copyarea, 112 - .fb_imageblit = drm_fb_helper_sys_imageblit, 113 - 114 105 .fb_destroy = omap_fbdev_fb_destroy, 115 106 }; 116 107
+472
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
··· 1301 1301 {}, 1302 1302 }; 1303 1303 1304 + static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = { 1305 + _INIT_DCS_CMD(0xB9, 0x83, 0x10, 0x21, 0x55, 0x00), 1306 + _INIT_DCS_CMD(0xB1, 0x2C, 0xB5, 0xB5, 0x31, 0xF1, 0x31, 0xD7, 0x2F, 0x36, 0x36, 0x36, 0x36, 0x1A, 0x8B, 0x11, 1307 + 0x65, 0x00, 0x88, 0xFA, 0xFF, 0xFF, 0x8F, 0xFF, 0x08, 0x74, 0x33), 1308 + _INIT_DCS_CMD(0xB2, 0x00, 0x47, 0xB0, 0x80, 0x00, 0x12, 0x72, 0x3C, 0xA3, 0x03, 0x03, 0x00, 0x00, 0x88, 0xF5), 1309 + _INIT_DCS_CMD(0xB4, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x63, 0x5C, 0x63, 0x5C, 0x01, 0x9E), 1310 + _INIT_DCS_CMD(0xE9, 0xCD), 1311 + _INIT_DCS_CMD(0xBA, 0x84), 1312 + _INIT_DCS_CMD(0xE9, 0x3F), 1313 + _INIT_DCS_CMD(0xBC, 0x1B, 0x04), 1314 + _INIT_DCS_CMD(0xBE, 0x20), 1315 + _INIT_DCS_CMD(0xBF, 0xFC, 0xC4), 1316 + _INIT_DCS_CMD(0xC0, 0x36, 0x36, 0x22, 0x11, 0x22, 0xA0, 0x61, 0x08, 0xF5, 0x03), 1317 + _INIT_DCS_CMD(0xE9, 0xCC), 1318 + _INIT_DCS_CMD(0xC7, 0x80), 1319 + _INIT_DCS_CMD(0xE9, 0x3F), 1320 + _INIT_DCS_CMD(0xE9, 0xC6), 1321 + _INIT_DCS_CMD(0xC8, 0x97), 1322 + _INIT_DCS_CMD(0xE9, 0x3F), 1323 + _INIT_DCS_CMD(0xC9, 0x00, 0x1E, 0x13, 0x88, 0x01), 1324 + _INIT_DCS_CMD(0xCB, 0x08, 0x13, 0x07, 0x00, 0x0F, 0x33), 1325 + _INIT_DCS_CMD(0xCC, 0x02), 1326 + _INIT_DCS_CMD(0xE9, 0xC4), 1327 + _INIT_DCS_CMD(0xD0, 0x03), 1328 + _INIT_DCS_CMD(0xE9, 0x3F), 1329 + _INIT_DCS_CMD(0xD1, 0x37, 0x06, 0x00, 0x02, 0x04, 0x0C, 0xFF), 1330 + _INIT_DCS_CMD(0xD2, 0x1F, 0x11, 0x1F), 1331 + _INIT_DCS_CMD(0xD3, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x37, 0x47, 0x34, 0x3B, 0x12, 0x12, 0x03, 1332 + 0x03, 0x32, 0x10, 0x10, 0x00, 0x10, 0x32, 0x10, 0x08, 0x00, 0x08, 0x32, 0x17, 0x94, 0x07, 0x94, 0x00, 0x00), 1333 + _INIT_DCS_CMD(0xD5, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, 0x40, 0x40, 0x1A, 0x1A, 1334 + 0x1B, 0x1B, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x28, 0x29, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 
0x18), 1335 + _INIT_DCS_CMD(0xD6, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x40, 0x40, 0x19, 0x19, 0x1A, 0x1A, 1336 + 0x1B, 0x1B, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x29, 0x28, 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18), 1337 + _INIT_DCS_CMD(0xD8, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 1338 + 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0), 1339 + _INIT_DCS_CMD(0xE0, 0x00, 0x09, 0x14, 0x1E, 0x26, 0x48, 0x61, 0x67, 0x6C, 0x67, 0x7D, 0x7F, 0x80, 0x8B, 0x87, 0x8F, 0x98, 0xAB, 1340 + 0xAB, 0x55, 0x5C, 0x68, 0x73, 0x00, 0x09, 0x14, 0x1E, 0x26, 0x48, 0x61, 0x67, 0x6C, 0x67, 0x7D, 0x7F, 0x80, 0x8B, 0x87, 0x8F, 0x98, 0xAB, 0xAB, 0x55, 0x5C, 0x68, 0x73), 1341 + _INIT_DCS_CMD(0xE7, 0x0E, 0x10, 0x10, 0x21, 0x2B, 0x9A, 0x02, 0x54, 0x9A, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x05, 0x02, 0x02, 0x10), 1342 + _INIT_DCS_CMD(0xBD, 0x01), 1343 + _INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11), 1344 + _INIT_DCS_CMD(0xCB, 0x86), 1345 + _INIT_DCS_CMD(0xD2, 0x3C, 0xFA), 1346 + _INIT_DCS_CMD(0xE9, 0xC5), 1347 + _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01), 1348 + _INIT_DCS_CMD(0xE9, 0x3F), 1349 + _INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40), 1350 + _INIT_DCS_CMD(0xBD, 0x02), 1351 + _INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0), 1352 + _INIT_DCS_CMD(0xE7, 0xFE, 0x04, 0xFE, 0x04, 0xFE, 0x04, 0x03, 0x03, 0x03, 0x26, 0x00, 0x26, 0x81, 0x02, 0x40, 0x00, 0x20, 0x9E, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00), 1353 + _INIT_DCS_CMD(0xBD, 0x03), 1354 + _INIT_DCS_CMD(0xE9, 0xC6), 1355 + _INIT_DCS_CMD(0xB4, 0x03, 0xFF, 0xF8), 1356 + _INIT_DCS_CMD(0xE9, 0x3F), 1357 + _INIT_DCS_CMD(0xD8, 0x00, 0x2A, 0xAA, 0xA8, 0x00, 0x00, 0x00, 0x2A, 0xAA, 0xA8, 0x00, 
0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x2A, 0xAA, 0xA8, 1358 + 0x00, 0x00, 0x00, 0x2A, 0xAA, 0xA8, 0x00, 0x00), 1359 + _INIT_DCS_CMD(0xBD, 0x00), 1360 + _INIT_DCS_CMD(0xE9, 0xC4), 1361 + _INIT_DCS_CMD(0xBA, 0x96), 1362 + _INIT_DCS_CMD(0xE9, 0x3F), 1363 + _INIT_DCS_CMD(0xBD, 0x01), 1364 + _INIT_DCS_CMD(0xE9, 0xC5), 1365 + _INIT_DCS_CMD(0xBA, 0x4F), 1366 + _INIT_DCS_CMD(0xE9, 0x3F), 1367 + _INIT_DCS_CMD(0xBD, 0x00), 1368 + _INIT_DCS_CMD(0x11), 1369 + _INIT_DELAY_CMD(120), 1370 + _INIT_DCS_CMD(0x29), 1371 + {}, 1372 + }; 1373 + 1374 + static const struct panel_init_cmd starry_ili9882t_init_cmd[] = { 1375 + _INIT_DELAY_CMD(5), 1376 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x01), 1377 + _INIT_DCS_CMD(0x00, 0x42), 1378 + _INIT_DCS_CMD(0x01, 0x11), 1379 + _INIT_DCS_CMD(0x02, 0x00), 1380 + _INIT_DCS_CMD(0x03, 0x00), 1381 + 1382 + _INIT_DCS_CMD(0x04, 0x01), 1383 + _INIT_DCS_CMD(0x05, 0x11), 1384 + _INIT_DCS_CMD(0x06, 0x00), 1385 + _INIT_DCS_CMD(0x07, 0x00), 1386 + 1387 + _INIT_DCS_CMD(0x08, 0x80), 1388 + _INIT_DCS_CMD(0x09, 0x81), 1389 + _INIT_DCS_CMD(0x0A, 0x71), 1390 + _INIT_DCS_CMD(0x0B, 0x00), 1391 + 1392 + _INIT_DCS_CMD(0x0C, 0x00), 1393 + _INIT_DCS_CMD(0x0E, 0x1A), 1394 + 1395 + _INIT_DCS_CMD(0x24, 0x00), 1396 + _INIT_DCS_CMD(0x25, 0x00), 1397 + _INIT_DCS_CMD(0x26, 0x00), 1398 + _INIT_DCS_CMD(0x27, 0x00), 1399 + 1400 + _INIT_DCS_CMD(0x2C, 0xD4), 1401 + _INIT_DCS_CMD(0xB9, 0x40), 1402 + 1403 + _INIT_DCS_CMD(0xB0, 0x11), 1404 + 1405 + _INIT_DCS_CMD(0xE6, 0x32), 1406 + _INIT_DCS_CMD(0xD1, 0x30), 1407 + 1408 + _INIT_DCS_CMD(0xD6, 0x55), 1409 + 1410 + _INIT_DCS_CMD(0xD0, 0x01), 1411 + _INIT_DCS_CMD(0xE3, 0x93), 1412 + _INIT_DCS_CMD(0xE4, 0x00), 1413 + _INIT_DCS_CMD(0xE5, 0x80), 1414 + 1415 + _INIT_DCS_CMD(0x31, 0x07), 1416 + _INIT_DCS_CMD(0x32, 0x07), 1417 + _INIT_DCS_CMD(0x33, 0x07), 1418 + _INIT_DCS_CMD(0x34, 0x07), 1419 + _INIT_DCS_CMD(0x35, 0x07), 1420 + _INIT_DCS_CMD(0x36, 0x01), 1421 + _INIT_DCS_CMD(0x37, 0x00), 1422 + 
_INIT_DCS_CMD(0x38, 0x28), 1423 + _INIT_DCS_CMD(0x39, 0x29), 1424 + _INIT_DCS_CMD(0x3A, 0x11), 1425 + _INIT_DCS_CMD(0x3B, 0x13), 1426 + _INIT_DCS_CMD(0x3C, 0x15), 1427 + _INIT_DCS_CMD(0x3D, 0x17), 1428 + _INIT_DCS_CMD(0x3E, 0x09), 1429 + _INIT_DCS_CMD(0x3F, 0x0D), 1430 + _INIT_DCS_CMD(0x40, 0x02), 1431 + _INIT_DCS_CMD(0x41, 0x02), 1432 + _INIT_DCS_CMD(0x42, 0x02), 1433 + _INIT_DCS_CMD(0x43, 0x02), 1434 + _INIT_DCS_CMD(0x44, 0x02), 1435 + _INIT_DCS_CMD(0x45, 0x02), 1436 + _INIT_DCS_CMD(0x46, 0x02), 1437 + 1438 + _INIT_DCS_CMD(0x47, 0x07), 1439 + _INIT_DCS_CMD(0x48, 0x07), 1440 + _INIT_DCS_CMD(0x49, 0x07), 1441 + _INIT_DCS_CMD(0x4A, 0x07), 1442 + _INIT_DCS_CMD(0x4B, 0x07), 1443 + _INIT_DCS_CMD(0x4C, 0x01), 1444 + _INIT_DCS_CMD(0x4D, 0x00), 1445 + _INIT_DCS_CMD(0x4E, 0x28), 1446 + _INIT_DCS_CMD(0x4F, 0x29), 1447 + _INIT_DCS_CMD(0x50, 0x10), 1448 + _INIT_DCS_CMD(0x51, 0x12), 1449 + _INIT_DCS_CMD(0x52, 0x14), 1450 + _INIT_DCS_CMD(0x53, 0x16), 1451 + _INIT_DCS_CMD(0x54, 0x08), 1452 + _INIT_DCS_CMD(0x55, 0x0C), 1453 + _INIT_DCS_CMD(0x56, 0x02), 1454 + _INIT_DCS_CMD(0x57, 0x02), 1455 + _INIT_DCS_CMD(0x58, 0x02), 1456 + _INIT_DCS_CMD(0x59, 0x02), 1457 + _INIT_DCS_CMD(0x5A, 0x02), 1458 + _INIT_DCS_CMD(0x5B, 0x02), 1459 + _INIT_DCS_CMD(0x5C, 0x02), 1460 + 1461 + _INIT_DCS_CMD(0x61, 0x07), 1462 + _INIT_DCS_CMD(0x62, 0x07), 1463 + _INIT_DCS_CMD(0x63, 0x07), 1464 + _INIT_DCS_CMD(0x64, 0x07), 1465 + _INIT_DCS_CMD(0x65, 0x07), 1466 + _INIT_DCS_CMD(0x66, 0x01), 1467 + _INIT_DCS_CMD(0x67, 0x00), 1468 + _INIT_DCS_CMD(0x68, 0x28), 1469 + _INIT_DCS_CMD(0x69, 0x29), 1470 + _INIT_DCS_CMD(0x6A, 0x16), 1471 + _INIT_DCS_CMD(0x6B, 0x14), 1472 + _INIT_DCS_CMD(0x6C, 0x12), 1473 + _INIT_DCS_CMD(0x6D, 0x10), 1474 + _INIT_DCS_CMD(0x6E, 0x0C), 1475 + _INIT_DCS_CMD(0x6F, 0x08), 1476 + _INIT_DCS_CMD(0x70, 0x02), 1477 + _INIT_DCS_CMD(0x71, 0x02), 1478 + _INIT_DCS_CMD(0x72, 0x02), 1479 + _INIT_DCS_CMD(0x73, 0x02), 1480 + _INIT_DCS_CMD(0x74, 0x02), 1481 + _INIT_DCS_CMD(0x75, 0x02), 1482 + 
_INIT_DCS_CMD(0x76, 0x02), 1483 + 1484 + _INIT_DCS_CMD(0x77, 0x07), 1485 + _INIT_DCS_CMD(0x78, 0x07), 1486 + _INIT_DCS_CMD(0x79, 0x07), 1487 + _INIT_DCS_CMD(0x7A, 0x07), 1488 + _INIT_DCS_CMD(0x7B, 0x07), 1489 + _INIT_DCS_CMD(0x7C, 0x01), 1490 + _INIT_DCS_CMD(0x7D, 0x00), 1491 + _INIT_DCS_CMD(0x7E, 0x28), 1492 + _INIT_DCS_CMD(0x7F, 0x29), 1493 + _INIT_DCS_CMD(0x80, 0x17), 1494 + _INIT_DCS_CMD(0x81, 0x15), 1495 + _INIT_DCS_CMD(0x82, 0x13), 1496 + _INIT_DCS_CMD(0x83, 0x11), 1497 + _INIT_DCS_CMD(0x84, 0x0D), 1498 + _INIT_DCS_CMD(0x85, 0x09), 1499 + _INIT_DCS_CMD(0x86, 0x02), 1500 + _INIT_DCS_CMD(0x87, 0x07), 1501 + _INIT_DCS_CMD(0x88, 0x07), 1502 + _INIT_DCS_CMD(0x89, 0x07), 1503 + _INIT_DCS_CMD(0x8A, 0x07), 1504 + _INIT_DCS_CMD(0x8B, 0x07), 1505 + _INIT_DCS_CMD(0x8C, 0x07), 1506 + 1507 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x02), 1508 + _INIT_DCS_CMD(0x29, 0x3A), 1509 + _INIT_DCS_CMD(0x2A, 0x3B), 1510 + 1511 + _INIT_DCS_CMD(0x06, 0x01), 1512 + _INIT_DCS_CMD(0x07, 0x01), 1513 + _INIT_DCS_CMD(0x08, 0x0C), 1514 + _INIT_DCS_CMD(0x09, 0x44), 1515 + 1516 + _INIT_DCS_CMD(0x3C, 0x0A), 1517 + _INIT_DCS_CMD(0x39, 0x11), 1518 + _INIT_DCS_CMD(0x3D, 0x00), 1519 + _INIT_DCS_CMD(0x3A, 0x0C), 1520 + _INIT_DCS_CMD(0x3B, 0x44), 1521 + 1522 + _INIT_DCS_CMD(0x53, 0x1F), 1523 + _INIT_DCS_CMD(0x5E, 0x40), 1524 + _INIT_DCS_CMD(0x84, 0x00), 1525 + 1526 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x03), 1527 + _INIT_DCS_CMD(0x20, 0x01), 1528 + _INIT_DCS_CMD(0x21, 0x3C), 1529 + _INIT_DCS_CMD(0x22, 0xFA), 1530 + 1531 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x0A), 1532 + _INIT_DCS_CMD(0xE0, 0x01), 1533 + _INIT_DCS_CMD(0xE2, 0x01), 1534 + _INIT_DCS_CMD(0xE5, 0x91), 1535 + _INIT_DCS_CMD(0xE6, 0x3C), 1536 + _INIT_DCS_CMD(0xE7, 0x00), 1537 + _INIT_DCS_CMD(0xE8, 0xFA), 1538 + 1539 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x12), 1540 + _INIT_DCS_CMD(0x87, 0x2C), 1541 + 1542 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x05), 1543 + _INIT_DCS_CMD(0x73, 0xE5), 1544 + _INIT_DCS_CMD(0x7F, 0x6B), 1545 + _INIT_DCS_CMD(0x6D, 0xA4), 1546 + 
_INIT_DCS_CMD(0x79, 0x54), 1547 + _INIT_DCS_CMD(0x69, 0x97), 1548 + _INIT_DCS_CMD(0x6A, 0x97), 1549 + _INIT_DCS_CMD(0xA5, 0x3F), 1550 + _INIT_DCS_CMD(0x61, 0xDA), 1551 + _INIT_DCS_CMD(0xA7, 0xF1), 1552 + _INIT_DCS_CMD(0x5F, 0x01), 1553 + _INIT_DCS_CMD(0x62, 0x3F), 1554 + _INIT_DCS_CMD(0x1D, 0x90), 1555 + _INIT_DCS_CMD(0x86, 0x87), 1556 + 1557 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x06), 1558 + _INIT_DCS_CMD(0xC0, 0x80), 1559 + _INIT_DCS_CMD(0xC1, 0x07), 1560 + _INIT_DCS_CMD(0xCA, 0x58), 1561 + _INIT_DCS_CMD(0xCB, 0x02), 1562 + _INIT_DCS_CMD(0xCE, 0x58), 1563 + _INIT_DCS_CMD(0xCF, 0x02), 1564 + _INIT_DCS_CMD(0x67, 0x60), 1565 + _INIT_DCS_CMD(0x10, 0x00), 1566 + _INIT_DCS_CMD(0x92, 0x22), 1567 + _INIT_DCS_CMD(0xD3, 0x08), 1568 + _INIT_DCS_CMD(0xD6, 0x55), 1569 + _INIT_DCS_CMD(0xDC, 0x38), 1570 + 1571 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x08), 1572 + _INIT_DCS_CMD(0xE0, 0x00, 0x10, 0x2A, 0x4D, 0x61, 0x56, 0x6A, 0x6E, 0x79, 0x76, 0x8F, 0x95, 0x98, 0xAE, 0xAA, 0xB2, 0xBB, 0xCE, 0xC6, 0xBD, 0xD5, 0xE2, 0xE8), 1573 + _INIT_DCS_CMD(0xE1, 0x00, 0x10, 0x2A, 0x4D, 0x61, 0x56, 0x6A, 0x6E, 0x79, 0x76, 0x8F, 0x95, 0x98, 0xAE, 0xAA, 0xB2, 0xBB, 0xCE, 0xC6, 0xBD, 0xD5, 0xE2, 0xE8), 1574 + 1575 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x04), 1576 + _INIT_DCS_CMD(0xBA, 0x81), 1577 + 1578 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x0C), 1579 + _INIT_DCS_CMD(0x00, 0x02), 1580 + _INIT_DCS_CMD(0x01, 0x00), 1581 + _INIT_DCS_CMD(0x02, 0x03), 1582 + _INIT_DCS_CMD(0x03, 0x01), 1583 + _INIT_DCS_CMD(0x04, 0x03), 1584 + _INIT_DCS_CMD(0x05, 0x02), 1585 + _INIT_DCS_CMD(0x06, 0x04), 1586 + _INIT_DCS_CMD(0x07, 0x03), 1587 + _INIT_DCS_CMD(0x08, 0x03), 1588 + _INIT_DCS_CMD(0x09, 0x04), 1589 + _INIT_DCS_CMD(0x0A, 0x04), 1590 + _INIT_DCS_CMD(0x0B, 0x05), 1591 + _INIT_DCS_CMD(0x0C, 0x04), 1592 + _INIT_DCS_CMD(0x0D, 0x06), 1593 + _INIT_DCS_CMD(0x0E, 0x05), 1594 + _INIT_DCS_CMD(0x0F, 0x07), 1595 + _INIT_DCS_CMD(0x10, 0x04), 1596 + _INIT_DCS_CMD(0x11, 0x08), 1597 + _INIT_DCS_CMD(0x12, 0x05), 1598 + _INIT_DCS_CMD(0x13, 0x09), 
1599 + _INIT_DCS_CMD(0x14, 0x05), 1600 + _INIT_DCS_CMD(0x15, 0x0A), 1601 + _INIT_DCS_CMD(0x16, 0x06), 1602 + _INIT_DCS_CMD(0x17, 0x0B), 1603 + _INIT_DCS_CMD(0x18, 0x05), 1604 + _INIT_DCS_CMD(0x19, 0x0C), 1605 + _INIT_DCS_CMD(0x1A, 0x06), 1606 + _INIT_DCS_CMD(0x1B, 0x0D), 1607 + _INIT_DCS_CMD(0x1C, 0x06), 1608 + _INIT_DCS_CMD(0x1D, 0x0E), 1609 + _INIT_DCS_CMD(0x1E, 0x07), 1610 + _INIT_DCS_CMD(0x1F, 0x0F), 1611 + _INIT_DCS_CMD(0x20, 0x06), 1612 + _INIT_DCS_CMD(0x21, 0x10), 1613 + _INIT_DCS_CMD(0x22, 0x07), 1614 + _INIT_DCS_CMD(0x23, 0x11), 1615 + _INIT_DCS_CMD(0x24, 0x07), 1616 + _INIT_DCS_CMD(0x25, 0x12), 1617 + _INIT_DCS_CMD(0x26, 0x08), 1618 + _INIT_DCS_CMD(0x27, 0x13), 1619 + _INIT_DCS_CMD(0x28, 0x07), 1620 + _INIT_DCS_CMD(0x29, 0x14), 1621 + _INIT_DCS_CMD(0x2A, 0x08), 1622 + _INIT_DCS_CMD(0x2B, 0x15), 1623 + _INIT_DCS_CMD(0x2C, 0x08), 1624 + _INIT_DCS_CMD(0x2D, 0x16), 1625 + _INIT_DCS_CMD(0x2E, 0x09), 1626 + _INIT_DCS_CMD(0x2F, 0x17), 1627 + _INIT_DCS_CMD(0x30, 0x08), 1628 + _INIT_DCS_CMD(0x31, 0x18), 1629 + _INIT_DCS_CMD(0x32, 0x09), 1630 + _INIT_DCS_CMD(0x33, 0x19), 1631 + _INIT_DCS_CMD(0x34, 0x09), 1632 + _INIT_DCS_CMD(0x35, 0x1A), 1633 + _INIT_DCS_CMD(0x36, 0x0A), 1634 + _INIT_DCS_CMD(0x37, 0x1B), 1635 + _INIT_DCS_CMD(0x38, 0x0A), 1636 + _INIT_DCS_CMD(0x39, 0x1C), 1637 + _INIT_DCS_CMD(0x3A, 0x0A), 1638 + _INIT_DCS_CMD(0x3B, 0x1D), 1639 + _INIT_DCS_CMD(0x3C, 0x0A), 1640 + _INIT_DCS_CMD(0x3D, 0x1E), 1641 + _INIT_DCS_CMD(0x3E, 0x0A), 1642 + _INIT_DCS_CMD(0x3F, 0x1F), 1643 + 1644 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x04), 1645 + _INIT_DCS_CMD(0xBA, 0x01), 1646 + 1647 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x0E), 1648 + _INIT_DCS_CMD(0x02, 0x0C), 1649 + _INIT_DCS_CMD(0x20, 0x10), 1650 + _INIT_DCS_CMD(0x25, 0x16), 1651 + _INIT_DCS_CMD(0x26, 0xE0), 1652 + _INIT_DCS_CMD(0x27, 0x00), 1653 + _INIT_DCS_CMD(0x29, 0x71), 1654 + _INIT_DCS_CMD(0x2A, 0x46), 1655 + _INIT_DCS_CMD(0x2B, 0x1F), 1656 + _INIT_DCS_CMD(0x2D, 0xC7), 1657 + _INIT_DCS_CMD(0x31, 0x02), 1658 + 
_INIT_DCS_CMD(0x32, 0xDF), 1659 + _INIT_DCS_CMD(0x33, 0x5A), 1660 + _INIT_DCS_CMD(0x34, 0xC0), 1661 + _INIT_DCS_CMD(0x35, 0x5A), 1662 + _INIT_DCS_CMD(0x36, 0xC0), 1663 + _INIT_DCS_CMD(0x38, 0x65), 1664 + _INIT_DCS_CMD(0x80, 0x3E), 1665 + _INIT_DCS_CMD(0x81, 0xA0), 1666 + _INIT_DCS_CMD(0xB0, 0x01), 1667 + _INIT_DCS_CMD(0xB1, 0xCC), 1668 + _INIT_DCS_CMD(0xC0, 0x12), 1669 + _INIT_DCS_CMD(0xC2, 0xCC), 1670 + _INIT_DCS_CMD(0xC3, 0xCC), 1671 + _INIT_DCS_CMD(0xC4, 0xCC), 1672 + _INIT_DCS_CMD(0xC5, 0xCC), 1673 + _INIT_DCS_CMD(0xC6, 0xCC), 1674 + _INIT_DCS_CMD(0xC7, 0xCC), 1675 + _INIT_DCS_CMD(0xC8, 0xCC), 1676 + _INIT_DCS_CMD(0xC9, 0xCC), 1677 + _INIT_DCS_CMD(0x30, 0x00), 1678 + _INIT_DCS_CMD(0x00, 0x81), 1679 + _INIT_DCS_CMD(0x08, 0x02), 1680 + _INIT_DCS_CMD(0x09, 0x00), 1681 + _INIT_DCS_CMD(0x07, 0x21), 1682 + _INIT_DCS_CMD(0x04, 0x10), 1683 + 1684 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x1E), 1685 + _INIT_DCS_CMD(0x60, 0x00), 1686 + _INIT_DCS_CMD(0x64, 0x00), 1687 + _INIT_DCS_CMD(0x6D, 0x00), 1688 + 1689 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x0B), 1690 + _INIT_DCS_CMD(0xA6, 0x44), 1691 + _INIT_DCS_CMD(0xA7, 0xB6), 1692 + _INIT_DCS_CMD(0xA8, 0x03), 1693 + _INIT_DCS_CMD(0xA9, 0x03), 1694 + _INIT_DCS_CMD(0xAA, 0x51), 1695 + _INIT_DCS_CMD(0xAB, 0x51), 1696 + _INIT_DCS_CMD(0xAC, 0x04), 1697 + _INIT_DCS_CMD(0xBD, 0x92), 1698 + _INIT_DCS_CMD(0xBE, 0xA1), 1699 + 1700 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x05), 1701 + _INIT_DCS_CMD(0x86, 0x87), 1702 + 1703 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x06), 1704 + _INIT_DCS_CMD(0x92, 0x22), 1705 + 1706 + _INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x00), 1707 + _INIT_DCS_CMD(0x11), 1708 + _INIT_DELAY_CMD(120), 1709 + _INIT_DCS_CMD(0x29), 1710 + _INIT_DELAY_CMD(20), 1711 + {}, 1712 + }; 1713 + 1304 1714 static inline struct boe_panel *to_boe_panel(struct drm_panel *panel) 1305 1715 { 1306 1716 return container_of(panel, struct boe_panel, base); ··· 2108 1698 .init_cmds = starry_qfh032011_53g_init_cmd, 2109 1699 }; 2110 1700 1701 + static const struct 
drm_display_mode starry_himax83102_j02_default_mode = { 1702 + .clock = 161600, 1703 + .hdisplay = 1200, 1704 + .hsync_start = 1200 + 40, 1705 + .hsync_end = 1200 + 40 + 20, 1706 + .htotal = 1200 + 40 + 20 + 40, 1707 + .vdisplay = 1920, 1708 + .vsync_start = 1920 + 116, 1709 + .vsync_end = 1920 + 116 + 8, 1710 + .vtotal = 1920 + 116 + 8 + 12, 1711 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 1712 + }; 1713 + 1714 + static const struct panel_desc starry_himax83102_j02_desc = { 1715 + .modes = &starry_himax83102_j02_default_mode, 1716 + .bpc = 8, 1717 + .size = { 1718 + .width_mm = 141, 1719 + .height_mm = 226, 1720 + }, 1721 + .lanes = 4, 1722 + .format = MIPI_DSI_FMT_RGB888, 1723 + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | 1724 + MIPI_DSI_MODE_LPM, 1725 + .init_cmds = starry_himax83102_j02_init_cmd, 1726 + .lp11_before_reset = true, 1727 + }; 1728 + 1729 + static const struct drm_display_mode starry_ili9882t_default_mode = { 1730 + .clock = 165280, 1731 + .hdisplay = 1200, 1732 + .hsync_start = 1200 + 32, 1733 + .hsync_end = 1200 + 32 + 30, 1734 + .htotal = 1200 + 32 + 30 + 32, 1735 + .vdisplay = 1920, 1736 + .vsync_start = 1920 + 68, 1737 + .vsync_end = 1920 + 68 + 2, 1738 + .vtotal = 1920 + 68 + 2 + 10, 1739 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 1740 + }; 1741 + 1742 + static const struct panel_desc starry_ili9882t_desc = { 1743 + .modes = &starry_ili9882t_default_mode, 1744 + .bpc = 8, 1745 + .size = { 1746 + .width_mm = 141, 1747 + .height_mm = 226, 1748 + }, 1749 + .lanes = 4, 1750 + .format = MIPI_DSI_FMT_RGB888, 1751 + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | 1752 + MIPI_DSI_MODE_LPM, 1753 + .init_cmds = starry_ili9882t_init_cmd, 1754 + .lp11_before_reset = true, 1755 + }; 1756 + 2111 1757 static int boe_panel_get_modes(struct drm_panel *panel, 2112 1758 struct drm_connector *connector) 2113 1759 { ··· 2336 1870 }, 2337 1871 { .compatible = "starry,2081101qfh032011-53g", 
2338 1872 .data = &starry_qfh032011_53g_desc 1873 + }, 1874 + { .compatible = "starry,himax83102-j02", 1875 + .data = &starry_himax83102_j02_desc 1876 + }, 1877 + { .compatible = "starry,ili9882t", 1878 + .data = &starry_ili9882t_desc 2339 1879 }, 2340 1880 { /* sentinel */ } 2341 1881 };
+4 -6
drivers/gpu/drm/panel/panel-edp.c
··· 903 903 return err; 904 904 } 905 905 906 - static int panel_edp_remove(struct device *dev) 906 + static void panel_edp_remove(struct device *dev) 907 907 { 908 908 struct panel_edp *panel = dev_get_drvdata(dev); 909 909 ··· 918 918 919 919 kfree(panel->edid); 920 920 panel->edid = NULL; 921 - 922 - return 0; 923 921 } 924 922 925 923 static void panel_edp_shutdown(struct device *dev) ··· 1932 1934 return panel_edp_probe(&pdev->dev, id->data, NULL); 1933 1935 } 1934 1936 1935 - static int panel_edp_platform_remove(struct platform_device *pdev) 1937 + static void panel_edp_platform_remove(struct platform_device *pdev) 1936 1938 { 1937 - return panel_edp_remove(&pdev->dev); 1939 + panel_edp_remove(&pdev->dev); 1938 1940 } 1939 1941 1940 1942 static void panel_edp_platform_shutdown(struct platform_device *pdev) ··· 1955 1957 .pm = &panel_edp_pm_ops, 1956 1958 }, 1957 1959 .probe = panel_edp_platform_probe, 1958 - .remove = panel_edp_platform_remove, 1960 + .remove_new = panel_edp_platform_remove, 1959 1961 .shutdown = panel_edp_platform_shutdown, 1960 1962 }; 1961 1963
+8 -8
drivers/gpu/drm/panel/panel-khadas-ts050.c
··· 568 568 {0xfb, 0x01}, 569 569 /* Select CMD1 */ 570 570 {0xff, 0x00}, 571 - {0xd3, 0x05}, /* RGBMIPICTRL: VSYNC back porch = 5 */ 571 + {0xd3, 0x22}, /* RGBMIPICTRL: VSYNC back porch = 34 */ 572 572 {0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */ 573 573 }; 574 574 ··· 717 717 } 718 718 719 719 static const struct drm_display_mode default_mode = { 720 - .clock = 120000, 721 - .hdisplay = 1088, 722 - .hsync_start = 1088 + 104, 723 - .hsync_end = 1088 + 104 + 4, 724 - .htotal = 1088 + 104 + 4 + 127, 720 + .clock = 160000, 721 + .hdisplay = 1080, 722 + .hsync_start = 1080 + 117, 723 + .hsync_end = 1080 + 117 + 5, 724 + .htotal = 1080 + 117 + 5 + 160, 725 725 .vdisplay = 1920, 726 726 .vsync_start = 1920 + 4, 727 - .vsync_end = 1920 + 4 + 2, 728 - .vtotal = 1920 + 4 + 2 + 3, 727 + .vsync_end = 1920 + 4 + 3, 728 + .vtotal = 1920 + 4 + 3 + 31, 729 729 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 730 730 }; 731 731
+1 -1
drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
··· 308 308 .name = "lcd_olinuxino", 309 309 .of_match_table = lcd_olinuxino_of_ids, 310 310 }, 311 - .probe_new = lcd_olinuxino_probe, 311 + .probe = lcd_olinuxino_probe, 312 312 .remove = lcd_olinuxino_remove, 313 313 }; 314 314
+1 -1
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
··· 489 489 .name = "rpi_touchscreen", 490 490 .of_match_table = rpi_touchscreen_of_ids, 491 491 }, 492 - .probe_new = rpi_touchscreen_probe, 492 + .probe = rpi_touchscreen_probe, 493 493 .remove = rpi_touchscreen_remove, 494 494 }; 495 495
+64 -2
drivers/gpu/drm/panel/panel-simple.c
··· 759 759 .num_modes = 1, 760 760 .bpc = 8, 761 761 .size = { 762 - .width = 105, 763 - .height = 67, 762 + .width = 99, 763 + .height = 58, 764 764 }, 765 765 .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 766 766 }; ··· 776 776 .vsync_end = 480 + 2 + 45, 777 777 .vtotal = 480 + 2 + 45 + 0, 778 778 .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 779 + }; 780 + 781 + static const struct display_timing ampire_am_800480l1tmqw_t00h_timing = { 782 + .pixelclock = { 29930000, 33260000, 36590000 }, 783 + .hactive = { 800, 800, 800 }, 784 + .hfront_porch = { 1, 40, 168 }, 785 + .hback_porch = { 88, 88, 88 }, 786 + .hsync_len = { 1, 128, 128 }, 787 + .vactive = { 480, 480, 480 }, 788 + .vfront_porch = { 1, 35, 37 }, 789 + .vback_porch = { 8, 8, 8 }, 790 + .vsync_len = { 1, 2, 2 }, 791 + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | 792 + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE | 793 + DISPLAY_FLAGS_SYNC_POSEDGE, 794 + }; 795 + 796 + static const struct panel_desc ampire_am_800480l1tmqw_t00h = { 797 + .timings = &ampire_am_800480l1tmqw_t00h_timing, 798 + .num_timings = 1, 799 + .bpc = 8, 800 + .size = { 801 + .width = 111, 802 + .height = 67, 803 + }, 804 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 805 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | 806 + DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | 807 + DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE, 808 + .connector_type = DRM_MODE_CONNECTOR_DPI, 779 809 }; 780 810 781 811 static const struct panel_desc ampire_am800480r3tmqwa1h = { ··· 3281 3251 .connector_type = DRM_MODE_CONNECTOR_DPI, 3282 3252 }; 3283 3253 3254 + static const struct display_timing rocktech_rk043fn48h_timing = { 3255 + .pixelclock = { 6000000, 9000000, 12000000 }, 3256 + .hactive = { 480, 480, 480 }, 3257 + .hback_porch = { 8, 43, 43 }, 3258 + .hfront_porch = { 2, 8, 8 }, 3259 + .hsync_len = { 1, 1, 1 }, 3260 + .vactive = { 272, 272, 272 }, 3261 + .vback_porch = { 2, 12, 12 }, 3262 + .vfront_porch = { 1, 4, 4 }, 3263 + .vsync_len = { 1, 10, 10 }, 3264 + 
.flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW | 3265 + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE, 3266 + }; 3267 + 3268 + static const struct panel_desc rocktech_rk043fn48h = { 3269 + .timings = &rocktech_rk043fn48h_timing, 3270 + .num_timings = 1, 3271 + .bpc = 8, 3272 + .size = { 3273 + .width = 95, 3274 + .height = 54, 3275 + }, 3276 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 3277 + .connector_type = DRM_MODE_CONNECTOR_DPI, 3278 + }; 3279 + 3284 3280 static const struct display_timing rocktech_rk070er9427_timing = { 3285 3281 .pixelclock = { 26400000, 33300000, 46800000 }, 3286 3282 .hactive = { 800, 800, 800 }, ··· 4050 3994 .compatible = "ampire,am-480272h3tmqw-t01h", 4051 3995 .data = &ampire_am_480272h3tmqw_t01h, 4052 3996 }, { 3997 + .compatible = "ampire,am-800480l1tmqw-t00h", 3998 + .data = &ampire_am_800480l1tmqw_t00h, 3999 + }, { 4053 4000 .compatible = "ampire,am800480r3tmqwa1h", 4054 4001 .data = &ampire_am800480r3tmqwa1h, 4055 4002 }, { ··· 4346 4287 }, { 4347 4288 .compatible = "qishenglong,gopher2b-lcd", 4348 4289 .data = &qishenglong_gopher2b_lcd, 4290 + }, { 4291 + .compatible = "rocktech,rk043fn48h", 4292 + .data = &rocktech_rk043fn48h, 4349 4293 }, { 4350 4294 .compatible = "rocktech,rk070er9427", 4351 4295 .data = &rocktech_rk070er9427,
+1
drivers/gpu/drm/radeon/Kconfig
··· 11 11 select DRM_SUBALLOC_HELPER 12 12 select DRM_TTM 13 13 select DRM_TTM_HELPER 14 + select FB_IO_HELPERS if DRM_FBDEV_EMULATION 14 15 select SND_HDA_COMPONENT if SND_HDA_CORE 15 16 select POWER_SUPPLY 16 17 select HWMON
+3 -6
drivers/gpu/drm/radeon/radeon_fbdev.c
··· 24 24 * David Airlie 25 25 */ 26 26 27 + #include <linux/fb.h> 27 28 #include <linux/pci.h> 28 29 #include <linux/pm_runtime.h> 29 30 #include <linux/vga_switcheroo.h> ··· 191 190 192 191 static const struct fb_ops radeon_fbdev_fb_ops = { 193 192 .owner = THIS_MODULE, 194 - DRM_FB_HELPER_DEFAULT_OPS, 195 193 .fb_open = radeon_fbdev_fb_open, 196 194 .fb_release = radeon_fbdev_fb_release, 197 - .fb_read = drm_fb_helper_cfb_read, 198 - .fb_write = drm_fb_helper_cfb_write, 199 - .fb_fillrect = drm_fb_helper_cfb_fillrect, 200 - .fb_copyarea = drm_fb_helper_cfb_copyarea, 201 - .fb_imageblit = drm_fb_helper_cfb_imageblit, 195 + FB_DEFAULT_IO_OPS, 196 + DRM_FB_HELPER_DEFAULT_OPS, 202 197 .fb_destroy = radeon_fbdev_fb_destroy, 203 198 }; 204 199
drivers/gpu/drm/rcar-du/Kconfig drivers/gpu/drm/renesas/rcar-du/Kconfig
drivers/gpu/drm/rcar-du/Makefile drivers/gpu/drm/renesas/rcar-du/Makefile
drivers/gpu/drm/rcar-du/rcar_cmm.c drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
drivers/gpu/drm/rcar-du/rcar_cmm.h drivers/gpu/drm/renesas/rcar-du/rcar_cmm.h
+3 -34
drivers/gpu/drm/rcar-du/rcar_du_crtc.c drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c
··· 223 223 * DU channels that have a display PLL can't use the internal 224 224 * system clock, and have no internal clock divider. 225 225 */ 226 - 227 - /* 228 - * The H3 ES1.x exhibits dot clock duty cycle stability issues. 229 - * We can work around them by configuring the DPLL to twice the 230 - * desired frequency, coupled with a /2 post-divider. Restrict 231 - * the workaround to H3 ES1.x as ES2.0 and all other SoCs have 232 - * no post-divider when a display PLL is present (as shown by 233 - * the workaround breaking HDMI output on M3-W during testing). 234 - */ 235 - if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY) { 236 - target *= 2; 237 - div = 1; 238 - } 239 - 240 226 extclk = clk_get_rate(rcrtc->extclock); 241 227 rcar_du_dpll_divider(rcrtc, &dpll, extclk, target); 242 228 ··· 231 245 | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m) 232 246 | DPLLCR_STBY; 233 247 234 - if (rcrtc->index == 1) { 248 + if (rcrtc->index == 1) 235 249 dpllcr |= DPLLCR_PLCS1 236 250 | DPLLCR_INCS_DOTCLKIN1; 237 - } else { 238 - dpllcr |= DPLLCR_PLCS0_PLL 251 + else 252 + dpllcr |= DPLLCR_PLCS0 239 253 | DPLLCR_INCS_DOTCLKIN0; 240 - 241 - /* 242 - * On ES2.x we have a single mux controlled via bit 21, 243 - * which selects between DCLKIN source (bit 21 = 0) and 244 - * a PLL source (bit 21 = 1), where the PLL is always 245 - * PLL1. 246 - * 247 - * On ES1.x we have an additional mux, controlled 248 - * via bit 20, for choosing between PLL0 (bit 20 = 0) 249 - * and PLL1 (bit 20 = 1). We always want to use PLL1, 250 - * so on ES1.x, in addition to setting bit 21, we need 251 - * to set the bit 20. 252 - */ 253 - 254 - if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PLL) 255 - dpllcr |= DPLLCR_PLCS0_H3ES1X_PLL1; 256 - } 257 254 258 255 rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr); 259 256
drivers/gpu/drm/rcar-du/rcar_du_crtc.h drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.h
-48
drivers/gpu/drm/rcar-du/rcar_du_drv.c drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
··· 16 16 #include <linux/platform_device.h> 17 17 #include <linux/pm.h> 18 18 #include <linux/slab.h> 19 - #include <linux/sys_soc.h> 20 19 #include <linux/wait.h> 21 20 22 21 #include <drm/drm_atomic_helper.h> ··· 386 387 .dpll_mask = BIT(2) | BIT(1), 387 388 }; 388 389 389 - static const struct rcar_du_device_info rcar_du_r8a7795_es1_info = { 390 - .gen = 3, 391 - .features = RCAR_DU_FEATURE_CRTC_IRQ 392 - | RCAR_DU_FEATURE_CRTC_CLOCK 393 - | RCAR_DU_FEATURE_VSP1_SOURCE 394 - | RCAR_DU_FEATURE_INTERLACED 395 - | RCAR_DU_FEATURE_TVM_SYNC, 396 - .quirks = RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY 397 - | RCAR_DU_QUIRK_H3_ES1_PLL, 398 - .channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0), 399 - .routes = { 400 - /* 401 - * R8A7795 has one RGB output, two HDMI outputs and one 402 - * LVDS output. 403 - */ 404 - [RCAR_DU_OUTPUT_DPAD0] = { 405 - .possible_crtcs = BIT(3), 406 - .port = 0, 407 - }, 408 - [RCAR_DU_OUTPUT_HDMI0] = { 409 - .possible_crtcs = BIT(1), 410 - .port = 1, 411 - }, 412 - [RCAR_DU_OUTPUT_HDMI1] = { 413 - .possible_crtcs = BIT(2), 414 - .port = 2, 415 - }, 416 - [RCAR_DU_OUTPUT_LVDS0] = { 417 - .possible_crtcs = BIT(0), 418 - .port = 3, 419 - }, 420 - }, 421 - .num_lvds = 1, 422 - .num_rpf = 5, 423 - .dpll_mask = BIT(2) | BIT(1), 424 - }; 425 - 426 390 static const struct rcar_du_device_info rcar_du_r8a7796_info = { 427 391 .gen = 3, 428 392 .features = RCAR_DU_FEATURE_CRTC_IRQ ··· 576 614 577 615 MODULE_DEVICE_TABLE(of, rcar_du_of_table); 578 616 579 - static const struct soc_device_attribute rcar_du_soc_table[] = { 580 - { .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_du_r8a7795_es1_info }, 581 - { /* sentinel */ } 582 - }; 583 - 584 617 const char *rcar_du_output_name(enum rcar_du_output output) 585 618 { 586 619 static const char * const names[] = { ··· 664 707 665 708 static int rcar_du_probe(struct platform_device *pdev) 666 709 { 667 - const struct soc_device_attribute *soc_attr; 668 710 struct rcar_du_device *rcdu; 669 711 unsigned int 
mask; 670 712 int ret; ··· 680 724 rcdu->dev = &pdev->dev; 681 725 682 726 rcdu->info = of_device_get_match_data(rcdu->dev); 683 - 684 - soc_attr = soc_device_match(rcar_du_soc_table); 685 - if (soc_attr) 686 - rcdu->info = soc_attr->data; 687 727 688 728 platform_set_drvdata(pdev, rcdu); 689 729
-2
drivers/gpu/drm/rcar-du/rcar_du_drv.h drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.h
··· 34 34 #define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP nor EOR bits */ 35 35 36 36 #define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ 37 - #define RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY BIT(1) /* H3 ES1 has pclk stability issue */ 38 - #define RCAR_DU_QUIRK_H3_ES1_PLL BIT(2) /* H3 ES1 PLL setup differs from non-ES1 */ 39 37 40 38 enum rcar_du_output { 41 39 RCAR_DU_OUTPUT_DPAD0,
drivers/gpu/drm/rcar-du/rcar_du_encoder.c drivers/gpu/drm/renesas/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.h drivers/gpu/drm/renesas/rcar-du/rcar_du_encoder.h
drivers/gpu/drm/rcar-du/rcar_du_group.c drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
drivers/gpu/drm/rcar-du/rcar_du_group.h drivers/gpu/drm/renesas/rcar-du/rcar_du_group.h
drivers/gpu/drm/rcar-du/rcar_du_kms.c drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_kms.h drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.h
drivers/gpu/drm/rcar-du/rcar_du_plane.c drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_du_plane.h drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
+1 -2
drivers/gpu/drm/rcar-du/rcar_du_regs.h drivers/gpu/drm/renesas/rcar-du/rcar_du_regs.h
··· 283 283 #define DPLLCR 0x20044 284 284 #define DPLLCR_CODE (0x95 << 24) 285 285 #define DPLLCR_PLCS1 (1 << 23) 286 - #define DPLLCR_PLCS0_PLL (1 << 21) 287 - #define DPLLCR_PLCS0_H3ES1X_PLL1 (1 << 20) 286 + #define DPLLCR_PLCS0 (1 << 21) 288 287 #define DPLLCR_CLKE (1 << 18) 289 288 #define DPLLCR_FDPLL(n) ((n) << 12) 290 289 #define DPLLCR_N(n) ((n) << 5)
drivers/gpu/drm/rcar-du/rcar_du_vsp.c drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/rcar-du/rcar_du_vsp.h drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.h
drivers/gpu/drm/rcar-du/rcar_du_writeback.c drivers/gpu/drm/renesas/rcar-du/rcar_du_writeback.c
drivers/gpu/drm/rcar-du/rcar_du_writeback.h drivers/gpu/drm/renesas/rcar-du/rcar_du_writeback.h
drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c
drivers/gpu/drm/rcar-du/rcar_lvds.c drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
drivers/gpu/drm/rcar-du/rcar_lvds.h drivers/gpu/drm/renesas/rcar-du/rcar_lvds.h
drivers/gpu/drm/rcar-du/rcar_lvds_regs.h drivers/gpu/drm/renesas/rcar-du/rcar_lvds_regs.h
drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.h
drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
drivers/gpu/drm/rcar-du/rzg2l_mipi_dsi.c drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c
drivers/gpu/drm/rcar-du/rzg2l_mipi_dsi_regs.h drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi_regs.h
+4
drivers/gpu/drm/renesas/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + 3 + source "drivers/gpu/drm/renesas/rcar-du/Kconfig" 4 + source "drivers/gpu/drm/renesas/shmobile/Kconfig"
+4
drivers/gpu/drm/renesas/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + obj-y += rcar-du/ 4 + obj-$(CONFIG_DRM_SHMOBILE) += shmobile/
drivers/gpu/drm/shmobile/Kconfig drivers/gpu/drm/renesas/shmobile/Kconfig
drivers/gpu/drm/shmobile/Makefile drivers/gpu/drm/renesas/shmobile/Makefile
drivers/gpu/drm/shmobile/shmob_drm_backlight.c drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c
drivers/gpu/drm/shmobile/shmob_drm_backlight.h drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h
drivers/gpu/drm/shmobile/shmob_drm_crtc.c drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.h drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.h
drivers/gpu/drm/shmobile/shmob_drm_drv.c drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
drivers/gpu/drm/shmobile/shmob_drm_drv.h drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.h
drivers/gpu/drm/shmobile/shmob_drm_kms.c drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
drivers/gpu/drm/shmobile/shmob_drm_kms.h drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.h
drivers/gpu/drm/shmobile/shmob_drm_plane.c drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
drivers/gpu/drm/shmobile/shmob_drm_plane.h drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.h
drivers/gpu/drm/shmobile/shmob_drm_regs.h drivers/gpu/drm/renesas/shmobile/shmob_drm_regs.h
+1 -1
drivers/gpu/drm/solomon/ssd130x-i2c.c
··· 100 100 .name = DRIVER_NAME, 101 101 .of_match_table = ssd130x_of_match, 102 102 }, 103 - .probe_new = ssd130x_i2c_probe, 103 + .probe = ssd130x_i2c_probe, 104 104 .remove = ssd130x_i2c_remove, 105 105 .shutdown = ssd130x_i2c_shutdown, 106 106 };
+1 -3
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
··· 444 444 struct device *dev = &pdev->dev; 445 445 struct dw_mipi_dsi_stm *dsi; 446 446 struct clk *pclk; 447 - struct resource *res; 448 447 int ret; 449 448 450 449 dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); 451 450 if (!dsi) 452 451 return -ENOMEM; 453 452 454 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 455 - dsi->base = devm_ioremap_resource(dev, res); 453 + dsi->base = devm_platform_ioremap_resource(pdev, 0); 456 454 if (IS_ERR(dsi->base)) { 457 455 ret = PTR_ERR(dsi->base); 458 456 DRM_ERROR("Unable to get dsi registers %d\n", ret);
+3 -1
drivers/gpu/drm/stm/ltdc.c
··· 1145 1145 1146 1146 static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source) 1147 1147 { 1148 - struct ltdc_device *ldev = crtc_to_ltdc(crtc); 1148 + struct ltdc_device *ldev; 1149 1149 int ret; 1150 1150 1151 1151 DRM_DEBUG_DRIVER("\n"); 1152 1152 1153 1153 if (!crtc) 1154 1154 return -ENODEV; 1155 + 1156 + ldev = crtc_to_ltdc(crtc); 1155 1157 1156 1158 if (source && strcmp(source, "auto") == 0) { 1157 1159 ldev->crc_active = true;
+1
drivers/gpu/drm/tegra/Kconfig
··· 12 12 select DRM_KMS_HELPER 13 13 select DRM_MIPI_DSI 14 14 select DRM_PANEL 15 + select FB_SYS_HELPERS if DRM_FBDEV_EMULATION 15 16 select TEGRA_HOST1X 16 17 select INTERCONNECT 17 18 select IOMMU_IOVA
+3 -5
drivers/gpu/drm/tegra/fbdev.c
··· 8 8 */ 9 9 10 10 #include <linux/console.h> 11 + #include <linux/fb.h> 11 12 #include <linux/vmalloc.h> 12 13 13 14 #include <drm/drm_drv.h> ··· 59 58 60 59 static const struct fb_ops tegra_fb_ops = { 61 60 .owner = THIS_MODULE, 61 + __FB_DEFAULT_SYS_OPS_RDWR, 62 62 DRM_FB_HELPER_DEFAULT_OPS, 63 - .fb_read = drm_fb_helper_sys_read, 64 - .fb_write = drm_fb_helper_sys_write, 65 - .fb_fillrect = drm_fb_helper_sys_fillrect, 66 - .fb_copyarea = drm_fb_helper_sys_copyarea, 67 - .fb_imageblit = drm_fb_helper_sys_imageblit, 63 + __FB_DEFAULT_SYS_OPS_DRAW, 68 64 .fb_mmap = tegra_fb_mmap, 69 65 .fb_destroy = tegra_fbdev_fb_destroy, 70 66 };
-1
drivers/gpu/drm/ttm/ttm_tt.c
··· 137 137 unsigned long extra_pages) 138 138 { 139 139 ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages; 140 - ttm->caching = ttm_cached; 141 140 ttm->page_flags = page_flags; 142 141 ttm->dma_address = NULL; 143 142 ttm->swap_storage = NULL;
+1 -1
drivers/gpu/drm/virtio/Makefile
··· 6 6 virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \ 7 7 virtgpu_display.o virtgpu_vq.o \ 8 8 virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \ 9 - virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o 9 + virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o virtgpu_submit.o 10 10 11 11 obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
+4
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 486 486 struct sg_table *sgt, 487 487 enum dma_data_direction dir); 488 488 489 + /* virtgpu_submit.c */ 490 + int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, 491 + struct drm_file *file); 492 + 489 493 #endif
-182
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 38 38 VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \ 39 39 VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) 40 40 41 - static int virtio_gpu_fence_event_create(struct drm_device *dev, 42 - struct drm_file *file, 43 - struct virtio_gpu_fence *fence, 44 - uint32_t ring_idx) 45 - { 46 - struct virtio_gpu_fpriv *vfpriv = file->driver_priv; 47 - struct virtio_gpu_fence_event *e = NULL; 48 - int ret; 49 - 50 - if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx))) 51 - return 0; 52 - 53 - e = kzalloc(sizeof(*e), GFP_KERNEL); 54 - if (!e) 55 - return -ENOMEM; 56 - 57 - e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED; 58 - e->event.length = sizeof(e->event); 59 - 60 - ret = drm_event_reserve_init(dev, file, &e->base, &e->event); 61 - if (ret) 62 - goto free; 63 - 64 - fence->e = e; 65 - return 0; 66 - free: 67 - kfree(e); 68 - return ret; 69 - } 70 - 71 41 /* Must be called with &virtio_gpu_fpriv.struct_mutex held. */ 72 42 static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev, 73 43 struct virtio_gpu_fpriv *vfpriv) ··· 76 106 return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev, 77 107 virtio_gpu_map->handle, 78 108 &virtio_gpu_map->offset); 79 - } 80 - 81 - /* 82 - * Usage of execbuffer: 83 - * Relocations need to take into account the full VIRTIO_GPUDrawable size. 
84 - * However, the command as passed from user space must *not* contain the initial 85 - * VIRTIO_GPUReleaseInfo struct (first XXX bytes) 86 - */ 87 - static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, 88 - struct drm_file *file) 89 - { 90 - struct drm_virtgpu_execbuffer *exbuf = data; 91 - struct virtio_gpu_device *vgdev = dev->dev_private; 92 - struct virtio_gpu_fpriv *vfpriv = file->driver_priv; 93 - struct virtio_gpu_fence *out_fence; 94 - int ret; 95 - uint32_t *bo_handles = NULL; 96 - void __user *user_bo_handles = NULL; 97 - struct virtio_gpu_object_array *buflist = NULL; 98 - struct sync_file *sync_file; 99 - int out_fence_fd = -1; 100 - void *buf; 101 - uint64_t fence_ctx; 102 - uint32_t ring_idx; 103 - 104 - fence_ctx = vgdev->fence_drv.context; 105 - ring_idx = 0; 106 - 107 - if (vgdev->has_virgl_3d == false) 108 - return -ENOSYS; 109 - 110 - if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)) 111 - return -EINVAL; 112 - 113 - if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) { 114 - if (exbuf->ring_idx >= vfpriv->num_rings) 115 - return -EINVAL; 116 - 117 - if (!vfpriv->base_fence_ctx) 118 - return -EINVAL; 119 - 120 - fence_ctx = vfpriv->base_fence_ctx; 121 - ring_idx = exbuf->ring_idx; 122 - } 123 - 124 - virtio_gpu_create_context(dev, file); 125 - if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) { 126 - struct dma_fence *in_fence; 127 - 128 - in_fence = sync_file_get_fence(exbuf->fence_fd); 129 - 130 - if (!in_fence) 131 - return -EINVAL; 132 - 133 - /* 134 - * Wait if the fence is from a foreign context, or if the fence 135 - * array contains any fence from a foreign context. 
136 - */ 137 - ret = 0; 138 - if (!dma_fence_match_context(in_fence, fence_ctx + ring_idx)) 139 - ret = dma_fence_wait(in_fence, true); 140 - 141 - dma_fence_put(in_fence); 142 - if (ret) 143 - return ret; 144 - } 145 - 146 - if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) { 147 - out_fence_fd = get_unused_fd_flags(O_CLOEXEC); 148 - if (out_fence_fd < 0) 149 - return out_fence_fd; 150 - } 151 - 152 - if (exbuf->num_bo_handles) { 153 - bo_handles = kvmalloc_array(exbuf->num_bo_handles, 154 - sizeof(uint32_t), GFP_KERNEL); 155 - if (!bo_handles) { 156 - ret = -ENOMEM; 157 - goto out_unused_fd; 158 - } 159 - 160 - user_bo_handles = u64_to_user_ptr(exbuf->bo_handles); 161 - if (copy_from_user(bo_handles, user_bo_handles, 162 - exbuf->num_bo_handles * sizeof(uint32_t))) { 163 - ret = -EFAULT; 164 - goto out_unused_fd; 165 - } 166 - 167 - buflist = virtio_gpu_array_from_handles(file, bo_handles, 168 - exbuf->num_bo_handles); 169 - if (!buflist) { 170 - ret = -ENOENT; 171 - goto out_unused_fd; 172 - } 173 - kvfree(bo_handles); 174 - bo_handles = NULL; 175 - } 176 - 177 - buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); 178 - if (IS_ERR(buf)) { 179 - ret = PTR_ERR(buf); 180 - goto out_unused_fd; 181 - } 182 - 183 - if (buflist) { 184 - ret = virtio_gpu_array_lock_resv(buflist); 185 - if (ret) 186 - goto out_memdup; 187 - } 188 - 189 - out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx); 190 - if(!out_fence) { 191 - ret = -ENOMEM; 192 - goto out_unresv; 193 - } 194 - 195 - ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx); 196 - if (ret) 197 - goto out_unresv; 198 - 199 - if (out_fence_fd >= 0) { 200 - sync_file = sync_file_create(&out_fence->f); 201 - if (!sync_file) { 202 - dma_fence_put(&out_fence->f); 203 - ret = -ENOMEM; 204 - goto out_unresv; 205 - } 206 - 207 - exbuf->fence_fd = out_fence_fd; 208 - fd_install(out_fence_fd, sync_file->file); 209 - } 210 - 211 - virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, 212 - 
vfpriv->ctx_id, buflist, out_fence); 213 - dma_fence_put(&out_fence->f); 214 - virtio_gpu_notify(vgdev); 215 - return 0; 216 - 217 - out_unresv: 218 - if (buflist) 219 - virtio_gpu_array_unlock_resv(buflist); 220 - out_memdup: 221 - kvfree(buf); 222 - out_unused_fd: 223 - kvfree(bo_handles); 224 - if (buflist) 225 - virtio_gpu_array_put_free(buflist); 226 - 227 - if (out_fence_fd >= 0) 228 - put_unused_fd(out_fence_fd); 229 - 230 - return ret; 231 109 } 232 110 233 111 static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
+311
drivers/gpu/drm/virtio/virtgpu_submit.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright (C) 2015 Red Hat, Inc. 4 + * All Rights Reserved. 5 + * 6 + * Authors: 7 + * Dave Airlie 8 + * Alon Levy 9 + */ 10 + 11 + #include <linux/dma-fence-unwrap.h> 12 + #include <linux/file.h> 13 + #include <linux/sync_file.h> 14 + #include <linux/uaccess.h> 15 + 16 + #include <drm/drm_file.h> 17 + #include <drm/virtgpu_drm.h> 18 + 19 + #include "virtgpu_drv.h" 20 + 21 + struct virtio_gpu_submit { 22 + struct virtio_gpu_object_array *buflist; 23 + struct drm_virtgpu_execbuffer *exbuf; 24 + struct virtio_gpu_fence *out_fence; 25 + struct virtio_gpu_fpriv *vfpriv; 26 + struct virtio_gpu_device *vgdev; 27 + struct sync_file *sync_file; 28 + struct drm_file *file; 29 + int out_fence_fd; 30 + u64 fence_ctx; 31 + u32 ring_idx; 32 + void *buf; 33 + }; 34 + 35 + static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit, 36 + struct dma_fence *in_fence) 37 + { 38 + u32 context = submit->fence_ctx + submit->ring_idx; 39 + 40 + if (dma_fence_match_context(in_fence, context)) 41 + return 0; 42 + 43 + return dma_fence_wait(in_fence, true); 44 + } 45 + 46 + static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit, 47 + struct dma_fence *fence) 48 + { 49 + struct dma_fence_unwrap itr; 50 + struct dma_fence *f; 51 + int err; 52 + 53 + dma_fence_unwrap_for_each(f, &itr, fence) { 54 + err = virtio_gpu_do_fence_wait(submit, f); 55 + if (err) 56 + return err; 57 + } 58 + 59 + return 0; 60 + } 61 + 62 + static int virtio_gpu_fence_event_create(struct drm_device *dev, 63 + struct drm_file *file, 64 + struct virtio_gpu_fence *fence, 65 + u32 ring_idx) 66 + { 67 + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; 68 + struct virtio_gpu_fence_event *e = NULL; 69 + int ret; 70 + 71 + if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx))) 72 + return 0; 73 + 74 + e = kzalloc(sizeof(*e), GFP_KERNEL); 75 + if (!e) 76 + return -ENOMEM; 77 + 78 + e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED; 79 + e->event.length = 
sizeof(e->event); 80 + 81 + ret = drm_event_reserve_init(dev, file, &e->base, &e->event); 82 + if (ret) { 83 + kfree(e); 84 + return ret; 85 + } 86 + 87 + fence->e = e; 88 + 89 + return 0; 90 + } 91 + 92 + static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit) 93 + { 94 + struct drm_virtgpu_execbuffer *exbuf = submit->exbuf; 95 + u32 *bo_handles; 96 + 97 + if (!exbuf->num_bo_handles) 98 + return 0; 99 + 100 + bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(*bo_handles), 101 + GFP_KERNEL); 102 + if (!bo_handles) 103 + return -ENOMEM; 104 + 105 + if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles), 106 + exbuf->num_bo_handles * sizeof(*bo_handles))) { 107 + kvfree(bo_handles); 108 + return -EFAULT; 109 + } 110 + 111 + submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles, 112 + exbuf->num_bo_handles); 113 + if (!submit->buflist) { 114 + kvfree(bo_handles); 115 + return -ENOENT; 116 + } 117 + 118 + kvfree(bo_handles); 119 + 120 + return 0; 121 + } 122 + 123 + static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit) 124 + { 125 + if (!IS_ERR(submit->buf)) 126 + kvfree(submit->buf); 127 + 128 + if (submit->buflist) 129 + virtio_gpu_array_put_free(submit->buflist); 130 + 131 + if (submit->out_fence_fd >= 0) 132 + put_unused_fd(submit->out_fence_fd); 133 + 134 + if (submit->out_fence) 135 + dma_fence_put(&submit->out_fence->f); 136 + 137 + if (submit->sync_file) 138 + fput(submit->sync_file->file); 139 + } 140 + 141 + static void virtio_gpu_submit(struct virtio_gpu_submit *submit) 142 + { 143 + virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size, 144 + submit->vfpriv->ctx_id, submit->buflist, 145 + submit->out_fence); 146 + virtio_gpu_notify(submit->vgdev); 147 + } 148 + 149 + static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit) 150 + { 151 + submit->buf = NULL; 152 + submit->buflist = NULL; 153 + submit->sync_file = NULL; 154 + submit->out_fence = NULL; 
155 + submit->out_fence_fd = -1; 156 + } 157 + 158 + static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit, 159 + struct drm_virtgpu_execbuffer *exbuf, 160 + struct drm_device *dev, 161 + struct drm_file *file, 162 + u64 fence_ctx, u32 ring_idx) 163 + { 164 + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; 165 + struct virtio_gpu_device *vgdev = dev->dev_private; 166 + struct virtio_gpu_fence *out_fence; 167 + int err; 168 + 169 + memset(submit, 0, sizeof(*submit)); 170 + 171 + out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx); 172 + if (!out_fence) 173 + return -ENOMEM; 174 + 175 + err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx); 176 + if (err) { 177 + dma_fence_put(&out_fence->f); 178 + return err; 179 + } 180 + 181 + submit->out_fence = out_fence; 182 + submit->fence_ctx = fence_ctx; 183 + submit->ring_idx = ring_idx; 184 + submit->out_fence_fd = -1; 185 + submit->vfpriv = vfpriv; 186 + submit->vgdev = vgdev; 187 + submit->exbuf = exbuf; 188 + submit->file = file; 189 + 190 + err = virtio_gpu_init_submit_buflist(submit); 191 + if (err) 192 + return err; 193 + 194 + submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); 195 + if (IS_ERR(submit->buf)) 196 + return PTR_ERR(submit->buf); 197 + 198 + if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) { 199 + err = get_unused_fd_flags(O_CLOEXEC); 200 + if (err < 0) 201 + return err; 202 + 203 + submit->out_fence_fd = err; 204 + 205 + submit->sync_file = sync_file_create(&out_fence->f); 206 + if (!submit->sync_file) 207 + return -ENOMEM; 208 + } 209 + 210 + return 0; 211 + } 212 + 213 + static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit) 214 + { 215 + int ret = 0; 216 + 217 + if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) { 218 + struct dma_fence *in_fence = 219 + sync_file_get_fence(submit->exbuf->fence_fd); 220 + if (!in_fence) 221 + return -EINVAL; 222 + 223 + /* 224 + * Wait if the fence is from a foreign context, or if 
the 225 + * fence array contains any fence from a foreign context. 226 + */ 227 + ret = virtio_gpu_dma_fence_wait(submit, in_fence); 228 + 229 + dma_fence_put(in_fence); 230 + } 231 + 232 + return ret; 233 + } 234 + 235 + static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit) 236 + { 237 + if (submit->sync_file) { 238 + submit->exbuf->fence_fd = submit->out_fence_fd; 239 + fd_install(submit->out_fence_fd, submit->sync_file->file); 240 + } 241 + } 242 + 243 + static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit) 244 + { 245 + if (submit->buflist) 246 + return virtio_gpu_array_lock_resv(submit->buflist); 247 + 248 + return 0; 249 + } 250 + 251 + int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, 252 + struct drm_file *file) 253 + { 254 + struct virtio_gpu_device *vgdev = dev->dev_private; 255 + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; 256 + u64 fence_ctx = vgdev->fence_drv.context; 257 + struct drm_virtgpu_execbuffer *exbuf = data; 258 + struct virtio_gpu_submit submit; 259 + u32 ring_idx = 0; 260 + int ret = -EINVAL; 261 + 262 + if (!vgdev->has_virgl_3d) 263 + return -ENOSYS; 264 + 265 + if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS) 266 + return ret; 267 + 268 + if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) { 269 + if (exbuf->ring_idx >= vfpriv->num_rings) 270 + return ret; 271 + 272 + if (!vfpriv->base_fence_ctx) 273 + return ret; 274 + 275 + fence_ctx = vfpriv->base_fence_ctx; 276 + ring_idx = exbuf->ring_idx; 277 + } 278 + 279 + virtio_gpu_create_context(dev, file); 280 + 281 + ret = virtio_gpu_init_submit(&submit, exbuf, dev, file, 282 + fence_ctx, ring_idx); 283 + if (ret) 284 + goto cleanup; 285 + 286 + /* 287 + * Await in-fences in the end of the job submission path to 288 + * optimize the path by proceeding directly to the submission 289 + * to virtio after the waits. 
290 + */ 291 + ret = virtio_gpu_wait_in_fence(&submit); 292 + if (ret) 293 + goto cleanup; 294 + 295 + ret = virtio_gpu_lock_buflist(&submit); 296 + if (ret) 297 + goto cleanup; 298 + 299 + virtio_gpu_submit(&submit); 300 + 301 + /* 302 + * Set up usr-out data after submitting the job to optimize 303 + * the job submission path. 304 + */ 305 + virtio_gpu_install_out_fence_fd(&submit); 306 + virtio_gpu_complete_submit(&submit); 307 + cleanup: 308 + virtio_gpu_cleanup_submit(&submit); 309 + 310 + return ret; 311 + }
+21
drivers/video/fbdev/Kconfig
··· 158 158 bool 159 159 depends on FB 160 160 161 + config FB_IO_HELPERS 162 + bool 163 + depends on FB 164 + select FB_CFB_COPYAREA 165 + select FB_CFB_FILLRECT 166 + select FB_CFB_IMAGEBLIT 167 + 168 + config FB_SYS_HELPERS 169 + bool 170 + depends on FB 171 + select FB_SYS_COPYAREA 172 + select FB_SYS_FILLRECT 173 + select FB_SYS_FOPS 174 + select FB_SYS_IMAGEBLIT 175 + 176 + config FB_SYS_HELPERS_DEFERRED 177 + bool 178 + depends on FB 179 + select FB_DEFERRED_IO 180 + select FB_SYS_HELPERS 181 + 161 182 config FB_HECUBA 162 183 tristate 163 184 depends on FB
+4
include/drm/bridge/samsung-dsim.h
··· 54 54 unsigned int has_freqband:1; 55 55 unsigned int has_clklane_stop:1; 56 56 unsigned int num_clks; 57 + unsigned int min_freq; 57 58 unsigned int max_freq; 58 59 unsigned int wait_for_reset; 59 60 unsigned int num_bits_resol; 60 61 unsigned int pll_p_offset; 61 62 const unsigned int *reg_values; 63 + u16 m_min; 64 + u16 m_max; 62 65 }; 63 66 64 67 struct samsung_dsim_host_ops { ··· 93 90 94 91 u32 pll_clk_rate; 95 92 u32 burst_clk_rate; 93 + u32 hs_clock; 96 94 u32 esc_clk_rate; 97 95 u32 lanes; 98 96 u32 mode_flags;
+7
include/drm/drm_drv.h
··· 401 401 struct drm_device *dev, uint32_t handle, 402 402 uint64_t *offset); 403 403 404 + /** 405 + * @show_fdinfo: 406 + * 407 + * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst. 408 + */ 409 + void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f); 410 + 404 411 /** @major: driver major number */ 405 412 int major; 406 413 /** @minor: driver minor number */
+3 -80
include/drm/drm_fb_helper.h
··· 253 253 struct drm_fb_helper *fb_helper, 254 254 struct drm_fb_helper_surface_size *sizes); 255 255 256 + void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len); 257 + void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height); 258 + 256 259 void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist); 257 - 258 - ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, 259 - size_t count, loff_t *ppos); 260 - ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, 261 - size_t count, loff_t *ppos); 262 - 263 - void drm_fb_helper_sys_fillrect(struct fb_info *info, 264 - const struct fb_fillrect *rect); 265 - void drm_fb_helper_sys_copyarea(struct fb_info *info, 266 - const struct fb_copyarea *area); 267 - void drm_fb_helper_sys_imageblit(struct fb_info *info, 268 - const struct fb_image *image); 269 - 270 - ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, 271 - size_t count, loff_t *ppos); 272 - ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, 273 - size_t count, loff_t *ppos); 274 - 275 - void drm_fb_helper_cfb_fillrect(struct fb_info *info, 276 - const struct fb_fillrect *rect); 277 - void drm_fb_helper_cfb_copyarea(struct fb_info *info, 278 - const struct fb_copyarea *area); 279 - void drm_fb_helper_cfb_imageblit(struct fb_info *info, 280 - const struct fb_image *image); 281 260 282 261 void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend); 283 262 void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, ··· 371 392 static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) 372 393 { 373 394 return -ENODEV; 374 - } 375 - 376 - static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, 377 - char __user *buf, size_t count, 378 - loff_t *ppos) 379 - { 380 - return -ENODEV; 381 - } 382 - 383 - static inline ssize_t 
drm_fb_helper_sys_write(struct fb_info *info, 384 - const char __user *buf, 385 - size_t count, loff_t *ppos) 386 - { 387 - return -ENODEV; 388 - } 389 - 390 - static inline void drm_fb_helper_sys_fillrect(struct fb_info *info, 391 - const struct fb_fillrect *rect) 392 - { 393 - } 394 - 395 - static inline void drm_fb_helper_sys_copyarea(struct fb_info *info, 396 - const struct fb_copyarea *area) 397 - { 398 - } 399 - 400 - static inline void drm_fb_helper_sys_imageblit(struct fb_info *info, 401 - const struct fb_image *image) 402 - { 403 - } 404 - 405 - static inline ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, 406 - size_t count, loff_t *ppos) 407 - { 408 - return -ENODEV; 409 - } 410 - 411 - static inline ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, 412 - size_t count, loff_t *ppos) 413 - { 414 - return -ENODEV; 415 - } 416 - 417 - static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info, 418 - const struct fb_fillrect *rect) 419 - { 420 - } 421 - 422 - static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info, 423 - const struct fb_copyarea *area) 424 - { 425 - } 426 - 427 - static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info, 428 - const struct fb_image *image) 429 - { 430 395 } 431 396 432 397 static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
+32
include/drm/drm_file.h
··· 41 41 struct dma_fence; 42 42 struct drm_file; 43 43 struct drm_device; 44 + struct drm_printer; 44 45 struct device; 45 46 struct file; 46 47 ··· 259 258 /** @pid: Process that opened this file. */ 260 259 struct pid *pid; 261 260 261 + /** @client_id: A unique id for fdinfo */ 262 + u64 client_id; 263 + 262 264 /** @magic: Authentication magic, see @authenticated. */ 263 265 drm_magic_t magic; 264 266 ··· 442 438 void drm_send_event_timestamp_locked(struct drm_device *dev, 443 439 struct drm_pending_event *e, 444 440 ktime_t timestamp); 441 + 442 + /** 443 + * struct drm_memory_stats - GEM object stats associated 444 + * @shared: Total size of GEM objects shared between processes 445 + * @private: Total size of GEM objects 446 + * @resident: Total size of GEM objects backing pages 447 + * @purgeable: Total size of GEM objects that can be purged (resident and not active) 448 + * @active: Total size of GEM objects active on one or more engines 449 + * 450 + * Used by drm_print_memory_stats() 451 + */ 452 + struct drm_memory_stats { 453 + u64 shared; 454 + u64 private; 455 + u64 resident; 456 + u64 purgeable; 457 + u64 active; 458 + }; 459 + 460 + enum drm_gem_object_status; 461 + 462 + void drm_print_memory_stats(struct drm_printer *p, 463 + const struct drm_memory_stats *stats, 464 + enum drm_gem_object_status supported_status, 465 + const char *region); 466 + 467 + void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file); 468 + void drm_show_fdinfo(struct seq_file *m, struct file *f); 445 469 446 470 struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); 447 471
+32
include/drm/drm_gem.h
··· 43 43 struct drm_gem_object; 44 44 45 45 /** 46 + * enum drm_gem_object_status - bitmask of object state for fdinfo reporting 47 + * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned) 48 + * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace 49 + * 50 + * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status 51 + * and drm_show_fdinfo(). Note that an object can DRM_GEM_OBJECT_PURGEABLE if 52 + * it still active or not resident, in which case drm_show_fdinfo() will not 53 + * account for it as purgeable. So drivers do not need to check if the buffer 54 + * is idle and resident to return this bit. (Ie. userspace can mark a buffer 55 + * as purgeable even while it is still busy on the GPU.. it does not _actually_ 56 + * become puregeable until it becomes idle. The status gem object func does 57 + * not need to consider this.) 58 + */ 59 + enum drm_gem_object_status { 60 + DRM_GEM_OBJECT_RESIDENT = BIT(0), 61 + DRM_GEM_OBJECT_PURGEABLE = BIT(1), 62 + }; 63 + 64 + /** 46 65 * struct drm_gem_object_funcs - GEM object functions 47 66 */ 48 67 struct drm_gem_object_funcs { ··· 192 173 * This callback is optional. 193 174 */ 194 175 int (*evict)(struct drm_gem_object *obj); 176 + 177 + /** 178 + * @status: 179 + * 180 + * The optional status callback can return additional object state 181 + * which determines which stats the object is counted against. The 182 + * callback is called under table_lock. Racing against object status 183 + * change is "harmless", and the callback can expect to not race 184 + * against object destruction. 185 + * 186 + * Called by drm_show_memory_stats(). 187 + */ 188 + enum drm_gem_object_status (*status)(struct drm_gem_object *obj); 195 189 196 190 /** 197 191 * @vm_ops:
+112
include/linux/fb.h
··· 539 539 size_t count, loff_t *ppos); 540 540 541 541 /* 542 + * Initializes struct fb_ops for framebuffers in I/O memory. 543 + */ 544 + 545 + #define __FB_DEFAULT_IO_OPS_RDWR \ 546 + .fb_read = fb_io_read, \ 547 + .fb_write = fb_io_write 548 + 549 + #define __FB_DEFAULT_IO_OPS_DRAW \ 550 + .fb_fillrect = cfb_fillrect, \ 551 + .fb_copyarea = cfb_copyarea, \ 552 + .fb_imageblit = cfb_imageblit 553 + 554 + #define __FB_DEFAULT_IO_OPS_MMAP \ 555 + .fb_mmap = NULL // default implementation 556 + 557 + #define FB_DEFAULT_IO_OPS \ 558 + __FB_DEFAULT_IO_OPS_RDWR, \ 559 + __FB_DEFAULT_IO_OPS_DRAW, \ 560 + __FB_DEFAULT_IO_OPS_MMAP 561 + 562 + /* 542 563 * Drawing operations where framebuffer is in system RAM 543 564 */ 565 + 544 566 extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect); 545 567 extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area); 546 568 extern void sys_imageblit(struct fb_info *info, const struct fb_image *image); ··· 570 548 size_t count, loff_t *ppos); 571 549 extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, 572 550 size_t count, loff_t *ppos); 551 + 552 + /* 553 + * Initializes struct fb_ops for framebuffers in system memory. 
554 + */ 555 + 556 + #define __FB_DEFAULT_SYS_OPS_RDWR \ 557 + .fb_read = fb_sys_read, \ 558 + .fb_write = fb_sys_write 559 + 560 + #define __FB_DEFAULT_SYS_OPS_DRAW \ 561 + .fb_fillrect = sys_fillrect, \ 562 + .fb_copyarea = sys_copyarea, \ 563 + .fb_imageblit = sys_imageblit 564 + 565 + #define __FB_DEFAULT_SYS_OPS_MMAP \ 566 + .fb_mmap = NULL // default implementation 567 + 568 + #define FB_DEFAULT_SYS_OPS \ 569 + __FB_DEFAULT_SYS_OPS_RDWR, \ 570 + __FB_DEFAULT_SYS_OPS_DRAW, \ 571 + __FB_DEFAULT_SYS_OPS_MMAP 573 572 574 573 /* drivers/video/fbmem.c */ 575 574 extern int register_framebuffer(struct fb_info *fb_info); ··· 646 603 extern void fb_deferred_io_cleanup(struct fb_info *info); 647 604 extern int fb_deferred_io_fsync(struct file *file, loff_t start, 648 605 loff_t end, int datasync); 606 + 607 + /* 608 + * Generate callbacks for deferred I/O 609 + */ 610 + 611 + #define __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, __mode) \ 612 + static ssize_t __prefix ## _defio_read(struct fb_info *info, char __user *buf, \ 613 + size_t count, loff_t *ppos) \ 614 + { \ 615 + return fb_ ## __mode ## _read(info, buf, count, ppos); \ 616 + } \ 617 + static ssize_t __prefix ## _defio_write(struct fb_info *info, const char __user *buf, \ 618 + size_t count, loff_t *ppos) \ 619 + { \ 620 + unsigned long offset = *ppos; \ 621 + ssize_t ret = fb_ ## __mode ## _write(info, buf, count, ppos); \ 622 + if (ret > 0) \ 623 + __damage_range(info, offset, ret); \ 624 + return ret; \ 625 + } 626 + 627 + #define __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, __mode) \ 628 + static void __prefix ## _defio_fillrect(struct fb_info *info, \ 629 + const struct fb_fillrect *rect) \ 630 + { \ 631 + __mode ## _fillrect(info, rect); \ 632 + __damage_area(info, rect->dx, rect->dy, rect->width, rect->height); \ 633 + } \ 634 + static void __prefix ## _defio_copyarea(struct fb_info *info, \ 635 + const struct fb_copyarea *area) \ 636 + { \ 637 + __mode ## _copyarea(info, 
area); \ 638 + __damage_area(info, area->dx, area->dy, area->width, area->height); \ 639 + } \ 640 + static void __prefix ## _defio_imageblit(struct fb_info *info, \ 641 + const struct fb_image *image) \ 642 + { \ 643 + __mode ## _imageblit(info, image); \ 644 + __damage_area(info, image->dx, image->dy, image->width, image->height); \ 645 + } 646 + 647 + #define FB_GEN_DEFAULT_DEFERRED_IO_OPS(__prefix, __damage_range, __damage_area) \ 648 + __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, io) \ 649 + __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, cfb) 650 + 651 + #define FB_GEN_DEFAULT_DEFERRED_SYS_OPS(__prefix, __damage_range, __damage_area) \ 652 + __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \ 653 + __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys) 654 + 655 + /* 656 + * Initializes struct fb_ops for deferred I/O. 657 + */ 658 + 659 + #define __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix) \ 660 + .fb_read = __prefix ## _defio_read, \ 661 + .fb_write = __prefix ## _defio_write 662 + 663 + #define __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix) \ 664 + .fb_fillrect = __prefix ## _defio_fillrect, \ 665 + .fb_copyarea = __prefix ## _defio_copyarea, \ 666 + .fb_imageblit = __prefix ## _defio_imageblit 667 + 668 + #define __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix) \ 669 + .fb_mmap = fb_deferred_io_mmap 670 + 671 + #define FB_DEFAULT_DEFERRED_OPS(__prefix) \ 672 + __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix), \ 673 + __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix), \ 674 + __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix) 649 675 650 676 static inline bool fb_be_math(struct fb_info *info) 651 677 {
+10
include/uapi/drm/habanalabs_accel.h
··· 787 787 * The address which accessing it caused the razwi. 788 788 * Razwi initiator. 789 789 * Razwi cause, was it a page fault or MMU access error. 790 + * May return 0 even though no new data is available, in that case 791 + * timestamp will be 0. 790 792 * HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation 791 793 * HL_INFO_SECURED_ATTESTATION - Retrieve attestation report of the boot. 792 794 * HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications. 793 795 * HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd 794 796 * HL_INFO_GET_EVENTS - Retrieve the last occurred events 795 797 * HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information. 798 + * May return 0 even though no new data is available, in that case 799 + * timestamp will be 0. 796 800 * HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic. 797 801 * HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault. 802 + * May return 0 even though no new data is available, in that case 803 + * timestamp will be 0. 798 804 * HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event. 799 805 * HL_INFO_FW_GENERIC_REQ - Send generic request to FW. 800 806 * HL_INFO_HW_ERR_EVENT - Retrieve information on the reported HW error. 807 + * May return 0 even though no new data is available, in that case 808 + * timestamp will be 0. 801 809 * HL_INFO_FW_ERR_EVENT - Retrieve information on the reported FW error. 810 + * May return 0 even though no new data is available, in that case 811 + * timestamp will be 0. 802 812 */ 803 813 #define HL_INFO_HW_IP_INFO 0 804 814 #define HL_INFO_HW_EVENTS 1
+43 -1
include/uapi/drm/i915_drm.h
··· 674 674 * If the IOCTL is successful, the returned parameter will be set to one of the 675 675 * following values: 676 676 * * 0 if HuC firmware load is not complete, 677 - * * 1 if HuC firmware is authenticated and running. 677 + * * 1 if HuC firmware is loaded and fully authenticated, 678 + * * 2 if HuC firmware is loaded and authenticated for clear media only 678 679 */ 679 680 #define I915_PARAM_HUC_STATUS 42 680 681 ··· 3680 3679 * 3681 3680 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see 3682 3681 * struct drm_i915_gem_create_ext_protected_content. 3682 + * 3683 + * For I915_GEM_CREATE_EXT_SET_PAT usage see 3684 + * struct drm_i915_gem_create_ext_set_pat. 3683 3685 */ 3684 3686 #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 3685 3687 #define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1 3688 + #define I915_GEM_CREATE_EXT_SET_PAT 2 3686 3689 __u64 extensions; 3687 3690 }; 3688 3691 ··· 3799 3794 struct i915_user_extension base; 3800 3795 /** @flags: reserved for future usage, currently MBZ */ 3801 3796 __u32 flags; 3797 + }; 3798 + 3799 + /** 3800 + * struct drm_i915_gem_create_ext_set_pat - The 3801 + * I915_GEM_CREATE_EXT_SET_PAT extension. 3802 + * 3803 + * If this extension is provided, the specified caching policy (PAT index) is 3804 + * applied to the buffer object. 3805 + * 3806 + * Below is an example on how to create an object with specific caching policy: 3807 + * 3808 + * .. code-block:: C 3809 + * 3810 + * struct drm_i915_gem_create_ext_set_pat set_pat_ext = { 3811 + * .base = { .name = I915_GEM_CREATE_EXT_SET_PAT }, 3812 + * .pat_index = 0, 3813 + * }; 3814 + * struct drm_i915_gem_create_ext create_ext = { 3815 + * .size = PAGE_SIZE, 3816 + * .extensions = (uintptr_t)&set_pat_ext, 3817 + * }; 3818 + * 3819 + * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); 3820 + * if (err) ... 3821 + */ 3822 + struct drm_i915_gem_create_ext_set_pat { 3823 + /** @base: Extension link. See struct i915_user_extension. 
*/ 3824 + struct i915_user_extension base; 3825 + /** 3826 + * @pat_index: PAT index to be set 3827 + * PAT index is a bit field in Page Table Entry to control caching 3828 + * behaviors for GPU accesses. The definition of PAT index is 3829 + * platform dependent and can be found in hardware specifications, 3830 + */ 3831 + __u32 pat_index; 3832 + /** @rsvd: reserved for future use */ 3833 + __u32 rsvd; 3802 3834 }; 3803 3835 3804 3836 /* ID of the protected content session managed by i915 when PXP is active */