Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2019-04-04' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.2:

UAPI Changes:
-syncobj: Add TIMELINE_WAIT|QUERY|TRANSFER|TIMELINE_SIGNAL ioctls (Chunming)
-Clarify that 1.0 can be represented by drm_color_lut (Daniel)

Cross-subsystem Changes:
-dt-bindings: Add binding for rk3066 hdmi (Johan)
-dt-bindings: Add binding for Feiyang FY07024DI26A30-D panel (Jagan)
-dt-bindings: Add Rocktech vendor prefix and jh057n00900 panel bindings (Guido)
-MAINTAINERS: Add lima and ASPEED entries (Joel & Qiang)

Core Changes:
-memory: use dma_alloc_coherent when mem encryption is active (Christian)
-dma_buf: add support for a dma_fence chain (Christian)
-shmem_gem: fix off-by-one bug in new shmem gem helpers (Dan)

Driver Changes:
-rockchip: Add support for rk3066 hdmi (Johan)
-ASPEED: Add driver supporting ASPEED BMC display controller to drm (Joel)
-lima: Add driver supporting Arm Mali4xx gpus to drm (Qiang)
-vc4/v3d: Various cleanups and improved error handling (Eric)
-panel: Add support for Feiyang FY07024DI26A30-D MIPI-DSI panel (Jagan)
-panel: Add support for Rocktech jh057n00900 MIPI-DSI panel (Guido)

Cc: Johan Jonker <jbx6244@gmail.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Chunming Zhou <david1.zhou@amd.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Eric Anholt <eric@anholt.net>
Cc: Qiang Yu <yuq825@gmail.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jagan Teki <jagan@amarulasolutions.com>
Cc: Guido Günther <agx@sigxcpu.org>
Cc: Joel Stanley <joel@jms.id.au>
[airlied: fixed XA limit build breakage; Rodrigo also submitted the same patch,
but I squashed it in the merge.]
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20190404201016.GA139524@art_vandelay

+8558 -739
+20
Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
··· 1 + Feiyang FY07024DI26A30-D 7" MIPI-DSI LCD Panel 2 + 3 + Required properties: 4 + - compatible: must be "feiyang,fy07024di26a30d" 5 + - reg: DSI virtual channel used by that screen 6 + - avdd-supply: analog regulator dc1 switch 7 + - dvdd-supply: 3v3 digital regulator 8 + - reset-gpios: a GPIO phandle for the reset pin 9 + 10 + Optional properties: 11 + - backlight: phandle for the backlight control. 12 + 13 + panel@0 { 14 + compatible = "feiyang,fy07024di26a30d"; 15 + reg = <0>; 16 + avdd-supply = <&reg_dc1sw>; 17 + dvdd-supply = <&reg_dldo2>; 18 + reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */ 19 + backlight = <&backlight>; 20 + };
+1 -1
Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt
··· 12 12 Example: 13 13 14 14 &mipi_dsi { 15 - panel { 15 + panel@0 { 16 16 compatible = "innolux,p079zca"; 17 17 reg = <0>; 18 18 power-supply = <...>;
+1 -1
Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
··· 13 13 Example: 14 14 15 15 &mipi_dsi { 16 - panel { 16 + panel@0 { 17 17 compatible = "innolux,p079zca"; 18 18 reg = <0>; 19 19 avdd-supply = <...>;
+1 -1
Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
··· 12 12 Example: 13 13 14 14 &mipi_dsi { 15 - panel { 15 + panel@0 { 16 16 compatible = "kingdisplay,kd097d04"; 17 17 reg = <0>; 18 18 power-supply = <...>;
+18
Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
··· 1 + Rocktech jh057n00900 5.5" 720x1440 TFT LCD panel 2 + 3 + Required properties: 4 + - compatible: should be "rocktech,jh057n00900" 5 + - reg: DSI virtual channel of the peripheral 6 + - reset-gpios: panel reset gpio 7 + - backlight: phandle of the backlight device attached to the panel 8 + 9 + Example: 10 + 11 + &mipi_dsi { 12 + panel@0 { 13 + compatible = "rocktech,jh057n00900"; 14 + reg = <0>; 15 + backlight = <&backlight>; 16 + reset-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>; 17 + }; 18 + };
+72
Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt
··· 1 + Rockchip specific extensions for rk3066 HDMI 2 + ============================================ 3 + 4 + Required properties: 5 + - compatible: 6 + "rockchip,rk3066-hdmi"; 7 + - reg: 8 + Physical base address and length of the controller's registers. 9 + - clocks, clock-names: 10 + Phandle to HDMI controller clock, name should be "hclk". 11 + - interrupts: 12 + HDMI interrupt number. 13 + - power-domains: 14 + Phandle to the RK3066_PD_VIO power domain. 15 + - rockchip,grf: 16 + This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1. 17 + - ports: 18 + Contains one port node with two endpoints, numbered 0 and 1, 19 + connected respectively to vop0 and vop1. 20 + Contains one port node with one endpoint 21 + connected to a hdmi-connector node. 22 + - pinctrl-0, pinctrl-name: 23 + Switch the iomux for the HPD/I2C pins to HDMI function. 24 + 25 + Example: 26 + hdmi: hdmi@10116000 { 27 + compatible = "rockchip,rk3066-hdmi"; 28 + reg = <0x10116000 0x2000>; 29 + interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>; 30 + clocks = <&cru HCLK_HDMI>; 31 + clock-names = "hclk"; 32 + power-domains = <&power RK3066_PD_VIO>; 33 + rockchip,grf = <&grf>; 34 + pinctrl-names = "default"; 35 + pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>; 36 + 37 + ports { 38 + #address-cells = <1>; 39 + #size-cells = <0>; 40 + hdmi_in: port@0 { 41 + reg = <0>; 42 + #address-cells = <1>; 43 + #size-cells = <0>; 44 + hdmi_in_vop0: endpoint@0 { 45 + reg = <0>; 46 + remote-endpoint = <&vop0_out_hdmi>; 47 + }; 48 + hdmi_in_vop1: endpoint@1 { 49 + reg = <1>; 50 + remote-endpoint = <&vop1_out_hdmi>; 51 + }; 52 + }; 53 + hdmi_out: port@1 { 54 + reg = <1>; 55 + hdmi_out_con: endpoint { 56 + remote-endpoint = <&hdmi_con_in>; 57 + }; 58 + }; 59 + }; 60 + }; 61 + 62 + &pinctrl { 63 + hdmi { 64 + hdmi_hpd: hdmi-hpd { 65 + rockchip,pins = <0 RK_PA0 1 &pcfg_pull_default>; 66 + }; 67 + hdmii2c_xfer: hdmii2c-xfer { 68 + rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>, 69 + <0 RK_PA2 1 &pcfg_pull_none>; 70 + 
}; 71 + }; 72 + };
+41
Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
··· 1 + Device tree configuration for the GFX display device on the ASPEED SoCs 2 + 3 + Required properties: 4 + - compatible 5 + * Must be one of the following: 6 + + aspeed,ast2500-gfx 7 + + aspeed,ast2400-gfx 8 + * In addition, the ASPEED pinctrl bindings require the 'syscon' property to 9 + be present 10 + 11 + - reg: Physical base address and length of the GFX registers 12 + 13 + - interrupts: interrupt number for the GFX device 14 + 15 + - clocks: clock number used to generate the pixel clock 16 + 17 + - resets: reset line that must be released to use the GFX device 18 + 19 + - memory-region: 20 + Phandle to a memory region to allocate from, as defined in 21 + Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt 22 + 23 + 24 + Example: 25 + 26 + gfx: display@1e6e6000 { 27 + compatible = "aspeed,ast2500-gfx", "syscon"; 28 + reg = <0x1e6e6000 0x1000>; 29 + reg-io-width = <4>; 30 + clocks = <&syscon ASPEED_CLK_GATE_D1CLK>; 31 + resets = <&syscon ASPEED_RESET_CRT1>; 32 + interrupts = <0x19>; 33 + memory-region = <&gfx_memory>; 34 + }; 35 + 36 + gfx_memory: framebuffer { 37 + size = <0x01000000>; 38 + alignment = <0x01000000>; 39 + compatible = "shared-dma-pool"; 40 + reusable; 41 + };
+1
Documentation/devicetree/bindings/vendor-prefixes.txt
··· 345 345 rikomagic Rikomagic Tech Corp. Ltd 346 346 riscv RISC-V Foundation 347 347 rockchip Fuzhou Rockchip Electronics Co., Ltd 348 + rocktech ROCKTECH DISPLAYS LIMITED 348 349 rohm ROHM Semiconductor Co., Ltd 349 350 ronbo Ronbo Electronics 350 351 roofull Shenzhen Roofull Technology Co, Ltd
+29
MAINTAINERS
··· 4894 4894 S: Odd Fixes 4895 4895 F: drivers/gpu/drm/ast/ 4896 4896 4897 + DRM DRIVER FOR ASPEED BMC GFX 4898 + M: Joel Stanley <joel@jms.id.au> 4899 + L: linux-aspeed@lists.ozlabs.org 4900 + T: git git://anongit.freedesktop.org/drm/drm-misc 4901 + S: Supported 4902 + F: drivers/gpu/drm/aspeed/ 4903 + F: Documentation/devicetree/bindings/gpu/aspeed-gfx.txt 4904 + 4897 4905 DRM DRIVER FOR BOCHS VIRTUAL GPU 4898 4906 M: Gerd Hoffmann <kraxel@redhat.com> 4899 4907 L: virtualization@lists.linux-foundation.org ··· 4914 4906 T: git git://anongit.freedesktop.org/drm/drm-misc 4915 4907 S: Maintained 4916 4908 F: drivers/gpu/drm/tve200/ 4909 + 4910 + DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS 4911 + M: Jagan Teki <jagan@amarulasolutions.com> 4912 + S: Maintained 4913 + F: drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c 4914 + F: Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt 4917 4915 4918 4916 DRM DRIVER FOR ILITEK ILI9225 PANELS 4919 4917 M: David Lechner <david@lechnology.com> ··· 5011 4997 S: Orphan / Obsolete 5012 4998 F: drivers/gpu/drm/r128/ 5013 4999 F: include/uapi/drm/r128_drm.h 5000 + 5001 + DRM DRIVER FOR ROCKTECH JH057N00900 PANELS 5002 + M: Guido Günther <agx@sigxcpu.org> 5003 + S: Maintained 5004 + F: drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c 5005 + F: Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt 5014 5006 5015 5007 DRM DRIVER FOR SAVAGE VIDEO CARDS 5016 5008 S: Orphan / Obsolete ··· 5205 5185 S: Maintained 5206 5186 F: drivers/gpu/drm/hisilicon/ 5207 5187 F: Documentation/devicetree/bindings/display/hisilicon/ 5188 + 5189 + DRM DRIVERS FOR LIMA 5190 + M: Qiang Yu <yuq825@gmail.com> 5191 + L: dri-devel@lists.freedesktop.org 5192 + L: lima@lists.freedesktop.org 5193 + S: Maintained 5194 + F: drivers/gpu/drm/lima/ 5195 + F: include/uapi/drm/lima_drm.h 5196 + T: git git://anongit.freedesktop.org/drm/drm-misc 5208 5197 5209 5198 DRM DRIVERS FOR MEDIATEK 5210 5199 M: CK Hu 
<ck.hu@mediatek.com>
+2 -1
drivers/dma-buf/Makefile
··· 1 - obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o 1 + obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ 2 + reservation.o seqno-fence.o 2 3 obj-$(CONFIG_SYNC_FILE) += sync_file.o 3 4 obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o 4 5 obj-$(CONFIG_UDMABUF) += udmabuf.o
+241
drivers/dma-buf/dma-fence-chain.c
··· 1 + /* 2 + * fence-chain: chain fences together in a timeline 3 + * 4 + * Copyright (C) 2018 Advanced Micro Devices, Inc. 5 + * Authors: 6 + * Christian König <christian.koenig@amd.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License version 2 as published by 10 + * the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope that it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + */ 17 + 18 + #include <linux/dma-fence-chain.h> 19 + 20 + static bool dma_fence_chain_enable_signaling(struct dma_fence *fence); 21 + 22 + /** 23 + * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence 24 + * @chain: chain node to get the previous node from 25 + * 26 + * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the 27 + * chain node. 28 + */ 29 + static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain) 30 + { 31 + struct dma_fence *prev; 32 + 33 + rcu_read_lock(); 34 + prev = dma_fence_get_rcu_safe(&chain->prev); 35 + rcu_read_unlock(); 36 + return prev; 37 + } 38 + 39 + /** 40 + * dma_fence_chain_walk - chain walking function 41 + * @fence: current chain node 42 + * 43 + * Walk the chain to the next node. Returns the next fence or NULL if we are at 44 + * the end of the chain. Garbage collects chain nodes which are already 45 + * signaled. 
46 + */ 47 + struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) 48 + { 49 + struct dma_fence_chain *chain, *prev_chain; 50 + struct dma_fence *prev, *replacement, *tmp; 51 + 52 + chain = to_dma_fence_chain(fence); 53 + if (!chain) { 54 + dma_fence_put(fence); 55 + return NULL; 56 + } 57 + 58 + while ((prev = dma_fence_chain_get_prev(chain))) { 59 + 60 + prev_chain = to_dma_fence_chain(prev); 61 + if (prev_chain) { 62 + if (!dma_fence_is_signaled(prev_chain->fence)) 63 + break; 64 + 65 + replacement = dma_fence_chain_get_prev(prev_chain); 66 + } else { 67 + if (!dma_fence_is_signaled(prev)) 68 + break; 69 + 70 + replacement = NULL; 71 + } 72 + 73 + tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement); 74 + if (tmp == prev) 75 + dma_fence_put(tmp); 76 + else 77 + dma_fence_put(replacement); 78 + dma_fence_put(prev); 79 + } 80 + 81 + dma_fence_put(fence); 82 + return prev; 83 + } 84 + EXPORT_SYMBOL(dma_fence_chain_walk); 85 + 86 + /** 87 + * dma_fence_chain_find_seqno - find fence chain node by seqno 88 + * @pfence: pointer to the chain node where to start 89 + * @seqno: the sequence number to search for 90 + * 91 + * Advance the fence pointer to the chain node which will signal this sequence 92 + * number. If no sequence number is provided then this is a no-op. 93 + * 94 + * Returns EINVAL if the fence is not a chain node or the sequence number has 95 + * not yet advanced far enough. 
96 + */ 97 + int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) 98 + { 99 + struct dma_fence_chain *chain; 100 + 101 + if (!seqno) 102 + return 0; 103 + 104 + chain = to_dma_fence_chain(*pfence); 105 + if (!chain || chain->base.seqno < seqno) 106 + return -EINVAL; 107 + 108 + dma_fence_chain_for_each(*pfence, &chain->base) { 109 + if ((*pfence)->context != chain->base.context || 110 + to_dma_fence_chain(*pfence)->prev_seqno < seqno) 111 + break; 112 + } 113 + dma_fence_put(&chain->base); 114 + 115 + return 0; 116 + } 117 + EXPORT_SYMBOL(dma_fence_chain_find_seqno); 118 + 119 + static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence) 120 + { 121 + return "dma_fence_chain"; 122 + } 123 + 124 + static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence) 125 + { 126 + return "unbound"; 127 + } 128 + 129 + static void dma_fence_chain_irq_work(struct irq_work *work) 130 + { 131 + struct dma_fence_chain *chain; 132 + 133 + chain = container_of(work, typeof(*chain), work); 134 + 135 + /* Try to rearm the callback */ 136 + if (!dma_fence_chain_enable_signaling(&chain->base)) 137 + /* Ok, we are done. No more unsignaled fences left */ 138 + dma_fence_signal(&chain->base); 139 + dma_fence_put(&chain->base); 140 + } 141 + 142 + static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) 143 + { 144 + struct dma_fence_chain *chain; 145 + 146 + chain = container_of(cb, typeof(*chain), cb); 147 + irq_work_queue(&chain->work); 148 + dma_fence_put(f); 149 + } 150 + 151 + static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) 152 + { 153 + struct dma_fence_chain *head = to_dma_fence_chain(fence); 154 + 155 + dma_fence_get(&head->base); 156 + dma_fence_chain_for_each(fence, &head->base) { 157 + struct dma_fence_chain *chain = to_dma_fence_chain(fence); 158 + struct dma_fence *f = chain ? 
chain->fence : fence; 159 + 160 + dma_fence_get(f); 161 + if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) { 162 + dma_fence_put(fence); 163 + return true; 164 + } 165 + dma_fence_put(f); 166 + } 167 + dma_fence_put(&head->base); 168 + return false; 169 + } 170 + 171 + static bool dma_fence_chain_signaled(struct dma_fence *fence) 172 + { 173 + dma_fence_chain_for_each(fence, fence) { 174 + struct dma_fence_chain *chain = to_dma_fence_chain(fence); 175 + struct dma_fence *f = chain ? chain->fence : fence; 176 + 177 + if (!dma_fence_is_signaled(f)) { 178 + dma_fence_put(fence); 179 + return false; 180 + } 181 + } 182 + 183 + return true; 184 + } 185 + 186 + static void dma_fence_chain_release(struct dma_fence *fence) 187 + { 188 + struct dma_fence_chain *chain = to_dma_fence_chain(fence); 189 + 190 + dma_fence_put(rcu_dereference_protected(chain->prev, true)); 191 + dma_fence_put(chain->fence); 192 + dma_fence_free(fence); 193 + } 194 + 195 + const struct dma_fence_ops dma_fence_chain_ops = { 196 + .get_driver_name = dma_fence_chain_get_driver_name, 197 + .get_timeline_name = dma_fence_chain_get_timeline_name, 198 + .enable_signaling = dma_fence_chain_enable_signaling, 199 + .signaled = dma_fence_chain_signaled, 200 + .release = dma_fence_chain_release, 201 + }; 202 + EXPORT_SYMBOL(dma_fence_chain_ops); 203 + 204 + /** 205 + * dma_fence_chain_init - initialize a fence chain 206 + * @chain: the chain node to initialize 207 + * @prev: the previous fence 208 + * @fence: the current fence 209 + * 210 + * Initialize a new chain node and either start a new chain or add the node to 211 + * the existing chain of the previous fence. 
212 + */ 213 + void dma_fence_chain_init(struct dma_fence_chain *chain, 214 + struct dma_fence *prev, 215 + struct dma_fence *fence, 216 + uint64_t seqno) 217 + { 218 + struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev); 219 + uint64_t context; 220 + 221 + spin_lock_init(&chain->lock); 222 + rcu_assign_pointer(chain->prev, prev); 223 + chain->fence = fence; 224 + chain->prev_seqno = 0; 225 + init_irq_work(&chain->work, dma_fence_chain_irq_work); 226 + 227 + /* Try to reuse the context of the previous chain node. */ 228 + if (prev_chain && __dma_fence_is_later(seqno, prev->seqno)) { 229 + context = prev->context; 230 + chain->prev_seqno = prev->seqno; 231 + } else { 232 + context = dma_fence_context_alloc(1); 233 + /* Make sure that we always have a valid sequence number. */ 234 + if (prev_chain) 235 + seqno = max(prev->seqno, seqno); 236 + } 237 + 238 + dma_fence_init(&chain->base, &dma_fence_chain_ops, 239 + &chain->lock, context, seqno); 240 + } 241 + EXPORT_SYMBOL(dma_fence_chain_init);
+4
drivers/gpu/drm/Kconfig
··· 335 335 336 336 source "drivers/gpu/drm/vboxvideo/Kconfig" 337 337 338 + source "drivers/gpu/drm/lima/Kconfig" 339 + 340 + source "drivers/gpu/drm/aspeed/Kconfig" 341 + 338 342 # Keep legacy drivers last 339 343 340 344 menuconfig DRM_LEGACY
+2
drivers/gpu/drm/Makefile
··· 110 110 obj-$(CONFIG_DRM_TVE200) += tve200/ 111 111 obj-$(CONFIG_DRM_XEN) += xen/ 112 112 obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ 113 + obj-$(CONFIG_DRM_LIMA) += lima/ 114 + obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
+14
drivers/gpu/drm/aspeed/Kconfig
··· 1 + config DRM_ASPEED_GFX 2 + tristate "ASPEED BMC Display Controller" 3 + depends on DRM && OF 4 + select DRM_KMS_HELPER 5 + select DRM_KMS_CMA_HELPER 6 + select DRM_PANEL 7 + select DMA_CMA 8 + select CMA 9 + select MFD_SYSCON 10 + help 11 + Chose this option if you have an ASPEED AST2500 SOC Display 12 + Controller (aka GFX). 13 + 14 + If M is selected this module will be called aspeed_gfx.
+3
drivers/gpu/drm/aspeed/Makefile
··· 1 + aspeed_gfx-y := aspeed_gfx_drv.o aspeed_gfx_crtc.o aspeed_gfx_out.o 2 + 3 + obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed_gfx.o
+104
drivers/gpu/drm/aspeed/aspeed_gfx.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* Copyright 2018 IBM Corporation */ 3 + 4 + #include <drm/drm_device.h> 5 + #include <drm/drm_simple_kms_helper.h> 6 + 7 + struct aspeed_gfx { 8 + void __iomem *base; 9 + struct clk *clk; 10 + struct reset_control *rst; 11 + struct regmap *scu; 12 + 13 + struct drm_simple_display_pipe pipe; 14 + struct drm_connector connector; 15 + struct drm_fbdev_cma *fbdev; 16 + }; 17 + 18 + int aspeed_gfx_create_pipe(struct drm_device *drm); 19 + int aspeed_gfx_create_output(struct drm_device *drm); 20 + 21 + #define CRT_CTRL1 0x60 /* CRT Control I */ 22 + #define CRT_CTRL2 0x64 /* CRT Control II */ 23 + #define CRT_STATUS 0x68 /* CRT Status */ 24 + #define CRT_MISC 0x6c /* CRT Misc Setting */ 25 + #define CRT_HORIZ0 0x70 /* CRT Horizontal Total & Display Enable End */ 26 + #define CRT_HORIZ1 0x74 /* CRT Horizontal Retrace Start & End */ 27 + #define CRT_VERT0 0x78 /* CRT Vertical Total & Display Enable End */ 28 + #define CRT_VERT1 0x7C /* CRT Vertical Retrace Start & End */ 29 + #define CRT_ADDR 0x80 /* CRT Display Starting Address */ 30 + #define CRT_OFFSET 0x84 /* CRT Display Offset & Terminal Count */ 31 + #define CRT_THROD 0x88 /* CRT Threshold */ 32 + #define CRT_XSCALE 0x8C /* CRT Scaling-Up Factor */ 33 + #define CRT_CURSOR0 0x90 /* CRT Hardware Cursor X & Y Offset */ 34 + #define CRT_CURSOR1 0x94 /* CRT Hardware Cursor X & Y Position */ 35 + #define CRT_CURSOR2 0x98 /* CRT Hardware Cursor Pattern Address */ 36 + #define CRT_9C 0x9C 37 + #define CRT_OSD_H 0xA0 /* CRT OSD Horizontal Start/End */ 38 + #define CRT_OSD_V 0xA4 /* CRT OSD Vertical Start/End */ 39 + #define CRT_OSD_ADDR 0xA8 /* CRT OSD Pattern Address */ 40 + #define CRT_OSD_DISP 0xAC /* CRT OSD Offset */ 41 + #define CRT_OSD_THRESH 0xB0 /* CRT OSD Threshold & Alpha */ 42 + #define CRT_B4 0xB4 43 + #define CRT_STS_V 0xB8 /* CRT Status V */ 44 + #define CRT_SCRATCH 0xBC /* Scratchpad */ 45 + #define CRT_BB0_ADDR 0xD0 /* CRT Display BB0 Starting Address */ 46 
+ #define CRT_BB1_ADDR 0xD4 /* CRT Display BB1 Starting Address */ 47 + #define CRT_BB_COUNT 0xD8 /* CRT Display BB Terminal Count */ 48 + #define OSD_COLOR1 0xE0 /* OSD Color Palette Index 1 & 0 */ 49 + #define OSD_COLOR2 0xE4 /* OSD Color Palette Index 3 & 2 */ 50 + #define OSD_COLOR3 0xE8 /* OSD Color Palette Index 5 & 4 */ 51 + #define OSD_COLOR4 0xEC /* OSD Color Palette Index 7 & 6 */ 52 + #define OSD_COLOR5 0xF0 /* OSD Color Palette Index 9 & 8 */ 53 + #define OSD_COLOR6 0xF4 /* OSD Color Palette Index 11 & 10 */ 54 + #define OSD_COLOR7 0xF8 /* OSD Color Palette Index 13 & 12 */ 55 + #define OSD_COLOR8 0xFC /* OSD Color Palette Index 15 & 14 */ 56 + 57 + /* CTRL1 */ 58 + #define CRT_CTRL_EN BIT(0) 59 + #define CRT_CTRL_HW_CURSOR_EN BIT(1) 60 + #define CRT_CTRL_OSD_EN BIT(2) 61 + #define CRT_CTRL_INTERLACED BIT(3) 62 + #define CRT_CTRL_COLOR_RGB565 (0 << 7) 63 + #define CRT_CTRL_COLOR_YUV444 (1 << 7) 64 + #define CRT_CTRL_COLOR_XRGB8888 (2 << 7) 65 + #define CRT_CTRL_COLOR_RGB888 (3 << 7) 66 + #define CRT_CTRL_COLOR_YUV444_2RGB (5 << 7) 67 + #define CRT_CTRL_COLOR_YUV422 (7 << 7) 68 + #define CRT_CTRL_COLOR_MASK GENMASK(9, 7) 69 + #define CRT_CTRL_HSYNC_NEGATIVE BIT(16) 70 + #define CRT_CTRL_VSYNC_NEGATIVE BIT(17) 71 + #define CRT_CTRL_VERTICAL_INTR_EN BIT(30) 72 + #define CRT_CTRL_VERTICAL_INTR_STS BIT(31) 73 + 74 + /* CTRL2 */ 75 + #define CRT_CTRL_DAC_EN BIT(0) 76 + #define CRT_CTRL_VBLANK_LINE(x) (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK) 77 + #define CRT_CTRL_VBLANK_LINE_MASK GENMASK(20, 31) 78 + 79 + /* CRT_HORIZ0 */ 80 + #define CRT_H_TOTAL(x) (x) 81 + #define CRT_H_DE(x) ((x) << 16) 82 + 83 + /* CRT_HORIZ1 */ 84 + #define CRT_H_RS_START(x) (x) 85 + #define CRT_H_RS_END(x) ((x) << 16) 86 + 87 + /* CRT_VIRT0 */ 88 + #define CRT_V_TOTAL(x) (x) 89 + #define CRT_V_DE(x) ((x) << 16) 90 + 91 + /* CRT_VIRT1 */ 92 + #define CRT_V_RS_START(x) (x) 93 + #define CRT_V_RS_END(x) ((x) << 16) 94 + 95 + /* CRT_OFFSET */ 96 + #define CRT_DISP_OFFSET(x) (x) 97 + #define 
CRT_TERM_COUNT(x) ((x) << 16) 98 + 99 + /* CRT_THROD */ 100 + #define CRT_THROD_LOW(x) (x) 101 + #define CRT_THROD_HIGH(x) ((x) << 8) 102 + 103 + /* Default Threshold Seting */ 104 + #define G5_CRT_THROD_VAL (CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3C))
+241
drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + // Copyright 2018 IBM Corporation 3 + 4 + #include <linux/clk.h> 5 + #include <linux/reset.h> 6 + #include <linux/regmap.h> 7 + 8 + #include <drm/drm_crtc_helper.h> 9 + #include <drm/drm_device.h> 10 + #include <drm/drm_fb_cma_helper.h> 11 + #include <drm/drm_fourcc.h> 12 + #include <drm/drm_gem_cma_helper.h> 13 + #include <drm/drm_gem_framebuffer_helper.h> 14 + #include <drm/drm_panel.h> 15 + #include <drm/drm_simple_kms_helper.h> 16 + #include <drm/drm_vblank.h> 17 + 18 + #include "aspeed_gfx.h" 19 + 20 + static struct aspeed_gfx * 21 + drm_pipe_to_aspeed_gfx(struct drm_simple_display_pipe *pipe) 22 + { 23 + return container_of(pipe, struct aspeed_gfx, pipe); 24 + } 25 + 26 + static int aspeed_gfx_set_pixel_fmt(struct aspeed_gfx *priv, u32 *bpp) 27 + { 28 + struct drm_crtc *crtc = &priv->pipe.crtc; 29 + struct drm_device *drm = crtc->dev; 30 + const u32 format = crtc->primary->state->fb->format->format; 31 + u32 ctrl1; 32 + 33 + ctrl1 = readl(priv->base + CRT_CTRL1); 34 + ctrl1 &= ~CRT_CTRL_COLOR_MASK; 35 + 36 + switch (format) { 37 + case DRM_FORMAT_RGB565: 38 + dev_dbg(drm->dev, "Setting up RGB565 mode\n"); 39 + ctrl1 |= CRT_CTRL_COLOR_RGB565; 40 + *bpp = 16; 41 + break; 42 + case DRM_FORMAT_XRGB8888: 43 + dev_dbg(drm->dev, "Setting up XRGB8888 mode\n"); 44 + ctrl1 |= CRT_CTRL_COLOR_XRGB8888; 45 + *bpp = 32; 46 + break; 47 + default: 48 + dev_err(drm->dev, "Unhandled pixel format %08x\n", format); 49 + return -EINVAL; 50 + } 51 + 52 + writel(ctrl1, priv->base + CRT_CTRL1); 53 + 54 + return 0; 55 + } 56 + 57 + static void aspeed_gfx_enable_controller(struct aspeed_gfx *priv) 58 + { 59 + u32 ctrl1 = readl(priv->base + CRT_CTRL1); 60 + u32 ctrl2 = readl(priv->base + CRT_CTRL2); 61 + 62 + /* SCU2C: set DAC source for display output to Graphics CRT (GFX) */ 63 + regmap_update_bits(priv->scu, 0x2c, BIT(16), BIT(16)); 64 + 65 + writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1); 66 + writel(ctrl2 | CRT_CTRL_DAC_EN, 
priv->base + CRT_CTRL2); 67 + } 68 + 69 + static void aspeed_gfx_disable_controller(struct aspeed_gfx *priv) 70 + { 71 + u32 ctrl1 = readl(priv->base + CRT_CTRL1); 72 + u32 ctrl2 = readl(priv->base + CRT_CTRL2); 73 + 74 + writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1); 75 + writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2); 76 + 77 + regmap_update_bits(priv->scu, 0x2c, BIT(16), 0); 78 + } 79 + 80 + static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv) 81 + { 82 + struct drm_display_mode *m = &priv->pipe.crtc.state->adjusted_mode; 83 + u32 ctrl1, d_offset, t_count, bpp; 84 + int err; 85 + 86 + err = aspeed_gfx_set_pixel_fmt(priv, &bpp); 87 + if (err) 88 + return; 89 + 90 + #if 0 91 + /* TODO: we have only been able to test with the 40MHz USB clock. The 92 + * clock is fixed, so we cannot adjust it here. */ 93 + clk_set_rate(priv->pixel_clk, m->crtc_clock * 1000); 94 + #endif 95 + 96 + ctrl1 = readl(priv->base + CRT_CTRL1); 97 + ctrl1 &= ~(CRT_CTRL_INTERLACED | 98 + CRT_CTRL_HSYNC_NEGATIVE | 99 + CRT_CTRL_VSYNC_NEGATIVE); 100 + 101 + if (m->flags & DRM_MODE_FLAG_INTERLACE) 102 + ctrl1 |= CRT_CTRL_INTERLACED; 103 + 104 + if (!(m->flags & DRM_MODE_FLAG_PHSYNC)) 105 + ctrl1 |= CRT_CTRL_HSYNC_NEGATIVE; 106 + 107 + if (!(m->flags & DRM_MODE_FLAG_PVSYNC)) 108 + ctrl1 |= CRT_CTRL_VSYNC_NEGATIVE; 109 + 110 + writel(ctrl1, priv->base + CRT_CTRL1); 111 + 112 + /* Horizontal timing */ 113 + writel(CRT_H_TOTAL(m->htotal - 1) | CRT_H_DE(m->hdisplay - 1), 114 + priv->base + CRT_HORIZ0); 115 + writel(CRT_H_RS_START(m->hsync_start - 1) | CRT_H_RS_END(m->hsync_end), 116 + priv->base + CRT_HORIZ1); 117 + 118 + 119 + /* Vertical timing */ 120 + writel(CRT_V_TOTAL(m->vtotal - 1) | CRT_V_DE(m->vdisplay - 1), 121 + priv->base + CRT_VERT0); 122 + writel(CRT_V_RS_START(m->vsync_start) | CRT_V_RS_END(m->vsync_end), 123 + priv->base + CRT_VERT1); 124 + 125 + /* 126 + * Display Offset: address difference between consecutive scan lines 127 + * Terminal Count: memory 
size of one scan line 128 + */ 129 + d_offset = m->hdisplay * bpp / 8; 130 + t_count = (m->hdisplay * bpp + 127) / 128; 131 + writel(CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count), 132 + priv->base + CRT_OFFSET); 133 + 134 + /* 135 + * Threshold: FIFO thresholds of refill and stop (16 byte chunks 136 + * per line, rounded up) 137 + */ 138 + writel(G5_CRT_THROD_VAL, priv->base + CRT_THROD); 139 + } 140 + 141 + static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe, 142 + struct drm_crtc_state *crtc_state, 143 + struct drm_plane_state *plane_state) 144 + { 145 + struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe); 146 + struct drm_crtc *crtc = &pipe->crtc; 147 + 148 + aspeed_gfx_crtc_mode_set_nofb(priv); 149 + aspeed_gfx_enable_controller(priv); 150 + drm_crtc_vblank_on(crtc); 151 + } 152 + 153 + static void aspeed_gfx_pipe_disable(struct drm_simple_display_pipe *pipe) 154 + { 155 + struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe); 156 + struct drm_crtc *crtc = &pipe->crtc; 157 + 158 + drm_crtc_vblank_off(crtc); 159 + aspeed_gfx_disable_controller(priv); 160 + } 161 + 162 + static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe, 163 + struct drm_plane_state *plane_state) 164 + { 165 + struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe); 166 + struct drm_crtc *crtc = &pipe->crtc; 167 + struct drm_framebuffer *fb = pipe->plane.state->fb; 168 + struct drm_pending_vblank_event *event; 169 + struct drm_gem_cma_object *gem; 170 + 171 + spin_lock_irq(&crtc->dev->event_lock); 172 + event = crtc->state->event; 173 + if (event) { 174 + crtc->state->event = NULL; 175 + 176 + if (drm_crtc_vblank_get(crtc) == 0) 177 + drm_crtc_arm_vblank_event(crtc, event); 178 + else 179 + drm_crtc_send_vblank_event(crtc, event); 180 + } 181 + spin_unlock_irq(&crtc->dev->event_lock); 182 + 183 + if (!fb) 184 + return; 185 + 186 + gem = drm_fb_cma_get_gem_obj(fb, 0); 187 + if (!gem) 188 + return; 189 + writel(gem->paddr, priv->base + CRT_ADDR); 
190 + } 191 + 192 + static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe) 193 + { 194 + struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe); 195 + u32 reg = readl(priv->base + CRT_CTRL1); 196 + 197 + /* Clear pending VBLANK IRQ */ 198 + writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1); 199 + 200 + reg |= CRT_CTRL_VERTICAL_INTR_EN; 201 + writel(reg, priv->base + CRT_CTRL1); 202 + 203 + return 0; 204 + } 205 + 206 + static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe) 207 + { 208 + struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe); 209 + u32 reg = readl(priv->base + CRT_CTRL1); 210 + 211 + reg &= ~CRT_CTRL_VERTICAL_INTR_EN; 212 + writel(reg, priv->base + CRT_CTRL1); 213 + 214 + /* Clear pending VBLANK IRQ */ 215 + writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1); 216 + } 217 + 218 + static struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = { 219 + .enable = aspeed_gfx_pipe_enable, 220 + .disable = aspeed_gfx_pipe_disable, 221 + .update = aspeed_gfx_pipe_update, 222 + .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, 223 + .enable_vblank = aspeed_gfx_enable_vblank, 224 + .disable_vblank = aspeed_gfx_disable_vblank, 225 + }; 226 + 227 + static const uint32_t aspeed_gfx_formats[] = { 228 + DRM_FORMAT_XRGB8888, 229 + DRM_FORMAT_RGB565, 230 + }; 231 + 232 + int aspeed_gfx_create_pipe(struct drm_device *drm) 233 + { 234 + struct aspeed_gfx *priv = drm->dev_private; 235 + 236 + return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs, 237 + aspeed_gfx_formats, 238 + ARRAY_SIZE(aspeed_gfx_formats), 239 + NULL, 240 + &priv->connector); 241 + }
+269
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + // Copyright 2018 IBM Corporation 3 + 4 + #include <linux/clk.h> 5 + #include <linux/dma-mapping.h> 6 + #include <linux/irq.h> 7 + #include <linux/mfd/syscon.h> 8 + #include <linux/module.h> 9 + #include <linux/of.h> 10 + #include <linux/of_reserved_mem.h> 11 + #include <linux/platform_device.h> 12 + #include <linux/regmap.h> 13 + #include <linux/reset.h> 14 + 15 + #include <drm/drm_atomic_helper.h> 16 + #include <drm/drm_crtc_helper.h> 17 + #include <drm/drm_device.h> 18 + #include <drm/drm_fb_cma_helper.h> 19 + #include <drm/drm_fb_helper.h> 20 + #include <drm/drm_gem_cma_helper.h> 21 + #include <drm/drm_gem_framebuffer_helper.h> 22 + #include <drm/drm_probe_helper.h> 23 + #include <drm/drm_simple_kms_helper.h> 24 + #include <drm/drm_vblank.h> 25 + #include <drm/drm_drv.h> 26 + 27 + #include "aspeed_gfx.h" 28 + 29 + /** 30 + * DOC: ASPEED GFX Driver 31 + * 32 + * This driver is for the ASPEED BMC SoC's 'GFX' display hardware, also called 33 + * the 'SOC Display Controller' in the datasheet. This driver runs on the ARM 34 + * based BMC systems, unlike the ast driver which runs on a host CPU and is for 35 + * a PCIe graphics device. 36 + * 37 + * The AST2500 supports a total of 3 output paths: 38 + * 39 + * 1. VGA output, the output target can choose either or both to the DAC 40 + * or DVO interface. 41 + * 42 + * 2. Graphics CRT output, the output target can choose either or both to 43 + * the DAC or DVO interface. 44 + * 45 + * 3. Video input from DVO, the video input can be used for video engine 46 + * capture or DAC display output. 47 + * 48 + * Output options are selected in SCU2C. 49 + * 50 + * The "VGA mode" device is the PCI attached controller. The "Graphics CRT" 51 + * is the ARM's internal display controller. 52 + * 53 + * The driver only supports a simple configuration consisting of a 40MHz 54 + * pixel clock, fixed by hardware limitations, and the VGA output path. 
55 + * 56 + * The driver was written with the 'AST2500 Software Programming Guide' v17, 57 + * which is available under NDA from ASPEED. 58 + */ 59 + 60 + static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = { 61 + .fb_create = drm_gem_fb_create, 62 + .atomic_check = drm_atomic_helper_check, 63 + .atomic_commit = drm_atomic_helper_commit, 64 + }; 65 + 66 + static void aspeed_gfx_setup_mode_config(struct drm_device *drm) 67 + { 68 + drm_mode_config_init(drm); 69 + 70 + drm->mode_config.min_width = 0; 71 + drm->mode_config.min_height = 0; 72 + drm->mode_config.max_width = 800; 73 + drm->mode_config.max_height = 600; 74 + drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs; 75 + } 76 + 77 + static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data) 78 + { 79 + struct drm_device *drm = data; 80 + struct aspeed_gfx *priv = drm->dev_private; 81 + u32 reg; 82 + 83 + reg = readl(priv->base + CRT_CTRL1); 84 + 85 + if (reg & CRT_CTRL_VERTICAL_INTR_STS) { 86 + drm_crtc_handle_vblank(&priv->pipe.crtc); 87 + writel(reg, priv->base + CRT_CTRL1); 88 + return IRQ_HANDLED; 89 + } 90 + 91 + return IRQ_NONE; 92 + } 93 + 94 + 95 + 96 + static int aspeed_gfx_load(struct drm_device *drm) 97 + { 98 + struct platform_device *pdev = to_platform_device(drm->dev); 99 + struct aspeed_gfx *priv; 100 + struct resource *res; 101 + int ret; 102 + 103 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 104 + if (!priv) 105 + return -ENOMEM; 106 + drm->dev_private = priv; 107 + 108 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 109 + priv->base = devm_ioremap_resource(drm->dev, res); 110 + if (IS_ERR(priv->base)) 111 + return PTR_ERR(priv->base); 112 + 113 + priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu"); 114 + if (IS_ERR(priv->scu)) { 115 + dev_err(&pdev->dev, "failed to find SCU regmap\n"); 116 + return PTR_ERR(priv->scu); 117 + } 118 + 119 + ret = of_reserved_mem_device_init(drm->dev); 120 + if (ret) { 121 + dev_err(&pdev->dev, 
122 + "failed to initialize reserved mem: %d\n", ret); 123 + return ret; 124 + } 125 + 126 + ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); 127 + if (ret) { 128 + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", ret); 129 + return ret; 130 + } 131 + 132 + priv->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL); 133 + if (IS_ERR(priv->rst)) { 134 + dev_err(&pdev->dev, 135 + "missing or invalid reset controller device tree entry"); 136 + return PTR_ERR(priv->rst); 137 + } 138 + reset_control_deassert(priv->rst); 139 + 140 + priv->clk = devm_clk_get(drm->dev, NULL); 141 + if (IS_ERR(priv->clk)) { 142 + dev_err(&pdev->dev, 143 + "missing or invalid clk device tree entry"); 144 + return PTR_ERR(priv->clk); 145 + } 146 + clk_prepare_enable(priv->clk); 147 + 148 + /* Sanitize control registers */ 149 + writel(0, priv->base + CRT_CTRL1); 150 + writel(0, priv->base + CRT_CTRL2); 151 + 152 + aspeed_gfx_setup_mode_config(drm); 153 + 154 + ret = drm_vblank_init(drm, 1); 155 + if (ret < 0) { 156 + dev_err(drm->dev, "Failed to initialise vblank\n"); 157 + return ret; 158 + } 159 + 160 + ret = aspeed_gfx_create_output(drm); 161 + if (ret < 0) { 162 + dev_err(drm->dev, "Failed to create outputs\n"); 163 + return ret; 164 + } 165 + 166 + ret = aspeed_gfx_create_pipe(drm); 167 + if (ret < 0) { 168 + dev_err(drm->dev, "Cannot setup simple display pipe\n"); 169 + return ret; 170 + } 171 + 172 + ret = devm_request_irq(drm->dev, platform_get_irq(pdev, 0), 173 + aspeed_gfx_irq_handler, 0, "aspeed gfx", drm); 174 + if (ret < 0) { 175 + dev_err(drm->dev, "Failed to install IRQ handler\n"); 176 + return ret; 177 + } 178 + 179 + drm_mode_config_reset(drm); 180 + 181 + drm_fbdev_generic_setup(drm, 32); 182 + 183 + return 0; 184 + } 185 + 186 + static void aspeed_gfx_unload(struct drm_device *drm) 187 + { 188 + drm_kms_helper_poll_fini(drm); 189 + drm_mode_config_cleanup(drm); 190 + 191 + drm->dev_private = NULL; 192 + } 193 + 194 + DEFINE_DRM_GEM_CMA_FOPS(fops); 195 + 
196 + static struct drm_driver aspeed_gfx_driver = { 197 + .driver_features = DRIVER_GEM | DRIVER_MODESET | 198 + DRIVER_PRIME | DRIVER_ATOMIC, 199 + .gem_create_object = drm_cma_gem_create_object_default_funcs, 200 + .dumb_create = drm_gem_cma_dumb_create, 201 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 202 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 203 + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, 204 + .gem_prime_mmap = drm_gem_prime_mmap, 205 + .fops = &fops, 206 + .name = "aspeed-gfx-drm", 207 + .desc = "ASPEED GFX DRM", 208 + .date = "20180319", 209 + .major = 1, 210 + .minor = 0, 211 + }; 212 + 213 + static const struct of_device_id aspeed_gfx_match[] = { 214 + { .compatible = "aspeed,ast2500-gfx" }, 215 + { } 216 + }; 217 + 218 + static int aspeed_gfx_probe(struct platform_device *pdev) 219 + { 220 + struct drm_device *drm; 221 + int ret; 222 + 223 + drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev); 224 + if (IS_ERR(drm)) 225 + return PTR_ERR(drm); 226 + 227 + ret = aspeed_gfx_load(drm); 228 + if (ret) 229 + goto err_free; 230 + 231 + ret = drm_dev_register(drm, 0); 232 + if (ret) 233 + goto err_unload; 234 + 235 + return 0; 236 + 237 + err_unload: 238 + aspeed_gfx_unload(drm); 239 + err_free: 240 + drm_dev_put(drm); 241 + 242 + return ret; 243 + } 244 + 245 + static int aspeed_gfx_remove(struct platform_device *pdev) 246 + { 247 + struct drm_device *drm = platform_get_drvdata(pdev); 248 + 249 + drm_dev_unregister(drm); 250 + aspeed_gfx_unload(drm); 251 + drm_dev_put(drm); 252 + 253 + return 0; 254 + } 255 + 256 + static struct platform_driver aspeed_gfx_platform_driver = { 257 + .probe = aspeed_gfx_probe, 258 + .remove = aspeed_gfx_remove, 259 + .driver = { 260 + .name = "aspeed_gfx", 261 + .of_match_table = aspeed_gfx_match, 262 + }, 263 + }; 264 + 265 + module_platform_driver(aspeed_gfx_platform_driver); 266 + 267 + MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>"); 268 + MODULE_DESCRIPTION("ASPEED BMC DRM/KMS 
driver"); 269 + MODULE_LICENSE("GPL");
+42
drivers/gpu/drm/aspeed/aspeed_gfx_out.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + // Copyright 2018 IBM Corporation 3 + 4 + #include <drm/drm_atomic_helper.h> 5 + #include <drm/drm_connector.h> 6 + #include <drm/drm_crtc_helper.h> 7 + #include <drm/drm_probe_helper.h> 8 + 9 + #include "aspeed_gfx.h" 10 + 11 + static int aspeed_gfx_get_modes(struct drm_connector *connector) 12 + { 13 + return drm_add_modes_noedid(connector, 800, 600); 14 + } 15 + 16 + static const struct 17 + drm_connector_helper_funcs aspeed_gfx_connector_helper_funcs = { 18 + .get_modes = aspeed_gfx_get_modes, 19 + }; 20 + 21 + static const struct drm_connector_funcs aspeed_gfx_connector_funcs = { 22 + .fill_modes = drm_helper_probe_single_connector_modes, 23 + .destroy = drm_connector_cleanup, 24 + .reset = drm_atomic_helper_connector_reset, 25 + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 26 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 27 + }; 28 + 29 + int aspeed_gfx_create_output(struct drm_device *drm) 30 + { 31 + struct aspeed_gfx *priv = drm->dev_private; 32 + int ret; 33 + 34 + priv->connector.dpms = DRM_MODE_DPMS_OFF; 35 + priv->connector.polled = 0; 36 + drm_connector_helper_add(&priv->connector, 37 + &aspeed_gfx_connector_helper_funcs); 38 + ret = drm_connector_init(drm, &priv->connector, 39 + &aspeed_gfx_connector_funcs, 40 + DRM_MODE_CONNECTOR_Unknown); 41 + return ret; 42 + }
-1
drivers/gpu/drm/bochs/bochs.h
··· 73 73 struct drm_crtc crtc; 74 74 struct drm_encoder encoder; 75 75 struct drm_connector connector; 76 - bool mode_config_initialized; 77 76 78 77 /* ttm */ 79 78 struct {
+2 -5
drivers/gpu/drm/bochs/bochs_kms.c
··· 267 267 int bochs_kms_init(struct bochs_device *bochs) 268 268 { 269 269 drm_mode_config_init(bochs->dev); 270 - bochs->mode_config_initialized = true; 271 270 272 271 bochs->dev->mode_config.max_width = 8192; 273 272 bochs->dev->mode_config.max_height = 8192; ··· 291 292 292 293 void bochs_kms_fini(struct bochs_device *bochs) 293 294 { 294 - if (bochs->mode_config_initialized) { 295 - drm_mode_config_cleanup(bochs->dev); 296 - bochs->mode_config_initialized = false; 297 - } 295 + drm_atomic_helper_shutdown(bochs->dev); 296 + drm_mode_config_cleanup(bochs->dev); 298 297 }
-1
drivers/gpu/drm/cirrus/cirrus_drv.h
··· 101 101 102 102 struct cirrus_fbdev; 103 103 struct cirrus_mode_info { 104 - bool mode_config_initialized; 105 104 struct cirrus_crtc *crtc; 106 105 /* pointer to fbdev info structure */ 107 106 struct cirrus_fbdev *gfbdev;
+2 -6
drivers/gpu/drm/cirrus/cirrus_mode.c
··· 575 575 int ret; 576 576 577 577 drm_mode_config_init(cdev->dev); 578 - cdev->mode_info.mode_config_initialized = true; 579 578 580 579 cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH; 581 580 cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT; ··· 612 613 void cirrus_modeset_fini(struct cirrus_device *cdev) 613 614 { 614 615 cirrus_fbdev_fini(cdev); 615 - 616 - if (cdev->mode_info.mode_config_initialized) { 617 - drm_mode_config_cleanup(cdev->dev); 618 - cdev->mode_info.mode_config_initialized = false; 619 - } 616 + drm_helper_force_disable_all(cdev->dev); 617 + drm_mode_config_cleanup(cdev->dev); 620 618 }
+8 -13
drivers/gpu/drm/drm_fb_helper.c
··· 639 639 static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode) 640 640 { 641 641 struct drm_device *dev = fb_helper->dev; 642 - struct drm_crtc *crtc; 643 642 struct drm_connector *connector; 643 + struct drm_mode_set *modeset; 644 644 int i, j; 645 645 646 646 drm_modeset_lock_all(dev); 647 647 for (i = 0; i < fb_helper->crtc_count; i++) { 648 - crtc = fb_helper->crtc_info[i].mode_set.crtc; 648 + modeset = &fb_helper->crtc_info[i].mode_set; 649 649 650 - if (!crtc->enabled) 650 + if (!modeset->crtc->enabled) 651 651 continue; 652 652 653 - /* Walk the connectors & encoders on this fb turning them on/off */ 654 - drm_fb_helper_for_each_connector(fb_helper, j) { 655 - connector = fb_helper->connector_info[j]->connector; 653 + for (j = 0; j < modeset->num_connectors; j++) { 654 + connector = modeset->connectors[j]; 656 655 connector->funcs->dpms(connector, dpms_mode); 657 656 drm_object_property_set_value(&connector->base, 658 657 dev->mode_config.dpms_property, dpms_mode); ··· 1873 1874 int crtc_count = 0; 1874 1875 int i; 1875 1876 struct drm_fb_helper_surface_size sizes; 1876 - int gamma_size = 0; 1877 1877 int best_depth = 0; 1878 1878 1879 1879 memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); ··· 1888 1890 if (preferred_bpp != sizes.surface_bpp) 1889 1891 sizes.surface_depth = sizes.surface_bpp = preferred_bpp; 1890 1892 1891 - /* first up get a count of crtcs now in use and new min/maxes width/heights */ 1892 1893 drm_fb_helper_for_each_connector(fb_helper, i) { 1893 1894 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; 1894 1895 struct drm_cmdline_mode *cmdline_mode; ··· 1967 1970 sizes.surface_depth = best_depth; 1968 1971 } 1969 1972 1973 + /* first up get a count of crtcs now in use and new min/maxes width/heights */ 1970 1974 crtc_count = 0; 1971 1975 for (i = 0; i < fb_helper->crtc_count; i++) { 1972 1976 struct drm_display_mode *desired_mode; ··· 1989 1991 1990 1992 x = 
fb_helper->crtc_info[i].x; 1991 1993 y = fb_helper->crtc_info[i].y; 1992 - 1993 - if (gamma_size == 0) 1994 - gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; 1995 1994 1996 1995 sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width); 1997 1996 sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height); ··· 3312 3317 return ret; 3313 3318 } 3314 3319 3315 - drm_client_add(&fb_helper->client); 3316 - 3317 3320 if (!preferred_bpp) 3318 3321 preferred_bpp = dev->mode_config.preferred_depth; 3319 3322 if (!preferred_bpp) ··· 3321 3328 ret = drm_fbdev_client_hotplug(&fb_helper->client); 3322 3329 if (ret) 3323 3330 DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret); 3331 + 3332 + drm_client_add(&fb_helper->client); 3324 3333 3325 3334 return 0; 3326 3335 }
+1 -1
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 408 408 loff_t num_pages = obj->size >> PAGE_SHIFT; 409 409 struct page *page; 410 410 411 - if (vmf->pgoff > num_pages || WARN_ON_ONCE(!shmem->pages)) 411 + if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages)) 412 412 return VM_FAULT_SIGBUS; 413 413 414 414 page = shmem->pages[vmf->pgoff];
+8
drivers/gpu/drm/drm_internal.h
··· 180 180 struct drm_file *file_private); 181 181 int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, 182 182 struct drm_file *file_private); 183 + int drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data, 184 + struct drm_file *file_private); 183 185 int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, 184 186 struct drm_file *file_private); 187 + int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, 188 + struct drm_file *file_private); 185 189 int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, 186 190 struct drm_file *file_private); 187 191 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, 188 192 struct drm_file *file_private); 193 + int drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data, 194 + struct drm_file *file_private); 195 + int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, 196 + struct drm_file *file_private); 189 197 190 198 /* drm_framebuffer.c */ 191 199 void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
+8
drivers/gpu/drm/drm_ioctl.c
··· 686 686 DRM_UNLOCKED|DRM_RENDER_ALLOW), 687 687 DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, 688 688 DRM_UNLOCKED|DRM_RENDER_ALLOW), 689 + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl, 690 + DRM_UNLOCKED|DRM_RENDER_ALLOW), 689 691 DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl, 692 + DRM_UNLOCKED|DRM_RENDER_ALLOW), 693 + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl, 690 694 DRM_UNLOCKED|DRM_RENDER_ALLOW), 691 695 DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl, 692 696 DRM_UNLOCKED|DRM_RENDER_ALLOW), 693 697 DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, 698 + DRM_UNLOCKED|DRM_RENDER_ALLOW), 699 + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl, 700 + DRM_UNLOCKED|DRM_RENDER_ALLOW), 701 + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl, 694 702 DRM_UNLOCKED|DRM_RENDER_ALLOW), 695 703 DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED), 696 704 DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
+7
drivers/gpu/drm/drm_memory.c
··· 168 168 if (xen_pv_domain()) 169 169 return true; 170 170 171 + /* 172 + * Enforce dma_alloc_coherent when memory encryption is active as well 173 + * for the same reasons as for Xen paravirtual hosts. 174 + */ 175 + if (mem_encrypt_active()) 176 + return true; 177 + 171 178 for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) { 172 179 max_iomem = max(max_iomem, tmp->end); 173 180 }
+28
drivers/gpu/drm/drm_print.c
··· 253 253 va_end(args); 254 254 } 255 255 EXPORT_SYMBOL(drm_err); 256 + 257 + /** 258 + * drm_print_regset32 - print the contents of registers to a 259 + * &drm_printer stream. 260 + * 261 + * @p: the &drm printer 262 + * @regset: the list of registers to print. 263 + * 264 + * Often in driver debug, it's useful to be able to either capture the 265 + * contents of registers in the steady state using debugfs or at 266 + * specific points during operation. This lets the driver have a 267 + * single list of registers for both. 268 + */ 269 + void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset) 270 + { 271 + int namelen = 0; 272 + int i; 273 + 274 + for (i = 0; i < regset->nregs; i++) 275 + namelen = max(namelen, (int)strlen(regset->regs[i].name)); 276 + 277 + for (i = 0; i < regset->nregs; i++) { 278 + drm_printf(p, "%*s = 0x%08x\n", 279 + namelen, regset->regs[i].name, 280 + readl(regset->base + regset->regs[i].offset)); 281 + } 282 + } 283 + EXPORT_SYMBOL(drm_print_regset32);
+417 -29
drivers/gpu/drm/drm_syncobj.c
··· 61 61 struct task_struct *task; 62 62 struct dma_fence *fence; 63 63 struct dma_fence_cb fence_cb; 64 + u64 point; 64 65 }; 65 66 66 67 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, ··· 96 95 static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj, 97 96 struct syncobj_wait_entry *wait) 98 97 { 98 + struct dma_fence *fence; 99 + 99 100 if (wait->fence) 100 101 return; 101 102 ··· 106 103 * have the lock, try one more time just to be sure we don't add a 107 104 * callback when a fence has already been set. 108 105 */ 109 - if (syncobj->fence) 110 - wait->fence = dma_fence_get( 111 - rcu_dereference_protected(syncobj->fence, 1)); 112 - else 106 + fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1)); 107 + if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) { 108 + dma_fence_put(fence); 113 109 list_add_tail(&wait->node, &syncobj->cb_list); 110 + } else if (!fence) { 111 + wait->fence = dma_fence_get_stub(); 112 + } else { 113 + wait->fence = fence; 114 + } 114 115 spin_unlock(&syncobj->lock); 115 116 } 116 117 ··· 128 121 list_del_init(&wait->node); 129 122 spin_unlock(&syncobj->lock); 130 123 } 124 + 125 + /** 126 + * drm_syncobj_add_point - add new timeline point to the syncobj 127 + * @syncobj: sync object to add timeline point do 128 + * @chain: chain node to use to add the point 129 + * @fence: fence to encapsulate in the chain node 130 + * @point: sequence number to use for the point 131 + * 132 + * Add the chain node as new timeline point to the syncobj. 
133 + */ 134 + void drm_syncobj_add_point(struct drm_syncobj *syncobj, 135 + struct dma_fence_chain *chain, 136 + struct dma_fence *fence, 137 + uint64_t point) 138 + { 139 + struct syncobj_wait_entry *cur, *tmp; 140 + struct dma_fence *prev; 141 + 142 + dma_fence_get(fence); 143 + 144 + spin_lock(&syncobj->lock); 145 + 146 + prev = drm_syncobj_fence_get(syncobj); 147 + /* You are adding an unorder point to timeline, which could cause payload returned from query_ioctl is 0! */ 148 + if (prev && prev->seqno >= point) 149 + DRM_ERROR("You are adding an unorder point to timeline!\n"); 150 + dma_fence_chain_init(chain, prev, fence, point); 151 + rcu_assign_pointer(syncobj->fence, &chain->base); 152 + 153 + list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) 154 + syncobj_wait_syncobj_func(syncobj, cur); 155 + spin_unlock(&syncobj->lock); 156 + 157 + /* Walk the chain once to trigger garbage collection */ 158 + dma_fence_chain_for_each(fence, prev); 159 + dma_fence_put(prev); 160 + } 161 + EXPORT_SYMBOL(drm_syncobj_add_point); 131 162 132 163 /** 133 164 * drm_syncobj_replace_fence - replace fence in a sync object. 
··· 190 145 rcu_assign_pointer(syncobj->fence, fence); 191 146 192 147 if (fence != old_fence) { 193 - list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { 194 - list_del_init(&cur->node); 148 + list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) 195 149 syncobj_wait_syncobj_func(syncobj, cur); 196 - } 197 150 } 198 151 199 152 spin_unlock(&syncobj->lock); ··· 214 171 dma_fence_put(fence); 215 172 } 216 173 174 + /* 5s default for wait submission */ 175 + #define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL 217 176 /** 218 177 * drm_syncobj_find_fence - lookup and reference the fence in a sync object 219 178 * @file_private: drm file private pointer ··· 236 191 struct dma_fence **fence) 237 192 { 238 193 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); 239 - int ret = 0; 194 + struct syncobj_wait_entry wait; 195 + u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT); 196 + int ret; 240 197 241 198 if (!syncobj) 242 199 return -ENOENT; 243 200 244 201 *fence = drm_syncobj_fence_get(syncobj); 245 - if (!*fence) { 202 + drm_syncobj_put(syncobj); 203 + 204 + if (*fence) { 205 + ret = dma_fence_chain_find_seqno(fence, point); 206 + if (!ret) 207 + return 0; 208 + dma_fence_put(*fence); 209 + } else { 246 210 ret = -EINVAL; 247 211 } 248 - drm_syncobj_put(syncobj); 212 + 213 + if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)) 214 + return ret; 215 + 216 + memset(&wait, 0, sizeof(wait)); 217 + wait.task = current; 218 + wait.point = point; 219 + drm_syncobj_fence_add_wait(syncobj, &wait); 220 + 221 + do { 222 + set_current_state(TASK_INTERRUPTIBLE); 223 + if (wait.fence) { 224 + ret = 0; 225 + break; 226 + } 227 + if (timeout == 0) { 228 + ret = -ETIME; 229 + break; 230 + } 231 + 232 + if (signal_pending(current)) { 233 + ret = -ERESTARTSYS; 234 + break; 235 + } 236 + 237 + timeout = schedule_timeout(timeout); 238 + } while (1); 239 + 240 + __set_current_state(TASK_RUNNING); 241 + *fence = wait.fence; 242 + 
243 + if (wait.node.next) 244 + drm_syncobj_remove_wait(syncobj, &wait); 245 + 249 246 return ret; 250 247 } 251 248 EXPORT_SYMBOL(drm_syncobj_find_fence); ··· 680 593 &args->handle); 681 594 } 682 595 596 + static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, 597 + struct drm_syncobj_transfer *args) 598 + { 599 + struct drm_syncobj *timeline_syncobj = NULL; 600 + struct dma_fence *fence; 601 + struct dma_fence_chain *chain; 602 + int ret; 603 + 604 + timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle); 605 + if (!timeline_syncobj) { 606 + return -ENOENT; 607 + } 608 + ret = drm_syncobj_find_fence(file_private, args->src_handle, 609 + args->src_point, args->flags, 610 + &fence); 611 + if (ret) 612 + goto err; 613 + chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); 614 + if (!chain) { 615 + ret = -ENOMEM; 616 + goto err1; 617 + } 618 + drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point); 619 + err1: 620 + dma_fence_put(fence); 621 + err: 622 + drm_syncobj_put(timeline_syncobj); 623 + 624 + return ret; 625 + } 626 + 627 + static int 628 + drm_syncobj_transfer_to_binary(struct drm_file *file_private, 629 + struct drm_syncobj_transfer *args) 630 + { 631 + struct drm_syncobj *binary_syncobj = NULL; 632 + struct dma_fence *fence; 633 + int ret; 634 + 635 + binary_syncobj = drm_syncobj_find(file_private, args->dst_handle); 636 + if (!binary_syncobj) 637 + return -ENOENT; 638 + ret = drm_syncobj_find_fence(file_private, args->src_handle, 639 + args->src_point, args->flags, &fence); 640 + if (ret) 641 + goto err; 642 + drm_syncobj_replace_fence(binary_syncobj, fence); 643 + dma_fence_put(fence); 644 + err: 645 + drm_syncobj_put(binary_syncobj); 646 + 647 + return ret; 648 + } 649 + int 650 + drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data, 651 + struct drm_file *file_private) 652 + { 653 + struct drm_syncobj_transfer *args = data; 654 + int ret; 655 + 656 + if (!drm_core_check_feature(dev, 
DRIVER_SYNCOBJ)) 657 + return -ENODEV; 658 + 659 + if (args->pad) 660 + return -EINVAL; 661 + 662 + if (args->dst_point) 663 + ret = drm_syncobj_transfer_to_timeline(file_private, args); 664 + else 665 + ret = drm_syncobj_transfer_to_binary(file_private, args); 666 + 667 + return ret; 668 + } 669 + 683 670 static void syncobj_wait_fence_func(struct dma_fence *fence, 684 671 struct dma_fence_cb *cb) 685 672 { ··· 766 605 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, 767 606 struct syncobj_wait_entry *wait) 768 607 { 608 + struct dma_fence *fence; 609 + 769 610 /* This happens inside the syncobj lock */ 770 - wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 771 - lockdep_is_held(&syncobj->lock))); 611 + fence = rcu_dereference_protected(syncobj->fence, 612 + lockdep_is_held(&syncobj->lock)); 613 + dma_fence_get(fence); 614 + if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) { 615 + dma_fence_put(fence); 616 + return; 617 + } else if (!fence) { 618 + wait->fence = dma_fence_get_stub(); 619 + } else { 620 + wait->fence = fence; 621 + } 622 + 772 623 wake_up_process(wait->task); 624 + list_del_init(&wait->node); 773 625 } 774 626 775 627 static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, 628 + void __user *user_points, 776 629 uint32_t count, 777 630 uint32_t flags, 778 631 signed long timeout, ··· 794 619 { 795 620 struct syncobj_wait_entry *entries; 796 621 struct dma_fence *fence; 622 + uint64_t *points; 797 623 uint32_t signaled_count, i; 798 624 799 - entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); 800 - if (!entries) 625 + points = kmalloc_array(count, sizeof(*points), GFP_KERNEL); 626 + if (points == NULL) 801 627 return -ENOMEM; 802 628 629 + if (!user_points) { 630 + memset(points, 0, count * sizeof(uint64_t)); 631 + 632 + } else if (copy_from_user(points, user_points, 633 + sizeof(uint64_t) * count)) { 634 + timeout = -EFAULT; 635 + goto err_free_points; 636 + } 637 + 
638 + entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); 639 + if (!entries) { 640 + timeout = -ENOMEM; 641 + goto err_free_points; 642 + } 803 643 /* Walk the list of sync objects and initialize entries. We do 804 644 * this up-front so that we can properly return -EINVAL if there is 805 645 * a syncobj with a missing fence and then never have the chance of ··· 822 632 */ 823 633 signaled_count = 0; 824 634 for (i = 0; i < count; ++i) { 635 + struct dma_fence *fence; 636 + 825 637 entries[i].task = current; 826 - entries[i].fence = drm_syncobj_fence_get(syncobjs[i]); 827 - if (!entries[i].fence) { 638 + entries[i].point = points[i]; 639 + fence = drm_syncobj_fence_get(syncobjs[i]); 640 + if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) { 641 + dma_fence_put(fence); 828 642 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 829 643 continue; 830 644 } else { ··· 837 643 } 838 644 } 839 645 840 - if (dma_fence_is_signaled(entries[i].fence)) { 646 + if (fence) 647 + entries[i].fence = fence; 648 + else 649 + entries[i].fence = dma_fence_get_stub(); 650 + 651 + if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) || 652 + dma_fence_is_signaled(entries[i].fence)) { 841 653 if (signaled_count == 0 && idx) 842 654 *idx = i; 843 655 signaled_count++; ··· 876 676 if (!fence) 877 677 continue; 878 678 879 - if (dma_fence_is_signaled(fence) || 679 + if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) || 680 + dma_fence_is_signaled(fence) || 880 681 (!entries[i].fence_cb.func && 881 682 dma_fence_add_callback(fence, 882 683 &entries[i].fence_cb, ··· 922 721 } 923 722 kfree(entries); 924 723 724 + err_free_points: 725 + kfree(points); 726 + 925 727 return timeout; 926 728 } 927 729 ··· 964 760 static int drm_syncobj_array_wait(struct drm_device *dev, 965 761 struct drm_file *file_private, 966 762 struct drm_syncobj_wait *wait, 967 - struct drm_syncobj **syncobjs) 763 + struct drm_syncobj_timeline_wait *timeline_wait, 764 + struct drm_syncobj **syncobjs, bool 
timeline) 968 765 { 969 - signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec); 766 + signed long timeout = 0; 970 767 uint32_t first = ~0; 971 768 972 - timeout = drm_syncobj_array_wait_timeout(syncobjs, 973 - wait->count_handles, 974 - wait->flags, 975 - timeout, &first); 976 - if (timeout < 0) 977 - return timeout; 978 - 979 - wait->first_signaled = first; 769 + if (!timeline) { 770 + timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec); 771 + timeout = drm_syncobj_array_wait_timeout(syncobjs, 772 + NULL, 773 + wait->count_handles, 774 + wait->flags, 775 + timeout, &first); 776 + if (timeout < 0) 777 + return timeout; 778 + wait->first_signaled = first; 779 + } else { 780 + timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec); 781 + timeout = drm_syncobj_array_wait_timeout(syncobjs, 782 + u64_to_user_ptr(timeline_wait->points), 783 + timeline_wait->count_handles, 784 + timeline_wait->flags, 785 + timeout, &first); 786 + if (timeout < 0) 787 + return timeout; 788 + timeline_wait->first_signaled = first; 789 + } 980 790 return 0; 981 791 } 982 792 ··· 1076 858 return ret; 1077 859 1078 860 ret = drm_syncobj_array_wait(dev, file_private, 1079 - args, syncobjs); 861 + args, NULL, syncobjs, false); 1080 862 1081 863 drm_syncobj_array_free(syncobjs, args->count_handles); 1082 864 1083 865 return ret; 1084 866 } 867 + 868 + int 869 + drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, 870 + struct drm_file *file_private) 871 + { 872 + struct drm_syncobj_timeline_wait *args = data; 873 + struct drm_syncobj **syncobjs; 874 + int ret = 0; 875 + 876 + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) 877 + return -ENODEV; 878 + 879 + if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | 880 + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | 881 + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) 882 + return -EINVAL; 883 + 884 + if (args->count_handles == 0) 885 + return -EINVAL; 886 + 887 + ret = drm_syncobj_array_find(file_private, 888 + 
u64_to_user_ptr(args->handles), 889 + args->count_handles, 890 + &syncobjs); 891 + if (ret < 0) 892 + return ret; 893 + 894 + ret = drm_syncobj_array_wait(dev, file_private, 895 + NULL, args, syncobjs, true); 896 + 897 + drm_syncobj_array_free(syncobjs, args->count_handles); 898 + 899 + return ret; 900 + } 901 + 1085 902 1086 903 int 1087 904 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, ··· 1179 926 for (i = 0; i < args->count_handles; i++) 1180 927 drm_syncobj_assign_null_handle(syncobjs[i]); 1181 928 929 + drm_syncobj_array_free(syncobjs, args->count_handles); 930 + 931 + return ret; 932 + } 933 + 934 + int 935 + drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data, 936 + struct drm_file *file_private) 937 + { 938 + struct drm_syncobj_timeline_array *args = data; 939 + struct drm_syncobj **syncobjs; 940 + struct dma_fence_chain **chains; 941 + uint64_t *points; 942 + uint32_t i, j; 943 + int ret; 944 + 945 + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) 946 + return -EOPNOTSUPP; 947 + 948 + if (args->pad != 0) 949 + return -EINVAL; 950 + 951 + if (args->count_handles == 0) 952 + return -EINVAL; 953 + 954 + ret = drm_syncobj_array_find(file_private, 955 + u64_to_user_ptr(args->handles), 956 + args->count_handles, 957 + &syncobjs); 958 + if (ret < 0) 959 + return ret; 960 + 961 + points = kmalloc_array(args->count_handles, sizeof(*points), 962 + GFP_KERNEL); 963 + if (!points) { 964 + ret = -ENOMEM; 965 + goto out; 966 + } 967 + if (!u64_to_user_ptr(args->points)) { 968 + memset(points, 0, args->count_handles * sizeof(uint64_t)); 969 + } else if (copy_from_user(points, u64_to_user_ptr(args->points), 970 + sizeof(uint64_t) * args->count_handles)) { 971 + ret = -EFAULT; 972 + goto err_points; 973 + } 974 + 975 + chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL); 976 + if (!chains) { 977 + ret = -ENOMEM; 978 + goto err_points; 979 + } 980 + for (i = 0; i < args->count_handles; i++) { 981 + chains[i] = 
kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); 982 + if (!chains[i]) { 983 + for (j = 0; j < i; j++) 984 + kfree(chains[j]); 985 + ret = -ENOMEM; 986 + goto err_chains; 987 + } 988 + } 989 + 990 + for (i = 0; i < args->count_handles; i++) { 991 + struct dma_fence *fence = dma_fence_get_stub(); 992 + 993 + drm_syncobj_add_point(syncobjs[i], chains[i], 994 + fence, points[i]); 995 + dma_fence_put(fence); 996 + } 997 + err_chains: 998 + kfree(chains); 999 + err_points: 1000 + kfree(points); 1001 + out: 1002 + drm_syncobj_array_free(syncobjs, args->count_handles); 1003 + 1004 + return ret; 1005 + } 1006 + 1007 + int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, 1008 + struct drm_file *file_private) 1009 + { 1010 + struct drm_syncobj_timeline_array *args = data; 1011 + struct drm_syncobj **syncobjs; 1012 + uint64_t __user *points = u64_to_user_ptr(args->points); 1013 + uint32_t i; 1014 + int ret; 1015 + 1016 + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) 1017 + return -ENODEV; 1018 + 1019 + if (args->pad != 0) 1020 + return -EINVAL; 1021 + 1022 + if (args->count_handles == 0) 1023 + return -EINVAL; 1024 + 1025 + ret = drm_syncobj_array_find(file_private, 1026 + u64_to_user_ptr(args->handles), 1027 + args->count_handles, 1028 + &syncobjs); 1029 + if (ret < 0) 1030 + return ret; 1031 + 1032 + for (i = 0; i < args->count_handles; i++) { 1033 + struct dma_fence_chain *chain; 1034 + struct dma_fence *fence; 1035 + uint64_t point; 1036 + 1037 + fence = drm_syncobj_fence_get(syncobjs[i]); 1038 + chain = to_dma_fence_chain(fence); 1039 + if (chain) { 1040 + struct dma_fence *iter, *last_signaled = NULL; 1041 + 1042 + dma_fence_chain_for_each(iter, fence) { 1043 + if (!iter) 1044 + break; 1045 + dma_fence_put(last_signaled); 1046 + last_signaled = dma_fence_get(iter); 1047 + if (!to_dma_fence_chain(last_signaled)->prev_seqno) 1048 + /* It is most likely that timeline has 1049 + * unorder points. 
*/ 1050 + break; 1051 + } 1052 + point = dma_fence_is_signaled(last_signaled) ? 1053 + last_signaled->seqno : 1054 + to_dma_fence_chain(last_signaled)->prev_seqno; 1055 + dma_fence_put(last_signaled); 1056 + } else { 1057 + point = 0; 1058 + } 1059 + ret = copy_to_user(&points[i], &point, sizeof(uint64_t)); 1060 + ret = ret ? -EFAULT : 0; 1061 + if (ret) 1062 + break; 1063 + } 1182 1064 drm_syncobj_array_free(syncobjs, args->count_handles); 1183 1065 1184 1066 return ret;
+10
drivers/gpu/drm/lima/Kconfig
# SPDX-License-Identifier: GPL-2.0 OR MIT
# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>

config DRM_LIMA
	tristate "LIMA (DRM support for ARM Mali 400/450 GPU)"
	depends on DRM
	depends on ARM || ARM64 || COMPILE_TEST
	select DRM_SCHED
	help
	  Choose this option to enable the kernel mode setting (DRM)
	  driver for the ARM Mali 400 and Mali 450 embedded GPU series.
+21
drivers/gpu/drm/lima/Makefile
# SPDX-License-Identifier: GPL-2.0 OR MIT
# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>

# Objects linked together into the single lima.ko module.
lima-y := \
	lima_drv.o \
	lima_device.o \
	lima_pmu.o \
	lima_l2_cache.o \
	lima_mmu.o \
	lima_gp.o \
	lima_pp.o \
	lima_gem.o \
	lima_vm.o \
	lima_sched.o \
	lima_ctx.o \
	lima_gem_prime.o \
	lima_dlbu.o \
	lima_bcast.o \
	lima_object.o

obj-$(CONFIG_DRM_LIMA) += lima.o
+47
drivers/gpu/drm/lima/lima_bcast.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/io.h> 5 + #include <linux/device.h> 6 + 7 + #include "lima_device.h" 8 + #include "lima_bcast.h" 9 + #include "lima_regs.h" 10 + 11 + #define bcast_write(reg, data) writel(data, ip->iomem + reg) 12 + #define bcast_read(reg) readl(ip->iomem + reg) 13 + 14 + void lima_bcast_enable(struct lima_device *dev, int num_pp) 15 + { 16 + struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; 17 + struct lima_ip *ip = dev->ip + lima_ip_bcast; 18 + int i, mask = bcast_read(LIMA_BCAST_BROADCAST_MASK) & 0xffff0000; 19 + 20 + for (i = 0; i < num_pp; i++) { 21 + struct lima_ip *pp = pipe->processor[i]; 22 + 23 + mask |= 1 << (pp->id - lima_ip_pp0); 24 + } 25 + 26 + bcast_write(LIMA_BCAST_BROADCAST_MASK, mask); 27 + } 28 + 29 + int lima_bcast_init(struct lima_ip *ip) 30 + { 31 + int i, mask = 0; 32 + 33 + for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) { 34 + if (ip->dev->ip[i].present) 35 + mask |= 1 << (i - lima_ip_pp0); 36 + } 37 + 38 + bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16); 39 + bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask); 40 + return 0; 41 + } 42 + 43 + void lima_bcast_fini(struct lima_ip *ip) 44 + { 45 + 46 + } 47 +
+14
drivers/gpu/drm/lima/lima_bcast.h
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */

#ifndef __LIMA_BCAST_H__
#define __LIMA_BCAST_H__

struct lima_ip;

/* IP init/fini hooks registered in the lima_device IP descriptor table. */
int lima_bcast_init(struct lima_ip *ip);
void lima_bcast_fini(struct lima_ip *ip);

/* Route broadcast writes to the first num_pp processors of the PP pipe. */
void lima_bcast_enable(struct lima_device *dev, int num_pp);

#endif
+98
drivers/gpu/drm/lima/lima_ctx.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/slab.h> 5 + 6 + #include "lima_device.h" 7 + #include "lima_ctx.h" 8 + 9 + int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id) 10 + { 11 + struct lima_ctx *ctx; 12 + int i, err; 13 + 14 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 15 + if (!ctx) 16 + return -ENOMEM; 17 + ctx->dev = dev; 18 + kref_init(&ctx->refcnt); 19 + 20 + for (i = 0; i < lima_pipe_num; i++) { 21 + err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty); 22 + if (err) 23 + goto err_out0; 24 + } 25 + 26 + err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL); 27 + if (err < 0) 28 + goto err_out0; 29 + 30 + return 0; 31 + 32 + err_out0: 33 + for (i--; i >= 0; i--) 34 + lima_sched_context_fini(dev->pipe + i, ctx->context + i); 35 + kfree(ctx); 36 + return err; 37 + } 38 + 39 + static void lima_ctx_do_release(struct kref *ref) 40 + { 41 + struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt); 42 + int i; 43 + 44 + for (i = 0; i < lima_pipe_num; i++) 45 + lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i); 46 + kfree(ctx); 47 + } 48 + 49 + int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id) 50 + { 51 + struct lima_ctx *ctx; 52 + int ret = 0; 53 + 54 + mutex_lock(&mgr->lock); 55 + ctx = xa_erase(&mgr->handles, id); 56 + if (ctx) 57 + kref_put(&ctx->refcnt, lima_ctx_do_release); 58 + else 59 + ret = -EINVAL; 60 + mutex_unlock(&mgr->lock); 61 + return ret; 62 + } 63 + 64 + struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id) 65 + { 66 + struct lima_ctx *ctx; 67 + 68 + mutex_lock(&mgr->lock); 69 + ctx = xa_load(&mgr->handles, id); 70 + if (ctx) 71 + kref_get(&ctx->refcnt); 72 + mutex_unlock(&mgr->lock); 73 + return ctx; 74 + } 75 + 76 + void lima_ctx_put(struct lima_ctx *ctx) 77 + { 78 + kref_put(&ctx->refcnt, lima_ctx_do_release); 79 + } 80 + 81 + void lima_ctx_mgr_init(struct 
lima_ctx_mgr *mgr) 82 + { 83 + mutex_init(&mgr->lock); 84 + xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC); 85 + } 86 + 87 + void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr) 88 + { 89 + struct lima_ctx *ctx; 90 + unsigned long id; 91 + 92 + xa_for_each(&mgr->handles, id, ctx) { 93 + kref_put(&ctx->refcnt, lima_ctx_do_release); 94 + } 95 + 96 + xa_destroy(&mgr->handles); 97 + mutex_destroy(&mgr->lock); 98 + }
+30
drivers/gpu/drm/lima/lima_ctx.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_CTX_H__ 5 + #define __LIMA_CTX_H__ 6 + 7 + #include <linux/xarray.h> 8 + 9 + #include "lima_device.h" 10 + 11 + struct lima_ctx { 12 + struct kref refcnt; 13 + struct lima_device *dev; 14 + struct lima_sched_context context[lima_pipe_num]; 15 + atomic_t guilty; 16 + }; 17 + 18 + struct lima_ctx_mgr { 19 + struct mutex lock; 20 + struct xarray handles; 21 + }; 22 + 23 + int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id); 24 + int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id); 25 + struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id); 26 + void lima_ctx_put(struct lima_ctx *ctx); 27 + void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr); 28 + void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr); 29 + 30 + #endif
+385
drivers/gpu/drm/lima/lima_device.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/regulator/consumer.h> 5 + #include <linux/reset.h> 6 + #include <linux/clk.h> 7 + #include <linux/dma-mapping.h> 8 + #include <linux/platform_device.h> 9 + 10 + #include "lima_device.h" 11 + #include "lima_gp.h" 12 + #include "lima_pp.h" 13 + #include "lima_mmu.h" 14 + #include "lima_pmu.h" 15 + #include "lima_l2_cache.h" 16 + #include "lima_dlbu.h" 17 + #include "lima_bcast.h" 18 + #include "lima_vm.h" 19 + 20 + struct lima_ip_desc { 21 + char *name; 22 + char *irq_name; 23 + bool must_have[lima_gpu_num]; 24 + int offset[lima_gpu_num]; 25 + 26 + int (*init)(struct lima_ip *ip); 27 + void (*fini)(struct lima_ip *ip); 28 + }; 29 + 30 + #define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \ 31 + [lima_ip_##ipname] = { \ 32 + .name = #ipname, \ 33 + .irq_name = irq, \ 34 + .must_have = { \ 35 + [lima_gpu_mali400] = mst0, \ 36 + [lima_gpu_mali450] = mst1, \ 37 + }, \ 38 + .offset = { \ 39 + [lima_gpu_mali400] = off0, \ 40 + [lima_gpu_mali450] = off1, \ 41 + }, \ 42 + .init = lima_##func##_init, \ 43 + .fini = lima_##func##_fini, \ 44 + } 45 + 46 + static struct lima_ip_desc lima_ip_desc[lima_ip_num] = { 47 + LIMA_IP_DESC(pmu, false, false, 0x02000, 0x02000, pmu, "pmu"), 48 + LIMA_IP_DESC(l2_cache0, true, true, 0x01000, 0x10000, l2_cache, NULL), 49 + LIMA_IP_DESC(l2_cache1, false, true, -1, 0x01000, l2_cache, NULL), 50 + LIMA_IP_DESC(l2_cache2, false, false, -1, 0x11000, l2_cache, NULL), 51 + LIMA_IP_DESC(gp, true, true, 0x00000, 0x00000, gp, "gp"), 52 + LIMA_IP_DESC(pp0, true, true, 0x08000, 0x08000, pp, "pp0"), 53 + LIMA_IP_DESC(pp1, false, false, 0x0A000, 0x0A000, pp, "pp1"), 54 + LIMA_IP_DESC(pp2, false, false, 0x0C000, 0x0C000, pp, "pp2"), 55 + LIMA_IP_DESC(pp3, false, false, 0x0E000, 0x0E000, pp, "pp3"), 56 + LIMA_IP_DESC(pp4, false, false, -1, 0x28000, pp, "pp4"), 57 + LIMA_IP_DESC(pp5, false, false, -1, 0x2A000, pp, 
"pp5"), 58 + LIMA_IP_DESC(pp6, false, false, -1, 0x2C000, pp, "pp6"), 59 + LIMA_IP_DESC(pp7, false, false, -1, 0x2E000, pp, "pp7"), 60 + LIMA_IP_DESC(gpmmu, true, true, 0x03000, 0x03000, mmu, "gpmmu"), 61 + LIMA_IP_DESC(ppmmu0, true, true, 0x04000, 0x04000, mmu, "ppmmu0"), 62 + LIMA_IP_DESC(ppmmu1, false, false, 0x05000, 0x05000, mmu, "ppmmu1"), 63 + LIMA_IP_DESC(ppmmu2, false, false, 0x06000, 0x06000, mmu, "ppmmu2"), 64 + LIMA_IP_DESC(ppmmu3, false, false, 0x07000, 0x07000, mmu, "ppmmu3"), 65 + LIMA_IP_DESC(ppmmu4, false, false, -1, 0x1C000, mmu, "ppmmu4"), 66 + LIMA_IP_DESC(ppmmu5, false, false, -1, 0x1D000, mmu, "ppmmu5"), 67 + LIMA_IP_DESC(ppmmu6, false, false, -1, 0x1E000, mmu, "ppmmu6"), 68 + LIMA_IP_DESC(ppmmu7, false, false, -1, 0x1F000, mmu, "ppmmu7"), 69 + LIMA_IP_DESC(dlbu, false, true, -1, 0x14000, dlbu, NULL), 70 + LIMA_IP_DESC(bcast, false, true, -1, 0x13000, bcast, NULL), 71 + LIMA_IP_DESC(pp_bcast, false, true, -1, 0x16000, pp_bcast, "pp"), 72 + LIMA_IP_DESC(ppmmu_bcast, false, true, -1, 0x15000, mmu, NULL), 73 + }; 74 + 75 + const char *lima_ip_name(struct lima_ip *ip) 76 + { 77 + return lima_ip_desc[ip->id].name; 78 + } 79 + 80 + static int lima_clk_init(struct lima_device *dev) 81 + { 82 + int err; 83 + unsigned long bus_rate, gpu_rate; 84 + 85 + dev->clk_bus = devm_clk_get(dev->dev, "bus"); 86 + if (IS_ERR(dev->clk_bus)) { 87 + dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus)); 88 + return PTR_ERR(dev->clk_bus); 89 + } 90 + 91 + dev->clk_gpu = devm_clk_get(dev->dev, "core"); 92 + if (IS_ERR(dev->clk_gpu)) { 93 + dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu)); 94 + return PTR_ERR(dev->clk_gpu); 95 + } 96 + 97 + bus_rate = clk_get_rate(dev->clk_bus); 98 + dev_info(dev->dev, "bus rate = %lu\n", bus_rate); 99 + 100 + gpu_rate = clk_get_rate(dev->clk_gpu); 101 + dev_info(dev->dev, "mod rate = %lu", gpu_rate); 102 + 103 + err = clk_prepare_enable(dev->clk_bus); 104 + if (err) 105 + return err; 106 + 107 + err 
= clk_prepare_enable(dev->clk_gpu); 108 + if (err) 109 + goto error_out0; 110 + 111 + dev->reset = devm_reset_control_get_optional(dev->dev, NULL); 112 + if (IS_ERR(dev->reset)) { 113 + err = PTR_ERR(dev->reset); 114 + goto error_out1; 115 + } else if (dev->reset != NULL) { 116 + err = reset_control_deassert(dev->reset); 117 + if (err) 118 + goto error_out1; 119 + } 120 + 121 + return 0; 122 + 123 + error_out1: 124 + clk_disable_unprepare(dev->clk_gpu); 125 + error_out0: 126 + clk_disable_unprepare(dev->clk_bus); 127 + return err; 128 + } 129 + 130 + static void lima_clk_fini(struct lima_device *dev) 131 + { 132 + if (dev->reset != NULL) 133 + reset_control_assert(dev->reset); 134 + clk_disable_unprepare(dev->clk_gpu); 135 + clk_disable_unprepare(dev->clk_bus); 136 + } 137 + 138 + static int lima_regulator_init(struct lima_device *dev) 139 + { 140 + int ret; 141 + 142 + dev->regulator = devm_regulator_get_optional(dev->dev, "mali"); 143 + if (IS_ERR(dev->regulator)) { 144 + ret = PTR_ERR(dev->regulator); 145 + dev->regulator = NULL; 146 + if (ret == -ENODEV) 147 + return 0; 148 + dev_err(dev->dev, "failed to get regulator: %d\n", ret); 149 + return ret; 150 + } 151 + 152 + ret = regulator_enable(dev->regulator); 153 + if (ret < 0) { 154 + dev_err(dev->dev, "failed to enable regulator: %d\n", ret); 155 + return ret; 156 + } 157 + 158 + return 0; 159 + } 160 + 161 + static void lima_regulator_fini(struct lima_device *dev) 162 + { 163 + if (dev->regulator) 164 + regulator_disable(dev->regulator); 165 + } 166 + 167 + static int lima_init_ip(struct lima_device *dev, int index) 168 + { 169 + struct lima_ip_desc *desc = lima_ip_desc + index; 170 + struct lima_ip *ip = dev->ip + index; 171 + int offset = desc->offset[dev->id]; 172 + bool must = desc->must_have[dev->id]; 173 + int err; 174 + 175 + if (offset < 0) 176 + return 0; 177 + 178 + ip->dev = dev; 179 + ip->id = index; 180 + ip->iomem = dev->iomem + offset; 181 + if (desc->irq_name) { 182 + err = 
platform_get_irq_byname(dev->pdev, desc->irq_name); 183 + if (err < 0) 184 + goto out; 185 + ip->irq = err; 186 + } 187 + 188 + err = desc->init(ip); 189 + if (!err) { 190 + ip->present = true; 191 + return 0; 192 + } 193 + 194 + out: 195 + return must ? err : 0; 196 + } 197 + 198 + static void lima_fini_ip(struct lima_device *ldev, int index) 199 + { 200 + struct lima_ip_desc *desc = lima_ip_desc + index; 201 + struct lima_ip *ip = ldev->ip + index; 202 + 203 + if (ip->present) 204 + desc->fini(ip); 205 + } 206 + 207 + static int lima_init_gp_pipe(struct lima_device *dev) 208 + { 209 + struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp; 210 + int err; 211 + 212 + err = lima_sched_pipe_init(pipe, "gp"); 213 + if (err) 214 + return err; 215 + 216 + pipe->l2_cache[pipe->num_l2_cache++] = dev->ip + lima_ip_l2_cache0; 217 + pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu; 218 + pipe->processor[pipe->num_processor++] = dev->ip + lima_ip_gp; 219 + 220 + err = lima_gp_pipe_init(dev); 221 + if (err) { 222 + lima_sched_pipe_fini(pipe); 223 + return err; 224 + } 225 + 226 + return 0; 227 + } 228 + 229 + static void lima_fini_gp_pipe(struct lima_device *dev) 230 + { 231 + struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp; 232 + 233 + lima_gp_pipe_fini(dev); 234 + lima_sched_pipe_fini(pipe); 235 + } 236 + 237 + static int lima_init_pp_pipe(struct lima_device *dev) 238 + { 239 + struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; 240 + int err, i; 241 + 242 + err = lima_sched_pipe_init(pipe, "pp"); 243 + if (err) 244 + return err; 245 + 246 + for (i = 0; i < LIMA_SCHED_PIPE_MAX_PROCESSOR; i++) { 247 + struct lima_ip *pp = dev->ip + lima_ip_pp0 + i; 248 + struct lima_ip *ppmmu = dev->ip + lima_ip_ppmmu0 + i; 249 + struct lima_ip *l2_cache; 250 + 251 + if (dev->id == lima_gpu_mali400) 252 + l2_cache = dev->ip + lima_ip_l2_cache0; 253 + else 254 + l2_cache = dev->ip + lima_ip_l2_cache1 + (i >> 2); 255 + 256 + if (pp->present && ppmmu->present && 
l2_cache->present) { 257 + pipe->mmu[pipe->num_mmu++] = ppmmu; 258 + pipe->processor[pipe->num_processor++] = pp; 259 + if (!pipe->l2_cache[i >> 2]) 260 + pipe->l2_cache[pipe->num_l2_cache++] = l2_cache; 261 + } 262 + } 263 + 264 + if (dev->ip[lima_ip_bcast].present) { 265 + pipe->bcast_processor = dev->ip + lima_ip_pp_bcast; 266 + pipe->bcast_mmu = dev->ip + lima_ip_ppmmu_bcast; 267 + } 268 + 269 + err = lima_pp_pipe_init(dev); 270 + if (err) { 271 + lima_sched_pipe_fini(pipe); 272 + return err; 273 + } 274 + 275 + return 0; 276 + } 277 + 278 + static void lima_fini_pp_pipe(struct lima_device *dev) 279 + { 280 + struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; 281 + 282 + lima_pp_pipe_fini(dev); 283 + lima_sched_pipe_fini(pipe); 284 + } 285 + 286 + int lima_device_init(struct lima_device *ldev) 287 + { 288 + int err, i; 289 + struct resource *res; 290 + 291 + dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32)); 292 + 293 + err = lima_clk_init(ldev); 294 + if (err) { 295 + dev_err(ldev->dev, "clk init fail %d\n", err); 296 + return err; 297 + } 298 + 299 + err = lima_regulator_init(ldev); 300 + if (err) { 301 + dev_err(ldev->dev, "regulator init fail %d\n", err); 302 + goto err_out0; 303 + } 304 + 305 + ldev->empty_vm = lima_vm_create(ldev); 306 + if (!ldev->empty_vm) { 307 + err = -ENOMEM; 308 + goto err_out1; 309 + } 310 + 311 + ldev->va_start = 0; 312 + if (ldev->id == lima_gpu_mali450) { 313 + ldev->va_end = LIMA_VA_RESERVE_START; 314 + ldev->dlbu_cpu = dma_alloc_wc( 315 + ldev->dev, LIMA_PAGE_SIZE, 316 + &ldev->dlbu_dma, GFP_KERNEL); 317 + if (!ldev->dlbu_cpu) { 318 + err = -ENOMEM; 319 + goto err_out2; 320 + } 321 + } else 322 + ldev->va_end = LIMA_VA_RESERVE_END; 323 + 324 + res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0); 325 + ldev->iomem = devm_ioremap_resource(ldev->dev, res); 326 + if (IS_ERR(ldev->iomem)) { 327 + dev_err(ldev->dev, "fail to ioremap iomem\n"); 328 + err = PTR_ERR(ldev->iomem); 329 + goto err_out3; 330 + } 331 + 332 + 
for (i = 0; i < lima_ip_num; i++) { 333 + err = lima_init_ip(ldev, i); 334 + if (err) 335 + goto err_out4; 336 + } 337 + 338 + err = lima_init_gp_pipe(ldev); 339 + if (err) 340 + goto err_out4; 341 + 342 + err = lima_init_pp_pipe(ldev); 343 + if (err) 344 + goto err_out5; 345 + 346 + return 0; 347 + 348 + err_out5: 349 + lima_fini_gp_pipe(ldev); 350 + err_out4: 351 + while (--i >= 0) 352 + lima_fini_ip(ldev, i); 353 + err_out3: 354 + if (ldev->dlbu_cpu) 355 + dma_free_wc(ldev->dev, LIMA_PAGE_SIZE, 356 + ldev->dlbu_cpu, ldev->dlbu_dma); 357 + err_out2: 358 + lima_vm_put(ldev->empty_vm); 359 + err_out1: 360 + lima_regulator_fini(ldev); 361 + err_out0: 362 + lima_clk_fini(ldev); 363 + return err; 364 + } 365 + 366 + void lima_device_fini(struct lima_device *ldev) 367 + { 368 + int i; 369 + 370 + lima_fini_pp_pipe(ldev); 371 + lima_fini_gp_pipe(ldev); 372 + 373 + for (i = lima_ip_num - 1; i >= 0; i--) 374 + lima_fini_ip(ldev, i); 375 + 376 + if (ldev->dlbu_cpu) 377 + dma_free_wc(ldev->dev, LIMA_PAGE_SIZE, 378 + ldev->dlbu_cpu, ldev->dlbu_dma); 379 + 380 + lima_vm_put(ldev->empty_vm); 381 + 382 + lima_regulator_fini(ldev); 383 + 384 + lima_clk_fini(ldev); 385 + }
+131
drivers/gpu/drm/lima/lima_device.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_DEVICE_H__ 5 + #define __LIMA_DEVICE_H__ 6 + 7 + #include <drm/drm_device.h> 8 + #include <linux/delay.h> 9 + 10 + #include "lima_sched.h" 11 + 12 + enum lima_gpu_id { 13 + lima_gpu_mali400 = 0, 14 + lima_gpu_mali450, 15 + lima_gpu_num, 16 + }; 17 + 18 + enum lima_ip_id { 19 + lima_ip_pmu, 20 + lima_ip_gpmmu, 21 + lima_ip_ppmmu0, 22 + lima_ip_ppmmu1, 23 + lima_ip_ppmmu2, 24 + lima_ip_ppmmu3, 25 + lima_ip_ppmmu4, 26 + lima_ip_ppmmu5, 27 + lima_ip_ppmmu6, 28 + lima_ip_ppmmu7, 29 + lima_ip_gp, 30 + lima_ip_pp0, 31 + lima_ip_pp1, 32 + lima_ip_pp2, 33 + lima_ip_pp3, 34 + lima_ip_pp4, 35 + lima_ip_pp5, 36 + lima_ip_pp6, 37 + lima_ip_pp7, 38 + lima_ip_l2_cache0, 39 + lima_ip_l2_cache1, 40 + lima_ip_l2_cache2, 41 + lima_ip_dlbu, 42 + lima_ip_bcast, 43 + lima_ip_pp_bcast, 44 + lima_ip_ppmmu_bcast, 45 + lima_ip_num, 46 + }; 47 + 48 + struct lima_device; 49 + 50 + struct lima_ip { 51 + struct lima_device *dev; 52 + enum lima_ip_id id; 53 + bool present; 54 + 55 + void __iomem *iomem; 56 + int irq; 57 + 58 + union { 59 + /* gp/pp */ 60 + bool async_reset; 61 + /* l2 cache */ 62 + spinlock_t lock; 63 + } data; 64 + }; 65 + 66 + enum lima_pipe_id { 67 + lima_pipe_gp, 68 + lima_pipe_pp, 69 + lima_pipe_num, 70 + }; 71 + 72 + struct lima_device { 73 + struct device *dev; 74 + struct drm_device *ddev; 75 + struct platform_device *pdev; 76 + 77 + enum lima_gpu_id id; 78 + u32 gp_version; 79 + u32 pp_version; 80 + int num_pp; 81 + 82 + void __iomem *iomem; 83 + struct clk *clk_bus; 84 + struct clk *clk_gpu; 85 + struct reset_control *reset; 86 + struct regulator *regulator; 87 + 88 + struct lima_ip ip[lima_ip_num]; 89 + struct lima_sched_pipe pipe[lima_pipe_num]; 90 + 91 + struct lima_vm *empty_vm; 92 + uint64_t va_start; 93 + uint64_t va_end; 94 + 95 + u32 *dlbu_cpu; 96 + dma_addr_t dlbu_dma; 97 + }; 98 + 99 + static inline struct lima_device * 100 + 
to_lima_dev(struct drm_device *dev) 101 + { 102 + return dev->dev_private; 103 + } 104 + 105 + int lima_device_init(struct lima_device *ldev); 106 + void lima_device_fini(struct lima_device *ldev); 107 + 108 + const char *lima_ip_name(struct lima_ip *ip); 109 + 110 + typedef int (*lima_poll_func_t)(struct lima_ip *); 111 + 112 + static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func, 113 + int sleep_us, int timeout_us) 114 + { 115 + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); 116 + 117 + might_sleep_if(sleep_us); 118 + while (1) { 119 + if (func(ip)) 120 + return 0; 121 + 122 + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) 123 + return -ETIMEDOUT; 124 + 125 + if (sleep_us) 126 + usleep_range((sleep_us >> 2) + 1, sleep_us); 127 + } 128 + return 0; 129 + } 130 + 131 + #endif
+58
drivers/gpu/drm/lima/lima_dlbu.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/io.h> 5 + #include <linux/device.h> 6 + 7 + #include "lima_device.h" 8 + #include "lima_dlbu.h" 9 + #include "lima_vm.h" 10 + #include "lima_regs.h" 11 + 12 + #define dlbu_write(reg, data) writel(data, ip->iomem + reg) 13 + #define dlbu_read(reg) readl(ip->iomem + reg) 14 + 15 + void lima_dlbu_enable(struct lima_device *dev, int num_pp) 16 + { 17 + struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; 18 + struct lima_ip *ip = dev->ip + lima_ip_dlbu; 19 + int i, mask = 0; 20 + 21 + for (i = 0; i < num_pp; i++) { 22 + struct lima_ip *pp = pipe->processor[i]; 23 + 24 + mask |= 1 << (pp->id - lima_ip_pp0); 25 + } 26 + 27 + dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, mask); 28 + } 29 + 30 + void lima_dlbu_disable(struct lima_device *dev) 31 + { 32 + struct lima_ip *ip = dev->ip + lima_ip_dlbu; 33 + 34 + dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, 0); 35 + } 36 + 37 + void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg) 38 + { 39 + dlbu_write(LIMA_DLBU_TLLIST_VBASEADDR, reg[0]); 40 + dlbu_write(LIMA_DLBU_FB_DIM, reg[1]); 41 + dlbu_write(LIMA_DLBU_TLLIST_CONF, reg[2]); 42 + dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]); 43 + } 44 + 45 + int lima_dlbu_init(struct lima_ip *ip) 46 + { 47 + struct lima_device *dev = ip->dev; 48 + 49 + dlbu_write(LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1); 50 + dlbu_write(LIMA_DLBU_MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU); 51 + 52 + return 0; 53 + } 54 + 55 + void lima_dlbu_fini(struct lima_ip *ip) 56 + { 57 + 58 + }
+18
drivers/gpu/drm/lima/lima_dlbu.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_DLBU_H__ 5 + #define __LIMA_DLBU_H__ 6 + 7 + struct lima_ip; 8 + struct lima_device; 9 + 10 + void lima_dlbu_enable(struct lima_device *dev, int num_pp); 11 + void lima_dlbu_disable(struct lima_device *dev); 12 + 13 + void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg); 14 + 15 + int lima_dlbu_init(struct lima_ip *ip); 16 + void lima_dlbu_fini(struct lima_ip *ip); 17 + 18 + #endif
+376
drivers/gpu/drm/lima/lima_drv.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/module.h> 5 + #include <linux/of_platform.h> 6 + #include <linux/uaccess.h> 7 + #include <linux/slab.h> 8 + #include <drm/drm_ioctl.h> 9 + #include <drm/drm_drv.h> 10 + #include <drm/drm_prime.h> 11 + #include <drm/lima_drm.h> 12 + 13 + #include "lima_drv.h" 14 + #include "lima_gem.h" 15 + #include "lima_gem_prime.h" 16 + #include "lima_vm.h" 17 + 18 + int lima_sched_timeout_ms; 19 + 20 + MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))"); 21 + module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444); 22 + 23 + static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file) 24 + { 25 + struct drm_lima_get_param *args = data; 26 + struct lima_device *ldev = to_lima_dev(dev); 27 + 28 + if (args->pad) 29 + return -EINVAL; 30 + 31 + switch (args->param) { 32 + case DRM_LIMA_PARAM_GPU_ID: 33 + switch (ldev->id) { 34 + case lima_gpu_mali400: 35 + args->value = DRM_LIMA_PARAM_GPU_ID_MALI400; 36 + break; 37 + case lima_gpu_mali450: 38 + args->value = DRM_LIMA_PARAM_GPU_ID_MALI450; 39 + break; 40 + default: 41 + args->value = DRM_LIMA_PARAM_GPU_ID_UNKNOWN; 42 + break; 43 + } 44 + break; 45 + 46 + case DRM_LIMA_PARAM_NUM_PP: 47 + args->value = ldev->pipe[lima_pipe_pp].num_processor; 48 + break; 49 + 50 + case DRM_LIMA_PARAM_GP_VERSION: 51 + args->value = ldev->gp_version; 52 + break; 53 + 54 + case DRM_LIMA_PARAM_PP_VERSION: 55 + args->value = ldev->pp_version; 56 + break; 57 + 58 + default: 59 + return -EINVAL; 60 + } 61 + 62 + return 0; 63 + } 64 + 65 + static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file) 66 + { 67 + struct drm_lima_gem_create *args = data; 68 + 69 + if (args->pad) 70 + return -EINVAL; 71 + 72 + if (args->flags) 73 + return -EINVAL; 74 + 75 + if (args->size == 0) 76 + return -EINVAL; 77 + 78 + return 
lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle); 79 + } 80 + 81 + static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file) 82 + { 83 + struct drm_lima_gem_info *args = data; 84 + 85 + return lima_gem_get_info(file, args->handle, &args->va, &args->offset); 86 + } 87 + 88 + static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file) 89 + { 90 + struct drm_lima_gem_submit *args = data; 91 + struct lima_device *ldev = to_lima_dev(dev); 92 + struct lima_drm_priv *priv = file->driver_priv; 93 + struct drm_lima_gem_submit_bo *bos; 94 + struct lima_sched_pipe *pipe; 95 + struct lima_sched_task *task; 96 + struct lima_ctx *ctx; 97 + struct lima_submit submit = {0}; 98 + size_t size; 99 + int err = 0; 100 + 101 + if (args->pipe >= lima_pipe_num || args->nr_bos == 0) 102 + return -EINVAL; 103 + 104 + if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE)) 105 + return -EINVAL; 106 + 107 + pipe = ldev->pipe + args->pipe; 108 + if (args->frame_size != pipe->frame_size) 109 + return -EINVAL; 110 + 111 + bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL); 112 + if (!bos) 113 + return -ENOMEM; 114 + 115 + size = args->nr_bos * sizeof(*submit.bos); 116 + if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) { 117 + err = -EFAULT; 118 + goto out0; 119 + } 120 + 121 + task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL); 122 + if (!task) { 123 + err = -ENOMEM; 124 + goto out0; 125 + } 126 + 127 + task->frame = task + 1; 128 + if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) { 129 + err = -EFAULT; 130 + goto out1; 131 + } 132 + 133 + err = pipe->task_validate(pipe, task); 134 + if (err) 135 + goto out1; 136 + 137 + ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx); 138 + if (!ctx) { 139 + err = -ENOENT; 140 + goto out1; 141 + } 142 + 143 + submit.pipe = args->pipe; 144 + submit.bos = bos; 145 + submit.lbos = (void *)bos + size; 
146 + submit.nr_bos = args->nr_bos; 147 + submit.task = task; 148 + submit.ctx = ctx; 149 + submit.flags = args->flags; 150 + submit.in_sync[0] = args->in_sync[0]; 151 + submit.in_sync[1] = args->in_sync[1]; 152 + submit.out_sync = args->out_sync; 153 + 154 + err = lima_gem_submit(file, &submit); 155 + 156 + lima_ctx_put(ctx); 157 + out1: 158 + if (err) 159 + kmem_cache_free(pipe->task_slab, task); 160 + out0: 161 + kvfree(bos); 162 + return err; 163 + } 164 + 165 + static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file) 166 + { 167 + struct drm_lima_gem_wait *args = data; 168 + 169 + if (args->op & ~(LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE)) 170 + return -EINVAL; 171 + 172 + return lima_gem_wait(file, args->handle, args->op, args->timeout_ns); 173 + } 174 + 175 + static int lima_ioctl_ctx_create(struct drm_device *dev, void *data, struct drm_file *file) 176 + { 177 + struct drm_lima_ctx_create *args = data; 178 + struct lima_drm_priv *priv = file->driver_priv; 179 + struct lima_device *ldev = to_lima_dev(dev); 180 + 181 + if (args->_pad) 182 + return -EINVAL; 183 + 184 + return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id); 185 + } 186 + 187 + static int lima_ioctl_ctx_free(struct drm_device *dev, void *data, struct drm_file *file) 188 + { 189 + struct drm_lima_ctx_create *args = data; 190 + struct lima_drm_priv *priv = file->driver_priv; 191 + 192 + if (args->_pad) 193 + return -EINVAL; 194 + 195 + return lima_ctx_free(&priv->ctx_mgr, args->id); 196 + } 197 + 198 + static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file) 199 + { 200 + int err; 201 + struct lima_drm_priv *priv; 202 + struct lima_device *ldev = to_lima_dev(dev); 203 + 204 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 205 + if (!priv) 206 + return -ENOMEM; 207 + 208 + priv->vm = lima_vm_create(ldev); 209 + if (!priv->vm) { 210 + err = -ENOMEM; 211 + goto err_out0; 212 + } 213 + 214 + lima_ctx_mgr_init(&priv->ctx_mgr); 215 + 216 + 
file->driver_priv = priv; 217 + return 0; 218 + 219 + err_out0: 220 + kfree(priv); 221 + return err; 222 + } 223 + 224 + static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file) 225 + { 226 + struct lima_drm_priv *priv = file->driver_priv; 227 + 228 + lima_ctx_mgr_fini(&priv->ctx_mgr); 229 + lima_vm_put(priv->vm); 230 + kfree(priv); 231 + } 232 + 233 + static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = { 234 + DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW), 235 + DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW), 236 + DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW), 237 + DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), 238 + DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW), 239 + DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_AUTH|DRM_RENDER_ALLOW), 240 + DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_AUTH|DRM_RENDER_ALLOW), 241 + }; 242 + 243 + static const struct file_operations lima_drm_driver_fops = { 244 + .owner = THIS_MODULE, 245 + .open = drm_open, 246 + .release = drm_release, 247 + .unlocked_ioctl = drm_ioctl, 248 + #ifdef CONFIG_COMPAT 249 + .compat_ioctl = drm_compat_ioctl, 250 + #endif 251 + .mmap = lima_gem_mmap, 252 + }; 253 + 254 + static struct drm_driver lima_drm_driver = { 255 + .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | DRIVER_SYNCOBJ, 256 + .open = lima_drm_driver_open, 257 + .postclose = lima_drm_driver_postclose, 258 + .ioctls = lima_drm_driver_ioctls, 259 + .num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls), 260 + .fops = &lima_drm_driver_fops, 261 + .gem_free_object_unlocked = lima_gem_free_object, 262 + .gem_open_object = lima_gem_object_open, 263 + .gem_close_object = lima_gem_object_close, 264 + .gem_vm_ops = &lima_gem_vm_ops, 265 + .name = "lima", 266 + .desc = "lima DRM", 267 + 
.date = "20190217", 268 + .major = 1, 269 + .minor = 0, 270 + .patchlevel = 0, 271 + 272 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 273 + .gem_prime_import_sg_table = lima_gem_prime_import_sg_table, 274 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 275 + .gem_prime_get_sg_table = lima_gem_prime_get_sg_table, 276 + .gem_prime_mmap = lima_gem_prime_mmap, 277 + }; 278 + 279 + static int lima_pdev_probe(struct platform_device *pdev) 280 + { 281 + struct lima_device *ldev; 282 + struct drm_device *ddev; 283 + int err; 284 + 285 + err = lima_sched_slab_init(); 286 + if (err) 287 + return err; 288 + 289 + ldev = devm_kzalloc(&pdev->dev, sizeof(*ldev), GFP_KERNEL); 290 + if (!ldev) { 291 + err = -ENOMEM; 292 + goto err_out0; 293 + } 294 + 295 + ldev->pdev = pdev; 296 + ldev->dev = &pdev->dev; 297 + ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev); 298 + 299 + platform_set_drvdata(pdev, ldev); 300 + 301 + /* Allocate and initialize the DRM device. */ 302 + ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev); 303 + if (IS_ERR(ddev)) 304 + return PTR_ERR(ddev); 305 + 306 + ddev->dev_private = ldev; 307 + ldev->ddev = ddev; 308 + 309 + err = lima_device_init(ldev); 310 + if (err) { 311 + dev_err(&pdev->dev, "Fatal error during GPU init\n"); 312 + goto err_out1; 313 + } 314 + 315 + /* 316 + * Register the DRM device with the core and the connectors with 317 + * sysfs. 
318 + */ 319 + err = drm_dev_register(ddev, 0); 320 + if (err < 0) 321 + goto err_out2; 322 + 323 + return 0; 324 + 325 + err_out2: 326 + lima_device_fini(ldev); 327 + err_out1: 328 + drm_dev_put(ddev); 329 + err_out0: 330 + lima_sched_slab_fini(); 331 + return err; 332 + } 333 + 334 + static int lima_pdev_remove(struct platform_device *pdev) 335 + { 336 + struct lima_device *ldev = platform_get_drvdata(pdev); 337 + struct drm_device *ddev = ldev->ddev; 338 + 339 + drm_dev_unregister(ddev); 340 + lima_device_fini(ldev); 341 + drm_dev_put(ddev); 342 + lima_sched_slab_fini(); 343 + return 0; 344 + } 345 + 346 + static const struct of_device_id dt_match[] = { 347 + { .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 }, 348 + { .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 }, 349 + {} 350 + }; 351 + MODULE_DEVICE_TABLE(of, dt_match); 352 + 353 + static struct platform_driver lima_platform_driver = { 354 + .probe = lima_pdev_probe, 355 + .remove = lima_pdev_remove, 356 + .driver = { 357 + .name = "lima", 358 + .of_match_table = dt_match, 359 + }, 360 + }; 361 + 362 + static int __init lima_init(void) 363 + { 364 + return platform_driver_register(&lima_platform_driver); 365 + } 366 + module_init(lima_init); 367 + 368 + static void __exit lima_exit(void) 369 + { 370 + platform_driver_unregister(&lima_platform_driver); 371 + } 372 + module_exit(lima_exit); 373 + 374 + MODULE_AUTHOR("Lima Project Developers"); 375 + MODULE_DESCRIPTION("Lima DRM Driver"); 376 + MODULE_LICENSE("GPL v2");
+45
drivers/gpu/drm/lima/lima_drv.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_DRV_H__ 5 + #define __LIMA_DRV_H__ 6 + 7 + #include <drm/drm_file.h> 8 + 9 + #include "lima_ctx.h" 10 + 11 + extern int lima_sched_timeout_ms; 12 + 13 + struct lima_vm; 14 + struct lima_bo; 15 + struct lima_sched_task; 16 + 17 + struct drm_lima_gem_submit_bo; 18 + 19 + struct lima_drm_priv { 20 + struct lima_vm *vm; 21 + struct lima_ctx_mgr ctx_mgr; 22 + }; 23 + 24 + struct lima_submit { 25 + struct lima_ctx *ctx; 26 + int pipe; 27 + u32 flags; 28 + 29 + struct drm_lima_gem_submit_bo *bos; 30 + struct lima_bo **lbos; 31 + u32 nr_bos; 32 + 33 + u32 in_sync[2]; 34 + u32 out_sync; 35 + 36 + struct lima_sched_task *task; 37 + }; 38 + 39 + static inline struct lima_drm_priv * 40 + to_lima_drm_priv(struct drm_file *file) 41 + { 42 + return file->driver_priv; 43 + } 44 + 45 + #endif
+381
drivers/gpu/drm/lima/lima_gem.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/sync_file.h> 5 + #include <linux/pfn_t.h> 6 + 7 + #include <drm/drm_file.h> 8 + #include <drm/drm_syncobj.h> 9 + #include <drm/drm_utils.h> 10 + 11 + #include <drm/lima_drm.h> 12 + 13 + #include "lima_drv.h" 14 + #include "lima_gem.h" 15 + #include "lima_gem_prime.h" 16 + #include "lima_vm.h" 17 + #include "lima_object.h" 18 + 19 + int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, 20 + u32 size, u32 flags, u32 *handle) 21 + { 22 + int err; 23 + struct lima_bo *bo; 24 + struct lima_device *ldev = to_lima_dev(dev); 25 + 26 + bo = lima_bo_create(ldev, size, flags, NULL, NULL); 27 + if (IS_ERR(bo)) 28 + return PTR_ERR(bo); 29 + 30 + err = drm_gem_handle_create(file, &bo->gem, handle); 31 + 32 + /* drop reference from allocate - handle holds it now */ 33 + drm_gem_object_put_unlocked(&bo->gem); 34 + 35 + return err; 36 + } 37 + 38 + void lima_gem_free_object(struct drm_gem_object *obj) 39 + { 40 + struct lima_bo *bo = to_lima_bo(obj); 41 + 42 + if (!list_empty(&bo->va)) 43 + dev_err(obj->dev->dev, "lima gem free bo still has va\n"); 44 + 45 + lima_bo_destroy(bo); 46 + } 47 + 48 + int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) 49 + { 50 + struct lima_bo *bo = to_lima_bo(obj); 51 + struct lima_drm_priv *priv = to_lima_drm_priv(file); 52 + struct lima_vm *vm = priv->vm; 53 + 54 + return lima_vm_bo_add(vm, bo, true); 55 + } 56 + 57 + void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file) 58 + { 59 + struct lima_bo *bo = to_lima_bo(obj); 60 + struct lima_drm_priv *priv = to_lima_drm_priv(file); 61 + struct lima_vm *vm = priv->vm; 62 + 63 + lima_vm_bo_del(vm, bo); 64 + } 65 + 66 + int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset) 67 + { 68 + struct drm_gem_object *obj; 69 + struct lima_bo *bo; 70 + struct lima_drm_priv *priv = 
to_lima_drm_priv(file); 71 + struct lima_vm *vm = priv->vm; 72 + int err; 73 + 74 + obj = drm_gem_object_lookup(file, handle); 75 + if (!obj) 76 + return -ENOENT; 77 + 78 + bo = to_lima_bo(obj); 79 + 80 + *va = lima_vm_get_va(vm, bo); 81 + 82 + err = drm_gem_create_mmap_offset(obj); 83 + if (!err) 84 + *offset = drm_vma_node_offset_addr(&obj->vma_node); 85 + 86 + drm_gem_object_put_unlocked(obj); 87 + return err; 88 + } 89 + 90 + static vm_fault_t lima_gem_fault(struct vm_fault *vmf) 91 + { 92 + struct vm_area_struct *vma = vmf->vma; 93 + struct drm_gem_object *obj = vma->vm_private_data; 94 + struct lima_bo *bo = to_lima_bo(obj); 95 + pfn_t pfn; 96 + pgoff_t pgoff; 97 + 98 + /* We don't use vmf->pgoff since that has the fake offset: */ 99 + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 100 + pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV); 101 + 102 + return vmf_insert_mixed(vma, vmf->address, pfn); 103 + } 104 + 105 + const struct vm_operations_struct lima_gem_vm_ops = { 106 + .fault = lima_gem_fault, 107 + .open = drm_gem_vm_open, 108 + .close = drm_gem_vm_close, 109 + }; 110 + 111 + void lima_set_vma_flags(struct vm_area_struct *vma) 112 + { 113 + pgprot_t prot = vm_get_page_prot(vma->vm_flags); 114 + 115 + vma->vm_flags |= VM_MIXEDMAP; 116 + vma->vm_flags &= ~VM_PFNMAP; 117 + vma->vm_page_prot = pgprot_writecombine(prot); 118 + } 119 + 120 + int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma) 121 + { 122 + int ret; 123 + 124 + ret = drm_gem_mmap(filp, vma); 125 + if (ret) 126 + return ret; 127 + 128 + lima_set_vma_flags(vma); 129 + return 0; 130 + } 131 + 132 + static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, 133 + bool write, bool explicit) 134 + { 135 + int err = 0; 136 + 137 + if (!write) { 138 + err = reservation_object_reserve_shared(bo->gem.resv, 1); 139 + if (err) 140 + return err; 141 + } 142 + 143 + /* explicit sync use user passed dep fence */ 144 + if (explicit) 145 + return 0; 146 + 147 
+ /* implicit sync use bo fence in resv obj */ 148 + if (write) { 149 + unsigned nr_fences; 150 + struct dma_fence **fences; 151 + int i; 152 + 153 + err = reservation_object_get_fences_rcu( 154 + bo->gem.resv, NULL, &nr_fences, &fences); 155 + if (err || !nr_fences) 156 + return err; 157 + 158 + for (i = 0; i < nr_fences; i++) { 159 + err = lima_sched_task_add_dep(task, fences[i]); 160 + if (err) 161 + break; 162 + } 163 + 164 + /* for error case free remaining fences */ 165 + for ( ; i < nr_fences; i++) 166 + dma_fence_put(fences[i]); 167 + 168 + kfree(fences); 169 + } else { 170 + struct dma_fence *fence; 171 + 172 + fence = reservation_object_get_excl_rcu(bo->gem.resv); 173 + if (fence) { 174 + err = lima_sched_task_add_dep(task, fence); 175 + if (err) 176 + dma_fence_put(fence); 177 + } 178 + } 179 + 180 + return err; 181 + } 182 + 183 + static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos, 184 + struct ww_acquire_ctx *ctx) 185 + { 186 + int i, ret = 0, contended, slow_locked = -1; 187 + 188 + ww_acquire_init(ctx, &reservation_ww_class); 189 + 190 + retry: 191 + for (i = 0; i < nr_bos; i++) { 192 + if (i == slow_locked) { 193 + slow_locked = -1; 194 + continue; 195 + } 196 + 197 + ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx); 198 + if (ret < 0) { 199 + contended = i; 200 + goto err; 201 + } 202 + } 203 + 204 + ww_acquire_done(ctx); 205 + return 0; 206 + 207 + err: 208 + for (i--; i >= 0; i--) 209 + ww_mutex_unlock(&bos[i]->gem.resv->lock); 210 + 211 + if (slow_locked >= 0) 212 + ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock); 213 + 214 + if (ret == -EDEADLK) { 215 + /* we lost out in a seqno race, lock and retry.. 
*/ 216 + ret = ww_mutex_lock_slow_interruptible( 217 + &bos[contended]->gem.resv->lock, ctx); 218 + if (!ret) { 219 + slow_locked = contended; 220 + goto retry; 221 + } 222 + } 223 + ww_acquire_fini(ctx); 224 + 225 + return ret; 226 + } 227 + 228 + static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos, 229 + struct ww_acquire_ctx *ctx) 230 + { 231 + int i; 232 + 233 + for (i = 0; i < nr_bos; i++) 234 + ww_mutex_unlock(&bos[i]->gem.resv->lock); 235 + ww_acquire_fini(ctx); 236 + } 237 + 238 + static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) 239 + { 240 + int i, err; 241 + 242 + for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) { 243 + struct dma_fence *fence = NULL; 244 + 245 + if (!submit->in_sync[i]) 246 + continue; 247 + 248 + err = drm_syncobj_find_fence(file, submit->in_sync[i], 249 + 0, 0, &fence); 250 + if (err) 251 + return err; 252 + 253 + err = lima_sched_task_add_dep(submit->task, fence); 254 + if (err) { 255 + dma_fence_put(fence); 256 + return err; 257 + } 258 + } 259 + 260 + return 0; 261 + } 262 + 263 + int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) 264 + { 265 + int i, err = 0; 266 + struct ww_acquire_ctx ctx; 267 + struct lima_drm_priv *priv = to_lima_drm_priv(file); 268 + struct lima_vm *vm = priv->vm; 269 + struct drm_syncobj *out_sync = NULL; 270 + struct dma_fence *fence; 271 + struct lima_bo **bos = submit->lbos; 272 + 273 + if (submit->out_sync) { 274 + out_sync = drm_syncobj_find(file, submit->out_sync); 275 + if (!out_sync) 276 + return -ENOENT; 277 + } 278 + 279 + for (i = 0; i < submit->nr_bos; i++) { 280 + struct drm_gem_object *obj; 281 + struct lima_bo *bo; 282 + 283 + obj = drm_gem_object_lookup(file, submit->bos[i].handle); 284 + if (!obj) { 285 + err = -ENOENT; 286 + goto err_out0; 287 + } 288 + 289 + bo = to_lima_bo(obj); 290 + 291 + /* increase refcnt of gpu va map to prevent unmapped when executing, 292 + * will be decreased when task done 293 + */ 294 + err = 
lima_vm_bo_add(vm, bo, false); 295 + if (err) { 296 + drm_gem_object_put_unlocked(obj); 297 + goto err_out0; 298 + } 299 + 300 + bos[i] = bo; 301 + } 302 + 303 + err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx); 304 + if (err) 305 + goto err_out0; 306 + 307 + err = lima_sched_task_init( 308 + submit->task, submit->ctx->context + submit->pipe, 309 + bos, submit->nr_bos, vm); 310 + if (err) 311 + goto err_out1; 312 + 313 + err = lima_gem_add_deps(file, submit); 314 + if (err) 315 + goto err_out2; 316 + 317 + for (i = 0; i < submit->nr_bos; i++) { 318 + err = lima_gem_sync_bo( 319 + submit->task, bos[i], 320 + submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE, 321 + submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE); 322 + if (err) 323 + goto err_out2; 324 + } 325 + 326 + fence = lima_sched_context_queue_task( 327 + submit->ctx->context + submit->pipe, submit->task); 328 + 329 + for (i = 0; i < submit->nr_bos; i++) { 330 + if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) 331 + reservation_object_add_excl_fence(bos[i]->gem.resv, fence); 332 + else 333 + reservation_object_add_shared_fence(bos[i]->gem.resv, fence); 334 + } 335 + 336 + lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); 337 + 338 + for (i = 0; i < submit->nr_bos; i++) 339 + drm_gem_object_put_unlocked(&bos[i]->gem); 340 + 341 + if (out_sync) { 342 + drm_syncobj_replace_fence(out_sync, fence); 343 + drm_syncobj_put(out_sync); 344 + } 345 + 346 + dma_fence_put(fence); 347 + 348 + return 0; 349 + 350 + err_out2: 351 + lima_sched_task_fini(submit->task); 352 + err_out1: 353 + lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); 354 + err_out0: 355 + for (i = 0; i < submit->nr_bos; i++) { 356 + if (!bos[i]) 357 + break; 358 + lima_vm_bo_del(vm, bos[i]); 359 + drm_gem_object_put_unlocked(&bos[i]->gem); 360 + } 361 + if (out_sync) 362 + drm_syncobj_put(out_sync); 363 + return err; 364 + } 365 + 366 + int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns) 367 + { 368 + bool write = op & 
LIMA_GEM_WAIT_WRITE; 369 + long ret, timeout; 370 + 371 + if (!op) 372 + return 0; 373 + 374 + timeout = drm_timeout_abs_to_jiffies(timeout_ns); 375 + 376 + ret = drm_gem_reservation_object_wait(file, handle, write, timeout); 377 + if (ret == 0) 378 + ret = timeout ? -ETIMEDOUT : -EBUSY; 379 + 380 + return ret; 381 + }
+25
drivers/gpu/drm/lima/lima_gem.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_GEM_H__ 5 + #define __LIMA_GEM_H__ 6 + 7 + struct lima_bo; 8 + struct lima_submit; 9 + 10 + extern const struct vm_operations_struct lima_gem_vm_ops; 11 + 12 + struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags); 13 + int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, 14 + u32 size, u32 flags, u32 *handle); 15 + void lima_gem_free_object(struct drm_gem_object *obj); 16 + int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file); 17 + void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file); 18 + int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset); 19 + int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma); 20 + int lima_gem_submit(struct drm_file *file, struct lima_submit *submit); 21 + int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns); 22 + 23 + void lima_set_vma_flags(struct vm_area_struct *vma); 24 + 25 + #endif
+47
drivers/gpu/drm/lima/lima_gem_prime.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/dma-buf.h> 5 + #include <drm/drm_prime.h> 6 + #include <drm/drm_drv.h> 7 + #include <drm/drm_file.h> 8 + 9 + #include "lima_device.h" 10 + #include "lima_object.h" 11 + #include "lima_gem.h" 12 + #include "lima_gem_prime.h" 13 + 14 + struct drm_gem_object *lima_gem_prime_import_sg_table( 15 + struct drm_device *dev, struct dma_buf_attachment *attach, 16 + struct sg_table *sgt) 17 + { 18 + struct lima_device *ldev = to_lima_dev(dev); 19 + struct lima_bo *bo; 20 + 21 + bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt, 22 + attach->dmabuf->resv); 23 + if (IS_ERR(bo)) 24 + return ERR_CAST(bo); 25 + 26 + return &bo->gem; 27 + } 28 + 29 + struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj) 30 + { 31 + struct lima_bo *bo = to_lima_bo(obj); 32 + int npages = obj->size >> PAGE_SHIFT; 33 + 34 + return drm_prime_pages_to_sg(bo->pages, npages); 35 + } 36 + 37 + int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 38 + { 39 + int ret; 40 + 41 + ret = drm_gem_mmap_obj(obj, obj->size, vma); 42 + if (ret) 43 + return ret; 44 + 45 + lima_set_vma_flags(vma); 46 + return 0; 47 + }
+13
drivers/gpu/drm/lima/lima_gem_prime.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_GEM_PRIME_H__ 5 + #define __LIMA_GEM_PRIME_H__ 6 + 7 + struct drm_gem_object *lima_gem_prime_import_sg_table( 8 + struct drm_device *dev, struct dma_buf_attachment *attach, 9 + struct sg_table *sgt); 10 + struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj); 11 + int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 12 + 13 + #endif
+283
drivers/gpu/drm/lima/lima_gp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/interrupt.h> 5 + #include <linux/iopoll.h> 6 + #include <linux/device.h> 7 + #include <linux/slab.h> 8 + 9 + #include <drm/lima_drm.h> 10 + 11 + #include "lima_device.h" 12 + #include "lima_gp.h" 13 + #include "lima_regs.h" 14 + 15 + #define gp_write(reg, data) writel(data, ip->iomem + reg) 16 + #define gp_read(reg) readl(ip->iomem + reg) 17 + 18 + static irqreturn_t lima_gp_irq_handler(int irq, void *data) 19 + { 20 + struct lima_ip *ip = data; 21 + struct lima_device *dev = ip->dev; 22 + struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp; 23 + u32 state = gp_read(LIMA_GP_INT_STAT); 24 + u32 status = gp_read(LIMA_GP_STATUS); 25 + bool done = false; 26 + 27 + /* for shared irq case */ 28 + if (!state) 29 + return IRQ_NONE; 30 + 31 + if (state & LIMA_GP_IRQ_MASK_ERROR) { 32 + dev_err(dev->dev, "gp error irq state=%x status=%x\n", 33 + state, status); 34 + 35 + /* mask all interrupts before hard reset */ 36 + gp_write(LIMA_GP_INT_MASK, 0); 37 + 38 + pipe->error = true; 39 + done = true; 40 + } else { 41 + bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST | 42 + LIMA_GP_IRQ_PLBU_END_CMD_LST); 43 + bool active = status & (LIMA_GP_STATUS_VS_ACTIVE | 44 + LIMA_GP_STATUS_PLBU_ACTIVE); 45 + done = valid && !active; 46 + } 47 + 48 + gp_write(LIMA_GP_INT_CLEAR, state); 49 + 50 + if (done) 51 + lima_sched_pipe_task_done(pipe); 52 + 53 + return IRQ_HANDLED; 54 + } 55 + 56 + static void lima_gp_soft_reset_async(struct lima_ip *ip) 57 + { 58 + if (ip->data.async_reset) 59 + return; 60 + 61 + gp_write(LIMA_GP_INT_MASK, 0); 62 + gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED); 63 + gp_write(LIMA_GP_CMD, LIMA_GP_CMD_SOFT_RESET); 64 + ip->data.async_reset = true; 65 + } 66 + 67 + static int lima_gp_soft_reset_async_wait(struct lima_ip *ip) 68 + { 69 + struct lima_device *dev = ip->dev; 70 + int err; 71 + u32 v; 72 + 73 + if 
(!ip->data.async_reset) 74 + return 0; 75 + 76 + err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, v, 77 + v & LIMA_GP_IRQ_RESET_COMPLETED, 78 + 0, 100); 79 + if (err) { 80 + dev_err(dev->dev, "gp soft reset time out\n"); 81 + return err; 82 + } 83 + 84 + gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL); 85 + gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED); 86 + 87 + ip->data.async_reset = false; 88 + return 0; 89 + } 90 + 91 + static int lima_gp_task_validate(struct lima_sched_pipe *pipe, 92 + struct lima_sched_task *task) 93 + { 94 + struct drm_lima_gp_frame *frame = task->frame; 95 + u32 *f = frame->frame; 96 + (void)pipe; 97 + 98 + if (f[LIMA_GP_VSCL_START_ADDR >> 2] > 99 + f[LIMA_GP_VSCL_END_ADDR >> 2] || 100 + f[LIMA_GP_PLBUCL_START_ADDR >> 2] > 101 + f[LIMA_GP_PLBUCL_END_ADDR >> 2] || 102 + f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] > 103 + f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]) 104 + return -EINVAL; 105 + 106 + if (f[LIMA_GP_VSCL_START_ADDR >> 2] == 107 + f[LIMA_GP_VSCL_END_ADDR >> 2] && 108 + f[LIMA_GP_PLBUCL_START_ADDR >> 2] == 109 + f[LIMA_GP_PLBUCL_END_ADDR >> 2]) 110 + return -EINVAL; 111 + 112 + return 0; 113 + } 114 + 115 + static void lima_gp_task_run(struct lima_sched_pipe *pipe, 116 + struct lima_sched_task *task) 117 + { 118 + struct lima_ip *ip = pipe->processor[0]; 119 + struct drm_lima_gp_frame *frame = task->frame; 120 + u32 *f = frame->frame; 121 + u32 cmd = 0; 122 + int i; 123 + 124 + if (f[LIMA_GP_VSCL_START_ADDR >> 2] != 125 + f[LIMA_GP_VSCL_END_ADDR >> 2]) 126 + cmd |= LIMA_GP_CMD_START_VS; 127 + if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] != 128 + f[LIMA_GP_PLBUCL_END_ADDR >> 2]) 129 + cmd |= LIMA_GP_CMD_START_PLBU; 130 + 131 + /* before any hw ops, wait last success task async soft reset */ 132 + lima_gp_soft_reset_async_wait(ip); 133 + 134 + for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++) 135 + writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4); 136 + 137 + gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC); 138 + 
gp_write(LIMA_GP_CMD, cmd); 139 + } 140 + 141 + static int lima_gp_hard_reset_poll(struct lima_ip *ip) 142 + { 143 + gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000); 144 + return gp_read(LIMA_GP_PERF_CNT_0_LIMIT) == 0xC01A0000; 145 + } 146 + 147 + static int lima_gp_hard_reset(struct lima_ip *ip) 148 + { 149 + struct lima_device *dev = ip->dev; 150 + int ret; 151 + 152 + gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000); 153 + gp_write(LIMA_GP_INT_MASK, 0); 154 + gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET); 155 + ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100); 156 + if (ret) { 157 + dev_err(dev->dev, "gp hard reset timeout\n"); 158 + return ret; 159 + } 160 + 161 + gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0); 162 + gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL); 163 + gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED); 164 + return 0; 165 + } 166 + 167 + static void lima_gp_task_fini(struct lima_sched_pipe *pipe) 168 + { 169 + lima_gp_soft_reset_async(pipe->processor[0]); 170 + } 171 + 172 + static void lima_gp_task_error(struct lima_sched_pipe *pipe) 173 + { 174 + struct lima_ip *ip = pipe->processor[0]; 175 + 176 + dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n", 177 + gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS)); 178 + 179 + lima_gp_hard_reset(ip); 180 + } 181 + 182 + static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe) 183 + { 184 + lima_sched_pipe_task_done(pipe); 185 + } 186 + 187 + static void lima_gp_print_version(struct lima_ip *ip) 188 + { 189 + u32 version, major, minor; 190 + char *name; 191 + 192 + version = gp_read(LIMA_GP_VERSION); 193 + major = (version >> 8) & 0xFF; 194 + minor = version & 0xFF; 195 + switch (version >> 16) { 196 + case 0xA07: 197 + name = "mali200"; 198 + break; 199 + case 0xC07: 200 + name = "mali300"; 201 + break; 202 + case 0xB07: 203 + name = "mali400"; 204 + break; 205 + case 0xD07: 206 + name = "mali450"; 207 + break; 208 + default: 209 + name = "unknown"; 210 + break; 211 + } 212 + 
dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n", 213 + lima_ip_name(ip), name, major, minor); 214 + } 215 + 216 + static struct kmem_cache *lima_gp_task_slab; 217 + static int lima_gp_task_slab_refcnt; 218 + 219 + int lima_gp_init(struct lima_ip *ip) 220 + { 221 + struct lima_device *dev = ip->dev; 222 + int err; 223 + 224 + lima_gp_print_version(ip); 225 + 226 + ip->data.async_reset = false; 227 + lima_gp_soft_reset_async(ip); 228 + err = lima_gp_soft_reset_async_wait(ip); 229 + if (err) 230 + return err; 231 + 232 + err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler, 233 + IRQF_SHARED, lima_ip_name(ip), ip); 234 + if (err) { 235 + dev_err(dev->dev, "gp %s fail to request irq\n", 236 + lima_ip_name(ip)); 237 + return err; 238 + } 239 + 240 + dev->gp_version = gp_read(LIMA_GP_VERSION); 241 + 242 + return 0; 243 + } 244 + 245 + void lima_gp_fini(struct lima_ip *ip) 246 + { 247 + 248 + } 249 + 250 + int lima_gp_pipe_init(struct lima_device *dev) 251 + { 252 + int frame_size = sizeof(struct drm_lima_gp_frame); 253 + struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp; 254 + 255 + if (!lima_gp_task_slab) { 256 + lima_gp_task_slab = kmem_cache_create_usercopy( 257 + "lima_gp_task", sizeof(struct lima_sched_task) + frame_size, 258 + 0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task), 259 + frame_size, NULL); 260 + if (!lima_gp_task_slab) 261 + return -ENOMEM; 262 + } 263 + lima_gp_task_slab_refcnt++; 264 + 265 + pipe->frame_size = frame_size; 266 + pipe->task_slab = lima_gp_task_slab; 267 + 268 + pipe->task_validate = lima_gp_task_validate; 269 + pipe->task_run = lima_gp_task_run; 270 + pipe->task_fini = lima_gp_task_fini; 271 + pipe->task_error = lima_gp_task_error; 272 + pipe->task_mmu_error = lima_gp_task_mmu_error; 273 + 274 + return 0; 275 + } 276 + 277 + void lima_gp_pipe_fini(struct lima_device *dev) 278 + { 279 + if (!--lima_gp_task_slab_refcnt) { 280 + kmem_cache_destroy(lima_gp_task_slab); 281 + lima_gp_task_slab = NULL; 282 + } 
283 + }
+16
drivers/gpu/drm/lima/lima_gp.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_GP_H__ 5 + #define __LIMA_GP_H__ 6 + 7 + struct lima_ip; 8 + struct lima_device; 9 + 10 + int lima_gp_init(struct lima_ip *ip); 11 + void lima_gp_fini(struct lima_ip *ip); 12 + 13 + int lima_gp_pipe_init(struct lima_device *dev); 14 + void lima_gp_pipe_fini(struct lima_device *dev); 15 + 16 + #endif
+80
drivers/gpu/drm/lima/lima_l2_cache.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/iopoll.h> 5 + #include <linux/device.h> 6 + 7 + #include "lima_device.h" 8 + #include "lima_l2_cache.h" 9 + #include "lima_regs.h" 10 + 11 + #define l2_cache_write(reg, data) writel(data, ip->iomem + reg) 12 + #define l2_cache_read(reg) readl(ip->iomem + reg) 13 + 14 + static int lima_l2_cache_wait_idle(struct lima_ip *ip) 15 + { 16 + struct lima_device *dev = ip->dev; 17 + int err; 18 + u32 v; 19 + 20 + err = readl_poll_timeout(ip->iomem + LIMA_L2_CACHE_STATUS, v, 21 + !(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY), 22 + 0, 1000); 23 + if (err) { 24 + dev_err(dev->dev, "l2 cache wait command timeout\n"); 25 + return err; 26 + } 27 + return 0; 28 + } 29 + 30 + int lima_l2_cache_flush(struct lima_ip *ip) 31 + { 32 + int ret; 33 + 34 + spin_lock(&ip->data.lock); 35 + l2_cache_write(LIMA_L2_CACHE_COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL); 36 + ret = lima_l2_cache_wait_idle(ip); 37 + spin_unlock(&ip->data.lock); 38 + return ret; 39 + } 40 + 41 + int lima_l2_cache_init(struct lima_ip *ip) 42 + { 43 + int i, err; 44 + u32 size; 45 + struct lima_device *dev = ip->dev; 46 + 47 + /* l2_cache2 only exists when one of PP4-7 present */ 48 + if (ip->id == lima_ip_l2_cache2) { 49 + for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) { 50 + if (dev->ip[i].present) 51 + break; 52 + } 53 + if (i > lima_ip_pp7) 54 + return -ENODEV; 55 + } 56 + 57 + spin_lock_init(&ip->data.lock); 58 + 59 + size = l2_cache_read(LIMA_L2_CACHE_SIZE); 60 + dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n", 61 + 1 << (((size >> 16) & 0xff) - 10), 62 + 1 << ((size >> 8) & 0xff), 63 + 1 << (size & 0xff), 64 + 1 << ((size >> 24) & 0xff)); 65 + 66 + err = lima_l2_cache_flush(ip); 67 + if (err) 68 + return err; 69 + 70 + l2_cache_write(LIMA_L2_CACHE_ENABLE, 71 + LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE); 72 + 
l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c); 73 + 74 + return 0; 75 + } 76 + 77 + void lima_l2_cache_fini(struct lima_ip *ip) 78 + { 79 + 80 + }
+14
drivers/gpu/drm/lima/lima_l2_cache.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_L2_CACHE_H__ 5 + #define __LIMA_L2_CACHE_H__ 6 + 7 + struct lima_ip; 8 + 9 + int lima_l2_cache_init(struct lima_ip *ip); 10 + void lima_l2_cache_fini(struct lima_ip *ip); 11 + 12 + int lima_l2_cache_flush(struct lima_ip *ip); 13 + 14 + #endif
+142
drivers/gpu/drm/lima/lima_mmu.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/interrupt.h> 5 + #include <linux/iopoll.h> 6 + #include <linux/device.h> 7 + 8 + #include "lima_device.h" 9 + #include "lima_mmu.h" 10 + #include "lima_vm.h" 11 + #include "lima_object.h" 12 + #include "lima_regs.h" 13 + 14 + #define mmu_write(reg, data) writel(data, ip->iomem + reg) 15 + #define mmu_read(reg) readl(ip->iomem + reg) 16 + 17 + #define lima_mmu_send_command(cmd, addr, val, cond) \ 18 + ({ \ 19 + int __ret; \ 20 + \ 21 + mmu_write(LIMA_MMU_COMMAND, cmd); \ 22 + __ret = readl_poll_timeout(ip->iomem + (addr), val, \ 23 + cond, 0, 100); \ 24 + if (__ret) \ 25 + dev_err(dev->dev, \ 26 + "mmu command %x timeout\n", cmd); \ 27 + __ret; \ 28 + }) 29 + 30 + static irqreturn_t lima_mmu_irq_handler(int irq, void *data) 31 + { 32 + struct lima_ip *ip = data; 33 + struct lima_device *dev = ip->dev; 34 + u32 status = mmu_read(LIMA_MMU_INT_STATUS); 35 + struct lima_sched_pipe *pipe; 36 + 37 + /* for shared irq case */ 38 + if (!status) 39 + return IRQ_NONE; 40 + 41 + if (status & LIMA_MMU_INT_PAGE_FAULT) { 42 + u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR); 43 + 44 + dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n", 45 + fault, LIMA_MMU_STATUS_BUS_ID(status), 46 + status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read", 47 + lima_ip_name(ip)); 48 + } 49 + 50 + if (status & LIMA_MMU_INT_READ_BUS_ERROR) 51 + dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip)); 52 + 53 + /* mask all interrupts before resume */ 54 + mmu_write(LIMA_MMU_INT_MASK, 0); 55 + mmu_write(LIMA_MMU_INT_CLEAR, status); 56 + 57 + pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? 
lima_pipe_gp : lima_pipe_pp); 58 + lima_sched_pipe_mmu_error(pipe); 59 + 60 + return IRQ_HANDLED; 61 + } 62 + 63 + int lima_mmu_init(struct lima_ip *ip) 64 + { 65 + struct lima_device *dev = ip->dev; 66 + int err; 67 + u32 v; 68 + 69 + if (ip->id == lima_ip_ppmmu_bcast) 70 + return 0; 71 + 72 + mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE); 73 + if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) { 74 + dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip)); 75 + return -EIO; 76 + } 77 + 78 + mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET); 79 + err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET, 80 + LIMA_MMU_DTE_ADDR, v, v == 0); 81 + if (err) 82 + return err; 83 + 84 + err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler, 85 + IRQF_SHARED, lima_ip_name(ip), ip); 86 + if (err) { 87 + dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip)); 88 + return err; 89 + } 90 + 91 + mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR); 92 + mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma); 93 + return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING, 94 + LIMA_MMU_STATUS, v, 95 + v & LIMA_MMU_STATUS_PAGING_ENABLED); 96 + } 97 + 98 + void lima_mmu_fini(struct lima_ip *ip) 99 + { 100 + 101 + } 102 + 103 + void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm) 104 + { 105 + struct lima_device *dev = ip->dev; 106 + u32 v; 107 + 108 + lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL, 109 + LIMA_MMU_STATUS, v, 110 + v & LIMA_MMU_STATUS_STALL_ACTIVE); 111 + 112 + if (vm) 113 + mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma); 114 + 115 + /* flush the TLB */ 116 + mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE); 117 + 118 + lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL, 119 + LIMA_MMU_STATUS, v, 120 + !(v & LIMA_MMU_STATUS_STALL_ACTIVE)); 121 + } 122 + 123 + void lima_mmu_page_fault_resume(struct lima_ip *ip) 124 + { 125 + struct lima_device *dev = ip->dev; 126 + u32 status = 
mmu_read(LIMA_MMU_STATUS); 127 + u32 v; 128 + 129 + if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) { 130 + dev_info(dev->dev, "mmu resume\n"); 131 + 132 + mmu_write(LIMA_MMU_INT_MASK, 0); 133 + mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE); 134 + lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET, 135 + LIMA_MMU_DTE_ADDR, v, v == 0); 136 + mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR); 137 + mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma); 138 + lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING, 139 + LIMA_MMU_STATUS, v, 140 + v & LIMA_MMU_STATUS_PAGING_ENABLED); 141 + } 142 + }
+16
drivers/gpu/drm/lima/lima_mmu.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_MMU_H__ 5 + #define __LIMA_MMU_H__ 6 + 7 + struct lima_ip; 8 + struct lima_vm; 9 + 10 + int lima_mmu_init(struct lima_ip *ip); 11 + void lima_mmu_fini(struct lima_ip *ip); 12 + 13 + void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm); 14 + void lima_mmu_page_fault_resume(struct lima_ip *ip); 15 + 16 + #endif
+122
drivers/gpu/drm/lima/lima_object.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */

#include <drm/drm_prime.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>

#include "lima_object.h"

/*
 * Free a lima buffer object and every resource it may hold.
 *
 * Safe to call on a partially constructed BO (used as the error path of
 * lima_bo_create()): every teardown step is guarded by a NULL/zero check.
 * Two ownership cases:
 *  - bo->sgt set: pages came from a prime import; the sg table (and its
 *    DMA mapping) is released via drm_prime_gem_destroy().
 *  - otherwise: pages came from drm_gem_get_pages() and were mapped one
 *    by one with dma_map_page(), so unmap only the non-zero entries
 *    (creation may have failed mid-loop, leaving trailing zeros).
 */
void lima_bo_destroy(struct lima_bo *bo)
{
	if (bo->sgt) {
		kfree(bo->pages);
		drm_prime_gem_destroy(&bo->gem, bo->sgt);
	} else {
		if (bo->pages_dma_addr) {
			int i, npages = bo->gem.size >> PAGE_SHIFT;

			for (i = 0; i < npages; i++) {
				/* zero entry == never mapped; skip */
				if (bo->pages_dma_addr[i])
					dma_unmap_page(bo->gem.dev->dev,
						       bo->pages_dma_addr[i],
						       PAGE_SIZE, DMA_BIDIRECTIONAL);
			}
		}

		if (bo->pages)
			drm_gem_put_pages(&bo->gem, bo->pages, true, true);
	}

	kfree(bo->pages_dma_addr);
	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

/*
 * Allocate and minimally initialize the lima_bo wrapper and its embedded
 * GEM object. No backing pages are allocated here.
 *
 * NOTE(review): @flags is currently unused; kept for API symmetry with
 * lima_bo_create() — confirm against callers before removing.
 * Returns the new BO or an ERR_PTR.
 */
static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags,
					     struct reservation_object *resv)
{
	struct lima_bo *bo;
	int err;

	size = PAGE_ALIGN(size);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);
	bo->gem.resv = resv;

	err = drm_gem_object_init(dev->ddev, &bo->gem, size);
	if (err) {
		kfree(bo);
		return ERR_PTR(err);
	}

	return bo;
}

/*
 * Create a lima buffer object with DMA-mapped backing pages.
 *
 * @sgt: non-NULL for a prime import — the page and dma_addr arrays are
 *       filled from the sg table. NULL for a native allocation — pages
 *       come from shmem via drm_gem_get_pages() and are DMA-mapped
 *       individually.
 * Returns the BO or an ERR_PTR; on any failure lima_bo_destroy() cleans
 * up whatever was set up so far (see the partial-state guards there).
 */
struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
			       u32 flags, struct sg_table *sgt,
			       struct reservation_object *resv)
{
	int i, err;
	size_t npages;
	struct lima_bo *bo, *ret;

	bo = lima_bo_create_struct(dev, size, flags, resv);
	if (IS_ERR(bo))
		return bo;

	npages = bo->gem.size >> PAGE_SHIFT;

	/* zeroed so destroy can tell mapped from unmapped entries */
	bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!bo->pages_dma_addr) {
		ret = ERR_PTR(-ENOMEM);
		goto err_out;
	}

	if (sgt) {
		bo->sgt = sgt;

		bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
		if (!bo->pages) {
			ret = ERR_PTR(-ENOMEM);
			goto err_out;
		}

		err = drm_prime_sg_to_page_addr_arrays(
			sgt, bo->pages, bo->pages_dma_addr, npages);
		if (err) {
			ret = ERR_PTR(err);
			goto err_out;
		}
	} else {
		/* keep shmem pages below 4G — presumably the GPU/MMU is
		 * 32-bit addressing only; TODO confirm against lima_vm
		 */
		mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
		bo->pages = drm_gem_get_pages(&bo->gem);
		if (IS_ERR(bo->pages)) {
			ret = ERR_CAST(bo->pages);
			bo->pages = NULL;
			goto err_out;
		}

		for (i = 0; i < npages; i++) {
			dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
						       PAGE_SIZE, DMA_BIDIRECTIONAL);
			if (dma_mapping_error(dev->dev, addr)) {
				ret = ERR_PTR(-EFAULT);
				goto err_out;
			}
			bo->pages_dma_addr[i] = addr;
		}

	}

	return bo;

err_out:
	/* handles every partially-initialized state reached above */
	lima_bo_destroy(bo);
	return ret;
}
+36
drivers/gpu/drm/lima/lima_object.h
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */

#ifndef __LIMA_OBJECT_H__
#define __LIMA_OBJECT_H__

#include <drm/drm_gem.h>

#include "lima_device.h"

/*
 * lima buffer object: a GEM object plus the page/DMA bookkeeping the
 * driver needs. See lima_object.c for the create/destroy contract.
 */
struct lima_bo {
	struct drm_gem_object gem;	/* base GEM object (embedded first) */

	struct page **pages;		/* backing pages (shmem or import) */
	dma_addr_t *pages_dma_addr;	/* per-page DMA address, 0 = unmapped */
	struct sg_table *sgt;		/* non-NULL iff created from a prime import */
	void *vaddr;			/* kernel mapping set up by lima_bo_vmap() */

	struct mutex lock;		/* NOTE(review): presumably guards vaddr/va;
					 * confirm against lima_object.c users */
	struct list_head va;		/* VM mappings of this BO (see lima_vm) */
};

/* Downcast from the embedded GEM object back to the lima BO. */
static inline struct lima_bo *
to_lima_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct lima_bo, gem);
}

struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
			       u32 flags, struct sg_table *sgt,
			       struct reservation_object *resv);
void lima_bo_destroy(struct lima_bo *bo);
void *lima_bo_vmap(struct lima_bo *bo);
void lima_bo_vunmap(struct lima_bo *bo);

#endif
+60
drivers/gpu/drm/lima/lima_pmu.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/iopoll.h> 5 + #include <linux/device.h> 6 + 7 + #include "lima_device.h" 8 + #include "lima_pmu.h" 9 + #include "lima_regs.h" 10 + 11 + #define pmu_write(reg, data) writel(data, ip->iomem + reg) 12 + #define pmu_read(reg) readl(ip->iomem + reg) 13 + 14 + static int lima_pmu_wait_cmd(struct lima_ip *ip) 15 + { 16 + struct lima_device *dev = ip->dev; 17 + int err; 18 + u32 v; 19 + 20 + err = readl_poll_timeout(ip->iomem + LIMA_PMU_INT_RAWSTAT, 21 + v, v & LIMA_PMU_INT_CMD_MASK, 22 + 100, 100000); 23 + if (err) { 24 + dev_err(dev->dev, "timeout wait pmd cmd\n"); 25 + return err; 26 + } 27 + 28 + pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK); 29 + return 0; 30 + } 31 + 32 + int lima_pmu_init(struct lima_ip *ip) 33 + { 34 + int err; 35 + u32 stat; 36 + 37 + pmu_write(LIMA_PMU_INT_MASK, 0); 38 + 39 + /* If this value is too low, when in high GPU clk freq, 40 + * GPU will be in unstable state. 41 + */ 42 + pmu_write(LIMA_PMU_SW_DELAY, 0xffff); 43 + 44 + /* status reg 1=off 0=on */ 45 + stat = pmu_read(LIMA_PMU_STATUS); 46 + 47 + /* power up all ip */ 48 + if (stat) { 49 + pmu_write(LIMA_PMU_POWER_UP, stat); 50 + err = lima_pmu_wait_cmd(ip); 51 + if (err) 52 + return err; 53 + } 54 + return 0; 55 + } 56 + 57 + void lima_pmu_fini(struct lima_ip *ip) 58 + { 59 + 60 + }
+12
drivers/gpu/drm/lima/lima_pmu.h
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#ifndef __LIMA_PMU_H__
#define __LIMA_PMU_H__

struct lima_ip;

/* Power management unit setup/teardown; see lima_pmu.c for semantics. */
int lima_pmu_init(struct lima_ip *ip);
void lima_pmu_fini(struct lima_ip *ip);

#endif
+427
drivers/gpu/drm/lima/lima_pp.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <drm/lima_drm.h>

#include "lima_device.h"
#include "lima_pp.h"
#include "lima_dlbu.h"
#include "lima_bcast.h"
#include "lima_vm.h"
#include "lima_regs.h"

/* MMIO accessors; both expect a variable named 'ip' in the caller's scope. */
#define pp_write(reg, data) writel(data, ip->iomem + reg)
#define pp_read(reg) readl(ip->iomem + reg)

/*
 * Acknowledge a PP interrupt. On an error IRQ, log it, mark the pipe as
 * errored (picked up by the scheduler) and mask further interrupts until
 * the hard reset in lima_pp_task_error().
 */
static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
{
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;

	if (state & LIMA_PP_IRQ_MASK_ERROR) {
		u32 status = pp_read(LIMA_PP_STATUS);

		dev_err(dev->dev, "pp error irq state=%x status=%x\n",
			state, status);

		pipe->error = true;

		/* mask all interrupts before hard reset */
		pp_write(LIMA_PP_INT_MASK, 0);
	}

	pp_write(LIMA_PP_INT_CLEAR, state);
}

/*
 * Per-PP interrupt handler (non-broadcast case). Each completed PP
 * decrements pipe->task; the last one signals the scheduler.
 */
static irqreturn_t lima_pp_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
	u32 state = pp_read(LIMA_PP_INT_STATUS);

	/* for shared irq case */
	if (!state)
		return IRQ_NONE;

	lima_pp_handle_irq(ip, state);

	if (atomic_dec_and_test(&pipe->task))
		lima_sched_pipe_task_done(pipe);

	return IRQ_HANDLED;
}

/*
 * Broadcast interrupt handler (mali450): one IRQ line fans in for all
 * PPs, so scan every PP used by the current frame. pipe->done tracks
 * which PPs have already been accounted for, so each is counted once.
 */
static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
{
	int i;
	irqreturn_t ret = IRQ_NONE;
	struct lima_ip *pp_bcast = data;
	struct lima_device *dev = pp_bcast->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
	struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;

	for (i = 0; i < frame->num_pp; i++) {
		struct lima_ip *ip = pipe->processor[i];
		u32 status, state;

		if (pipe->done & (1 << i))
			continue;

		/* status read first in case int state change in the middle
		 * which may miss the interrupt handling
		 */
		status = pp_read(LIMA_PP_STATUS);
		state = pp_read(LIMA_PP_INT_STATUS);

		if (state) {
			lima_pp_handle_irq(ip, state);
			ret = IRQ_HANDLED;
		} else {
			/* no IRQ and still rendering: not done yet */
			if (status & LIMA_PP_STATUS_RENDERING_ACTIVE)
				continue;
		}

		pipe->done |= (1 << i);
		if (atomic_dec_and_test(&pipe->task))
			lima_sched_pipe_task_done(pipe);
	}

	return ret;
}

/*
 * Kick off a soft reset without waiting for it; the flag prevents a
 * second kick before lima_pp_soft_reset_async_wait() consumes the first.
 */
static void lima_pp_soft_reset_async(struct lima_ip *ip)
{
	if (ip->data.async_reset)
		return;

	pp_write(LIMA_PP_INT_MASK, 0);
	pp_write(LIMA_PP_INT_RAWSTAT, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_SOFT_RESET);
	ip->data.async_reset = true;
}

/* Poll predicate: PP idle and only the reset-completed IRQ bit raised. */
static int lima_pp_soft_reset_poll(struct lima_ip *ip)
{
	return !(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_RENDERING_ACTIVE) &&
		pp_read(LIMA_PP_INT_RAWSTAT) == LIMA_PP_IRQ_RESET_COMPLETED;
}

/*
 * Wait for one PP's soft reset to finish, then re-arm its interrupts.
 * Returns 0 or a timeout error.
 */
static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100);
	if (ret) {
		dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip));
		return ret;
	}

	pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
	return 0;
}

/*
 * Complete a pending async soft reset. For the broadcast IP this waits
 * on every PP of the current frame (errors are OR-combined); otherwise
 * just the one IP. Clears the async_reset flag either way.
 */
static int lima_pp_soft_reset_async_wait(struct lima_ip *ip)
{
	int i, err = 0;

	if (!ip->data.async_reset)
		return 0;

	if (ip->id == lima_ip_pp_bcast) {
		struct lima_device *dev = ip->dev;
		struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
		struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;

		for (i = 0; i < frame->num_pp; i++)
			err |= lima_pp_soft_reset_async_wait_one(pipe->processor[i]);
	} else
		err = lima_pp_soft_reset_async_wait_one(ip);

	ip->data.async_reset = false;
	return err;
}

/*
 * Program a frame: the frame register block, then the three write-back
 * units (wb is consumed as 3 * LIMA_PP_WB_REG_NUM consecutive words).
 */
static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
{
	int i, j, n = 0;

	for (i = 0; i < LIMA_PP_FRAME_REG_NUM; i++)
		writel(frame[i], ip->iomem + LIMA_PP_FRAME + i * 4);

	for (i = 0; i < 3; i++) {
		for (j = 0; j < LIMA_PP_WB_REG_NUM; j++)
			writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4);
	}
}

/*
 * Poll predicate for hard reset: write a magic value to a perf-counter
 * register and read it back — presumably a sticky register the reset
 * clears, so a successful read-back means the block is alive again.
 * NOTE(review): semantics inferred from the magic constants; confirm
 * against the vendor driver.
 */
static int lima_pp_hard_reset_poll(struct lima_ip *ip)
{
	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000);
	return pp_read(LIMA_PP_PERF_CNT_0_LIMIT) == 0xC01A0000;
}

/*
 * Force-reset one PP (error recovery path), then restore the interrupt
 * setup. Returns 0 or a timeout error.
 */
static int lima_pp_hard_reset(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000);
	pp_write(LIMA_PP_INT_MASK, 0);
	pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET);
	ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100);
	if (ret) {
		dev_err(dev->dev, "pp hard reset timeout\n");
		return ret;
	}

	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0);
	pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
	return 0;
}

/* Decode LIMA_PP_VERSION (product id in the high half, major.minor below). */
static void lima_pp_print_version(struct lima_ip *ip)
{
	u32 version, major, minor;
	char *name;

	version = pp_read(LIMA_PP_VERSION);
	major = (version >> 8) & 0xFF;
	minor = version & 0xFF;
	switch (version >> 16) {
	case 0xC807:
		name = "mali200";
		break;
	case 0xCE07:
		name = "mali300";
		break;
	case 0xCD07:
		name = "mali400";
		break;
	case 0xCF07:
		name = "mali450";
		break;
	default:
		name = "unknown";
		break;
	}
	dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
		 lima_ip_name(ip), name, major, minor);
}

/*
 * Bring up one PP: report its version, soft-reset it and hook up its
 * (possibly shared) interrupt line. Returns 0 or a negative error.
 */
int lima_pp_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	lima_pp_print_version(ip);

	ip->data.async_reset = false;
	lima_pp_soft_reset_async(ip);
	err = lima_pp_soft_reset_async_wait(ip);
	if (err)
		return err;

	err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "pp %s fail to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	dev->pp_version = pp_read(LIMA_PP_VERSION);

	return 0;
}

/* Nothing to tear down: the IRQ is devm-managed. */
void lima_pp_fini(struct lima_ip *ip)
{

}

/*
 * Bring up the mali450 broadcast unit: only needs its IRQ hooked up
 * (the per-PP init is done by lima_pp_init()). Returns 0 or an error.
 */
int lima_pp_bcast_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "pp %s fail to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	return 0;
}

/* Nothing to tear down: the IRQ is devm-managed. */
void lima_pp_bcast_fini(struct lima_ip *ip)
{

}

/*
 * Scheduler callback: validate a userspace-supplied frame before it is
 * queued. Rejects a non-zero uapi padding field (m450) and a PP count
 * outside [1, num_processor].
 */
static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
				 struct lima_sched_task *task)
{
	u32 num_pp;

	if (pipe->bcast_processor) {
		struct drm_lima_m450_pp_frame *f = task->frame;

		num_pp = f->num_pp;

		if (f->_pad)
			return -EINVAL;
	} else {
		struct drm_lima_m400_pp_frame *f = task->frame;

		num_pp = f->num_pp;
	}

	if (num_pp == 0 || num_pp > pipe->num_processor)
		return -EINVAL;

	return 0;
}

/*
 * Scheduler callback: start a frame on the hardware.
 *
 * mali450 path (bcast_processor set): program the frame once through the
 * broadcast unit, optionally with dynamic load balancing (DLBU), then
 * set per-PP stack/pointer-array registers and start all PPs with a
 * single broadcast CTRL write.
 * mali400 path: program and start each PP individually, patching the
 * shared frame register block per PP before each write.
 */
static void lima_pp_task_run(struct lima_sched_pipe *pipe,
			     struct lima_sched_task *task)
{
	if (pipe->bcast_processor) {
		struct drm_lima_m450_pp_frame *frame = task->frame;
		struct lima_device *dev = pipe->bcast_processor->dev;
		struct lima_ip *ip = pipe->bcast_processor;
		int i;

		pipe->done = 0;
		atomic_set(&pipe->task, frame->num_pp);

		if (frame->use_dlbu) {
			lima_dlbu_enable(dev, frame->num_pp);

			/* with DLBU the frame pointer is the fixed DLBU VA */
			frame->frame[LIMA_PP_FRAME >> 2] = LIMA_VA_RESERVE_DLBU;
			lima_dlbu_set_reg(dev->ip + lima_ip_dlbu, frame->dlbu_regs);
		} else
			lima_dlbu_disable(dev);

		lima_bcast_enable(dev, frame->num_pp);

		/* finish the soft reset queued by the previous task_fini */
		lima_pp_soft_reset_async_wait(ip);

		lima_pp_write_frame(ip, frame->frame, frame->wb);

		for (i = 0; i < frame->num_pp; i++) {
			struct lima_ip *ip = pipe->processor[i];

			pp_write(LIMA_PP_STACK, frame->fragment_stack_address[i]);
			if (!frame->use_dlbu)
				pp_write(LIMA_PP_FRAME, frame->plbu_array_address[i]);
		}

		/* broadcast write: starts every enabled PP at once */
		pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
	} else {
		struct drm_lima_m400_pp_frame *frame = task->frame;
		int i;

		atomic_set(&pipe->task, frame->num_pp);

		for (i = 0; i < frame->num_pp; i++) {
			struct lima_ip *ip = pipe->processor[i];

			frame->frame[LIMA_PP_FRAME >> 2] =
				frame->plbu_array_address[i];
			frame->frame[LIMA_PP_STACK >> 2] =
				frame->fragment_stack_address[i];

			lima_pp_soft_reset_async_wait(ip);

			lima_pp_write_frame(ip, frame->frame, frame->wb);

			pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
		}
	}
}

/*
 * Scheduler callback: after a task finishes, queue an async soft reset
 * so the wait overlaps with CPU-side work before the next task_run.
 */
static void lima_pp_task_fini(struct lima_sched_pipe *pipe)
{
	if (pipe->bcast_processor)
		lima_pp_soft_reset_async(pipe->bcast_processor);
	else {
		int i;

		for (i = 0; i < pipe->num_processor; i++)
			lima_pp_soft_reset_async(pipe->processor[i]);
	}
}

/* Scheduler callback: log state and hard-reset every PP after an error. */
static void lima_pp_task_error(struct lima_sched_pipe *pipe)
{
	int i;

	for (i = 0; i < pipe->num_processor; i++) {
		struct lima_ip *ip = pipe->processor[i];

		dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n",
			i, pp_read(LIMA_PP_INT_STATUS), pp_read(LIMA_PP_STATUS));

		lima_pp_hard_reset(ip);
	}
}

/* Scheduler callback: account an MMU fault as one finished PP. */
static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
{
	if (atomic_dec_and_test(&pipe->task))
		lima_sched_pipe_task_done(pipe);
}

/* Task slab shared across devices; refcounted by pipe_init/pipe_fini. */
static struct kmem_cache *lima_pp_task_slab;
static int lima_pp_task_slab_refcnt;

/*
 * Set up the PP scheduler pipe: pick the frame struct size for this GPU,
 * create (or reuse) the usercopy-whitelisted task slab, and install the
 * task callbacks. Returns 0 or -ENOMEM.
 */
int lima_pp_pipe_init(struct lima_device *dev)
{
	int frame_size;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;

	if (dev->id == lima_gpu_mali400)
		frame_size = sizeof(struct drm_lima_m400_pp_frame);
	else
		frame_size = sizeof(struct drm_lima_m450_pp_frame);

	if (!lima_pp_task_slab) {
		/* only the trailing frame area is copied from userspace,
		 * hence the usercopy window [sizeof(task), frame_size)
		 */
		lima_pp_task_slab = kmem_cache_create_usercopy(
			"lima_pp_task", sizeof(struct lima_sched_task) + frame_size,
			0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
			frame_size, NULL);
		if (!lima_pp_task_slab)
			return -ENOMEM;
	}
	lima_pp_task_slab_refcnt++;

	pipe->frame_size = frame_size;
	pipe->task_slab = lima_pp_task_slab;

	pipe->task_validate = lima_pp_task_validate;
	pipe->task_run = lima_pp_task_run;
	pipe->task_fini = lima_pp_task_fini;
	pipe->task_error = lima_pp_task_error;
	pipe->task_mmu_error = lima_pp_task_mmu_error;

	return 0;
}

/* Drop a reference on the shared task slab; destroy it on the last one. */
void lima_pp_pipe_fini(struct lima_device *dev)
{
	if (!--lima_pp_task_slab_refcnt) {
		kmem_cache_destroy(lima_pp_task_slab);
		lima_pp_task_slab = NULL;
	}
}
+19
drivers/gpu/drm/lima/lima_pp.h
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#ifndef __LIMA_PP_H__
#define __LIMA_PP_H__

struct lima_ip;
struct lima_device;

/* Per-PP bring-up/teardown (version probe, reset, IRQ). */
int lima_pp_init(struct lima_ip *ip);
void lima_pp_fini(struct lima_ip *ip);

/* mali450 broadcast unit bring-up/teardown (IRQ only). */
int lima_pp_bcast_init(struct lima_ip *ip);
void lima_pp_bcast_fini(struct lima_ip *ip);

/* PP scheduler-pipe setup: task slab + scheduler callbacks. */
int lima_pp_pipe_init(struct lima_device *dev);
void lima_pp_pipe_fini(struct lima_device *dev);

#endif
+298
drivers/gpu/drm/lima/lima_regs.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2010-2017 ARM Limited. All rights reserved.
 * Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
 */

#ifndef __LIMA_REGS_H__
#define __LIMA_REGS_H__

/* This file's register definition is collected from the
 * official ARM Mali Utgard GPU kernel driver source code
 *
 * Function-like macros fully parenthesize their arguments so they stay
 * correct for compound argument expressions.
 */

/* PMU regs */
#define LIMA_PMU_POWER_UP	0x00
#define LIMA_PMU_POWER_DOWN	0x04
#define   LIMA_PMU_POWER_GP0_MASK	BIT(0)
#define   LIMA_PMU_POWER_L2_MASK	BIT(1)
#define   LIMA_PMU_POWER_PP_MASK(i)	BIT(2 + (i))

/*
 * On Mali450 each block automatically starts up its corresponding L2
 * and the PPs are not fully independent controllable.
 * Instead PP0, PP1-3 and PP4-7 can be turned on or off.
 */
#define   LIMA450_PMU_POWER_PP0_MASK	BIT(1)
#define   LIMA450_PMU_POWER_PP13_MASK	BIT(2)
#define   LIMA450_PMU_POWER_PP47_MASK	BIT(3)

#define LIMA_PMU_STATUS		0x08
#define LIMA_PMU_INT_MASK	0x0C
#define LIMA_PMU_INT_RAWSTAT	0x10
#define LIMA_PMU_INT_CLEAR	0x18
#define   LIMA_PMU_INT_CMD_MASK	BIT(0)
#define LIMA_PMU_SW_DELAY	0x1C

/* L2 cache regs */
#define LIMA_L2_CACHE_SIZE		0x0004
#define LIMA_L2_CACHE_STATUS		0x0008
#define   LIMA_L2_CACHE_STATUS_COMMAND_BUSY	BIT(0)
#define   LIMA_L2_CACHE_STATUS_DATA_BUSY	BIT(1)
#define LIMA_L2_CACHE_COMMAND		0x0010
#define   LIMA_L2_CACHE_COMMAND_CLEAR_ALL	BIT(0)
#define LIMA_L2_CACHE_CLEAR_PAGE	0x0014
#define LIMA_L2_CACHE_MAX_READS		0x0018
#define LIMA_L2_CACHE_ENABLE		0x001C
#define   LIMA_L2_CACHE_ENABLE_ACCESS		BIT(0)
#define   LIMA_L2_CACHE_ENABLE_READ_ALLOCATE	BIT(1)
#define LIMA_L2_CACHE_PERFCNT_SRC0	0x0020
#define LIMA_L2_CACHE_PERFCNT_VAL0	0x0024
#define LIMA_L2_CACHE_PERFCNT_SRC1	0x0028
/* NOTE(review): "ERFCNT" looks like a typo for "PERFCNT" but the name is
 * part of the header's interface, so it is kept as-is.
 */
#define LIMA_L2_CACHE_ERFCNT_VAL1	0x002C

/* GP regs */
#define LIMA_GP_VSCL_START_ADDR		0x00
#define LIMA_GP_VSCL_END_ADDR		0x04
#define LIMA_GP_PLBUCL_START_ADDR	0x08
#define LIMA_GP_PLBUCL_END_ADDR		0x0c
#define LIMA_GP_PLBU_ALLOC_START_ADDR	0x10
#define LIMA_GP_PLBU_ALLOC_END_ADDR	0x14
#define LIMA_GP_CMD			0x20
#define   LIMA_GP_CMD_START_VS		BIT(0)
#define   LIMA_GP_CMD_START_PLBU	BIT(1)
#define   LIMA_GP_CMD_UPDATE_PLBU_ALLOC	BIT(4)
#define   LIMA_GP_CMD_RESET		BIT(5)
#define   LIMA_GP_CMD_FORCE_HANG	BIT(6)
#define   LIMA_GP_CMD_STOP_BUS		BIT(9)
#define   LIMA_GP_CMD_SOFT_RESET	BIT(10)
#define LIMA_GP_INT_RAWSTAT		0x24
#define LIMA_GP_INT_CLEAR		0x28
#define LIMA_GP_INT_MASK		0x2C
#define LIMA_GP_INT_STAT		0x30
#define   LIMA_GP_IRQ_VS_END_CMD_LST		BIT(0)
#define   LIMA_GP_IRQ_PLBU_END_CMD_LST		BIT(1)
#define   LIMA_GP_IRQ_PLBU_OUT_OF_MEM		BIT(2)
#define   LIMA_GP_IRQ_VS_SEM_IRQ		BIT(3)
#define   LIMA_GP_IRQ_PLBU_SEM_IRQ		BIT(4)
#define   LIMA_GP_IRQ_HANG			BIT(5)
#define   LIMA_GP_IRQ_FORCE_HANG		BIT(6)
#define   LIMA_GP_IRQ_PERF_CNT_0_LIMIT		BIT(7)
#define   LIMA_GP_IRQ_PERF_CNT_1_LIMIT		BIT(8)
#define   LIMA_GP_IRQ_WRITE_BOUND_ERR		BIT(9)
#define   LIMA_GP_IRQ_SYNC_ERROR		BIT(10)
#define   LIMA_GP_IRQ_AXI_BUS_ERROR		BIT(11)
#define   LIMA_GP_IRQ_AXI_BUS_STOPPED		BIT(12)
#define   LIMA_GP_IRQ_VS_INVALID_CMD		BIT(13)
#define   LIMA_GP_IRQ_PLB_INVALID_CMD		BIT(14)
#define   LIMA_GP_IRQ_RESET_COMPLETED		BIT(19)
#define   LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW	BIT(20)
#define   LIMA_GP_IRQ_SEMAPHORE_OVERFLOW	BIT(21)
#define   LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS	BIT(22)
#define LIMA_GP_WRITE_BOUND_LOW		0x34
#define LIMA_GP_PERF_CNT_0_ENABLE	0x3C
#define LIMA_GP_PERF_CNT_1_ENABLE	0x40
#define LIMA_GP_PERF_CNT_0_SRC		0x44
#define LIMA_GP_PERF_CNT_1_SRC		0x48
#define LIMA_GP_PERF_CNT_0_VALUE	0x4C
#define LIMA_GP_PERF_CNT_1_VALUE	0x50
#define LIMA_GP_PERF_CNT_0_LIMIT	0x54
#define LIMA_GP_STATUS			0x68
#define   LIMA_GP_STATUS_VS_ACTIVE	BIT(1)
#define   LIMA_GP_STATUS_BUS_STOPPED	BIT(2)
#define   LIMA_GP_STATUS_PLBU_ACTIVE	BIT(3)
#define   LIMA_GP_STATUS_BUS_ERROR	BIT(6)
#define   LIMA_GP_STATUS_WRITE_BOUND_ERR	BIT(8)
#define LIMA_GP_VERSION			0x6C
#define LIMA_GP_VSCL_START_ADDR_READ	0x80
#define LIMA_GP_PLBCL_START_ADDR_READ	0x84
#define LIMA_GP_CONTR_AXI_BUS_ERROR_STAT	0x94

#define LIMA_GP_IRQ_MASK_ALL		   \
	(				   \
	 LIMA_GP_IRQ_VS_END_CMD_LST      | \
	 LIMA_GP_IRQ_PLBU_END_CMD_LST    | \
	 LIMA_GP_IRQ_PLBU_OUT_OF_MEM     | \
	 LIMA_GP_IRQ_VS_SEM_IRQ          | \
	 LIMA_GP_IRQ_PLBU_SEM_IRQ        | \
	 LIMA_GP_IRQ_HANG                | \
	 LIMA_GP_IRQ_FORCE_HANG          | \
	 LIMA_GP_IRQ_PERF_CNT_0_LIMIT    | \
	 LIMA_GP_IRQ_PERF_CNT_1_LIMIT    | \
	 LIMA_GP_IRQ_WRITE_BOUND_ERR     | \
	 LIMA_GP_IRQ_SYNC_ERROR          | \
	 LIMA_GP_IRQ_AXI_BUS_ERROR       | \
	 LIMA_GP_IRQ_AXI_BUS_STOPPED     | \
	 LIMA_GP_IRQ_VS_INVALID_CMD      | \
	 LIMA_GP_IRQ_PLB_INVALID_CMD     | \
	 LIMA_GP_IRQ_RESET_COMPLETED     | \
	 LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
	 LIMA_GP_IRQ_SEMAPHORE_OVERFLOW  | \
	 LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)

#define LIMA_GP_IRQ_MASK_ERROR             \
	(                                  \
	 LIMA_GP_IRQ_PLBU_OUT_OF_MEM     | \
	 LIMA_GP_IRQ_FORCE_HANG          | \
	 LIMA_GP_IRQ_WRITE_BOUND_ERR     | \
	 LIMA_GP_IRQ_SYNC_ERROR          | \
	 LIMA_GP_IRQ_AXI_BUS_ERROR       | \
	 LIMA_GP_IRQ_VS_INVALID_CMD      | \
	 LIMA_GP_IRQ_PLB_INVALID_CMD     | \
	 LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
	 LIMA_GP_IRQ_SEMAPHORE_OVERFLOW  | \
	 LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)

#define LIMA_GP_IRQ_MASK_USED		   \
	(				   \
	 LIMA_GP_IRQ_VS_END_CMD_LST      | \
	 LIMA_GP_IRQ_PLBU_END_CMD_LST    | \
	 LIMA_GP_IRQ_MASK_ERROR)

/* PP regs */
#define LIMA_PP_FRAME			0x0000
#define LIMA_PP_RSW			0x0004
#define LIMA_PP_STACK			0x0030
#define LIMA_PP_STACK_SIZE		0x0034
#define LIMA_PP_ORIGIN_OFFSET_X		0x0040
/* write-back unit i (i in 0..2) register block base */
#define LIMA_PP_WB(i)			(0x0100 * ((i) + 1))
#define   LIMA_PP_WB_SOURCE_SELECT	0x0000
#define   LIMA_PP_WB_SOURCE_ADDR	0x0004

#define LIMA_PP_VERSION			0x1000
#define LIMA_PP_CURRENT_REND_LIST_ADDR	0x1004
#define LIMA_PP_STATUS			0x1008
#define   LIMA_PP_STATUS_RENDERING_ACTIVE	BIT(0)
#define   LIMA_PP_STATUS_BUS_STOPPED		BIT(4)
#define LIMA_PP_CTRL			0x100c
#define   LIMA_PP_CTRL_STOP_BUS		BIT(0)
#define   LIMA_PP_CTRL_FLUSH_CACHES	BIT(3)
#define   LIMA_PP_CTRL_FORCE_RESET	BIT(5)
#define   LIMA_PP_CTRL_START_RENDERING	BIT(6)
#define   LIMA_PP_CTRL_SOFT_RESET	BIT(7)
#define LIMA_PP_INT_RAWSTAT		0x1020
#define LIMA_PP_INT_CLEAR		0x1024
#define LIMA_PP_INT_MASK		0x1028
#define LIMA_PP_INT_STATUS		0x102c
#define   LIMA_PP_IRQ_END_OF_FRAME		BIT(0)
#define   LIMA_PP_IRQ_END_OF_TILE		BIT(1)
#define   LIMA_PP_IRQ_HANG			BIT(2)
#define   LIMA_PP_IRQ_FORCE_HANG		BIT(3)
#define   LIMA_PP_IRQ_BUS_ERROR			BIT(4)
#define   LIMA_PP_IRQ_BUS_STOP			BIT(5)
#define   LIMA_PP_IRQ_CNT_0_LIMIT		BIT(6)
#define   LIMA_PP_IRQ_CNT_1_LIMIT		BIT(7)
#define   LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR	BIT(8)
#define   LIMA_PP_IRQ_INVALID_PLIST_COMMAND	BIT(9)
#define   LIMA_PP_IRQ_CALL_STACK_UNDERFLOW	BIT(10)
#define   LIMA_PP_IRQ_CALL_STACK_OVERFLOW	BIT(11)
#define   LIMA_PP_IRQ_RESET_COMPLETED		BIT(12)
#define LIMA_PP_WRITE_BOUNDARY_LOW	0x1044
#define LIMA_PP_BUS_ERROR_STATUS	0x1050
#define LIMA_PP_PERF_CNT_0_ENABLE	0x1080
#define LIMA_PP_PERF_CNT_0_SRC		0x1084
#define LIMA_PP_PERF_CNT_0_LIMIT	0x1088
#define LIMA_PP_PERF_CNT_0_VALUE	0x108c
#define LIMA_PP_PERF_CNT_1_ENABLE	0x10a0
#define LIMA_PP_PERF_CNT_1_SRC		0x10a4
#define LIMA_PP_PERF_CNT_1_LIMIT	0x10a8
#define LIMA_PP_PERF_CNT_1_VALUE	0x10ac
#define LIMA_PP_PERFMON_CONTR		0x10b0
#define LIMA_PP_PERFMON_BASE		0x10b4

#define LIMA_PP_IRQ_MASK_ALL                 \
	(                                    \
	 LIMA_PP_IRQ_END_OF_FRAME          | \
	 LIMA_PP_IRQ_END_OF_TILE           | \
	 LIMA_PP_IRQ_HANG                  | \
	 LIMA_PP_IRQ_FORCE_HANG            | \
	 LIMA_PP_IRQ_BUS_ERROR             | \
	 LIMA_PP_IRQ_BUS_STOP              | \
	 LIMA_PP_IRQ_CNT_0_LIMIT           | \
	 LIMA_PP_IRQ_CNT_1_LIMIT           | \
	 LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR  | \
	 LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
	 LIMA_PP_IRQ_CALL_STACK_UNDERFLOW  | \
	 LIMA_PP_IRQ_CALL_STACK_OVERFLOW   | \
	 LIMA_PP_IRQ_RESET_COMPLETED)

#define LIMA_PP_IRQ_MASK_ERROR               \
	(                                    \
	 LIMA_PP_IRQ_FORCE_HANG            | \
	 LIMA_PP_IRQ_BUS_ERROR             | \
	 LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR  | \
	 LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
	 LIMA_PP_IRQ_CALL_STACK_UNDERFLOW  | \
	 LIMA_PP_IRQ_CALL_STACK_OVERFLOW)

#define LIMA_PP_IRQ_MASK_USED                \
	(                                    \
	 LIMA_PP_IRQ_END_OF_FRAME          | \
	 LIMA_PP_IRQ_MASK_ERROR)

/* MMU regs */
#define LIMA_MMU_DTE_ADDR		0x0000
#define LIMA_MMU_STATUS			0x0004
#define   LIMA_MMU_STATUS_PAGING_ENABLED	BIT(0)
#define   LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE	BIT(1)
#define   LIMA_MMU_STATUS_STALL_ACTIVE		BIT(2)
#define   LIMA_MMU_STATUS_IDLE			BIT(3)
#define   LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY	BIT(4)
#define   LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE	BIT(5)
/* extract the 5-bit bus id field from a STATUS value */
#define   LIMA_MMU_STATUS_BUS_ID(x)		(((x) >> 6) & 0x1F)
#define LIMA_MMU_COMMAND		0x0008
#define   LIMA_MMU_COMMAND_ENABLE_PAGING	0x00
#define   LIMA_MMU_COMMAND_DISABLE_PAGING	0x01
#define   LIMA_MMU_COMMAND_ENABLE_STALL		0x02
#define   LIMA_MMU_COMMAND_DISABLE_STALL	0x03
#define   LIMA_MMU_COMMAND_ZAP_CACHE		0x04
#define   LIMA_MMU_COMMAND_PAGE_FAULT_DONE	0x05
#define   LIMA_MMU_COMMAND_HARD_RESET		0x06
#define LIMA_MMU_PAGE_FAULT_ADDR	0x000C
#define LIMA_MMU_ZAP_ONE_LINE		0x0010
#define LIMA_MMU_INT_RAWSTAT		0x0014
#define LIMA_MMU_INT_CLEAR		0x0018
#define LIMA_MMU_INT_MASK		0x001C
#define   LIMA_MMU_INT_PAGE_FAULT	BIT(0)
#define   LIMA_MMU_INT_READ_BUS_ERROR	BIT(1)
#define LIMA_MMU_INT_STATUS		0x0020

/* GPU page table entry flags */
#define LIMA_VM_FLAG_PRESENT		BIT(0)
#define LIMA_VM_FLAG_READ_PERMISSION	BIT(1)
#define LIMA_VM_FLAG_WRITE_PERMISSION	BIT(2)
#define LIMA_VM_FLAG_OVERRIDE_CACHE	BIT(3)
#define LIMA_VM_FLAG_WRITE_CACHEABLE	BIT(4)
#define LIMA_VM_FLAG_WRITE_ALLOCATE	BIT(5)
#define LIMA_VM_FLAG_WRITE_BUFFERABLE	BIT(6)
#define LIMA_VM_FLAG_READ_CACHEABLE	BIT(7)
#define LIMA_VM_FLAG_READ_ALLOCATE	BIT(8)
#define LIMA_VM_FLAG_MASK		0x1FF

#define LIMA_VM_FLAGS_CACHE (			 \
		LIMA_VM_FLAG_PRESENT |		 \
		LIMA_VM_FLAG_READ_PERMISSION |	 \
		LIMA_VM_FLAG_WRITE_PERMISSION |	 \
		LIMA_VM_FLAG_OVERRIDE_CACHE |	 \
		LIMA_VM_FLAG_WRITE_CACHEABLE |	 \
		LIMA_VM_FLAG_WRITE_BUFFERABLE |	 \
		LIMA_VM_FLAG_READ_CACHEABLE |	 \
		LIMA_VM_FLAG_READ_ALLOCATE)

#define LIMA_VM_FLAGS_UNCACHE (			\
		LIMA_VM_FLAG_PRESENT |		\
		LIMA_VM_FLAG_READ_PERMISSION |	\
		LIMA_VM_FLAG_WRITE_PERMISSION)

/* DLBU regs */
#define LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR  0x0000
#define	LIMA_DLBU_MASTER_TLLIST_VADDR      0x0004
#define	LIMA_DLBU_TLLIST_VBASEADDR         0x0008
#define	LIMA_DLBU_FB_DIM                   0x000C
#define	LIMA_DLBU_TLLIST_CONF              0x0010
#define	LIMA_DLBU_START_TILE_POS           0x0014
#define	LIMA_DLBU_PP_ENABLE_MASK           0x0018

/* BCAST regs */
#define LIMA_BCAST_BROADCAST_MASK    0x0
#define LIMA_BCAST_INTERRUPT_MASK    0x4

#endif
+404
drivers/gpu/drm/lima/lima_sched.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/kthread.h> 5 + #include <linux/slab.h> 6 + 7 + #include "lima_drv.h" 8 + #include "lima_sched.h" 9 + #include "lima_vm.h" 10 + #include "lima_mmu.h" 11 + #include "lima_l2_cache.h" 12 + #include "lima_object.h" 13 + 14 + struct lima_fence { 15 + struct dma_fence base; 16 + struct lima_sched_pipe *pipe; 17 + }; 18 + 19 + static struct kmem_cache *lima_fence_slab; 20 + static int lima_fence_slab_refcnt; 21 + 22 + int lima_sched_slab_init(void) 23 + { 24 + if (!lima_fence_slab) { 25 + lima_fence_slab = kmem_cache_create( 26 + "lima_fence", sizeof(struct lima_fence), 0, 27 + SLAB_HWCACHE_ALIGN, NULL); 28 + if (!lima_fence_slab) 29 + return -ENOMEM; 30 + } 31 + 32 + lima_fence_slab_refcnt++; 33 + return 0; 34 + } 35 + 36 + void lima_sched_slab_fini(void) 37 + { 38 + if (!--lima_fence_slab_refcnt) { 39 + kmem_cache_destroy(lima_fence_slab); 40 + lima_fence_slab = NULL; 41 + } 42 + } 43 + 44 + static inline struct lima_fence *to_lima_fence(struct dma_fence *fence) 45 + { 46 + return container_of(fence, struct lima_fence, base); 47 + } 48 + 49 + static const char *lima_fence_get_driver_name(struct dma_fence *fence) 50 + { 51 + return "lima"; 52 + } 53 + 54 + static const char *lima_fence_get_timeline_name(struct dma_fence *fence) 55 + { 56 + struct lima_fence *f = to_lima_fence(fence); 57 + 58 + return f->pipe->base.name; 59 + } 60 + 61 + static void lima_fence_release_rcu(struct rcu_head *rcu) 62 + { 63 + struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); 64 + struct lima_fence *fence = to_lima_fence(f); 65 + 66 + kmem_cache_free(lima_fence_slab, fence); 67 + } 68 + 69 + static void lima_fence_release(struct dma_fence *fence) 70 + { 71 + struct lima_fence *f = to_lima_fence(fence); 72 + 73 + call_rcu(&f->base.rcu, lima_fence_release_rcu); 74 + } 75 + 76 + static const struct dma_fence_ops lima_fence_ops = { 77 + .get_driver_name 
= lima_fence_get_driver_name, 78 + .get_timeline_name = lima_fence_get_timeline_name, 79 + .release = lima_fence_release, 80 + }; 81 + 82 + static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe) 83 + { 84 + struct lima_fence *fence; 85 + 86 + fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL); 87 + if (!fence) 88 + return NULL; 89 + 90 + fence->pipe = pipe; 91 + dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock, 92 + pipe->fence_context, ++pipe->fence_seqno); 93 + 94 + return fence; 95 + } 96 + 97 + static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job) 98 + { 99 + return container_of(job, struct lima_sched_task, base); 100 + } 101 + 102 + static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched) 103 + { 104 + return container_of(sched, struct lima_sched_pipe, base); 105 + } 106 + 107 + int lima_sched_task_init(struct lima_sched_task *task, 108 + struct lima_sched_context *context, 109 + struct lima_bo **bos, int num_bos, 110 + struct lima_vm *vm) 111 + { 112 + int err, i; 113 + 114 + task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL); 115 + if (!task->bos) 116 + return -ENOMEM; 117 + 118 + for (i = 0; i < num_bos; i++) 119 + drm_gem_object_get(&bos[i]->gem); 120 + 121 + err = drm_sched_job_init(&task->base, &context->base, vm); 122 + if (err) { 123 + kfree(task->bos); 124 + return err; 125 + } 126 + 127 + task->num_bos = num_bos; 128 + task->vm = lima_vm_get(vm); 129 + return 0; 130 + } 131 + 132 + void lima_sched_task_fini(struct lima_sched_task *task) 133 + { 134 + int i; 135 + 136 + drm_sched_job_cleanup(&task->base); 137 + 138 + for (i = 0; i < task->num_dep; i++) 139 + dma_fence_put(task->dep[i]); 140 + 141 + kfree(task->dep); 142 + 143 + if (task->bos) { 144 + for (i = 0; i < task->num_bos; i++) 145 + drm_gem_object_put_unlocked(&task->bos[i]->gem); 146 + kfree(task->bos); 147 + } 148 + 149 + lima_vm_put(task->vm); 150 + } 151 + 152 + int 
lima_sched_task_add_dep(struct lima_sched_task *task, struct dma_fence *fence) 153 + { 154 + int i, new_dep = 4; 155 + 156 + /* same context's fence is definitly earlier then this task */ 157 + if (fence->context == task->base.s_fence->finished.context) { 158 + dma_fence_put(fence); 159 + return 0; 160 + } 161 + 162 + if (task->dep && task->num_dep == task->max_dep) 163 + new_dep = task->max_dep * 2; 164 + 165 + if (task->max_dep < new_dep) { 166 + void *dep = krealloc(task->dep, sizeof(*task->dep) * new_dep, GFP_KERNEL); 167 + 168 + if (!dep) 169 + return -ENOMEM; 170 + 171 + task->max_dep = new_dep; 172 + task->dep = dep; 173 + } 174 + 175 + for (i = 0; i < task->num_dep; i++) { 176 + if (task->dep[i]->context == fence->context && 177 + dma_fence_is_later(fence, task->dep[i])) { 178 + dma_fence_put(task->dep[i]); 179 + task->dep[i] = fence; 180 + return 0; 181 + } 182 + } 183 + 184 + task->dep[task->num_dep++] = fence; 185 + return 0; 186 + } 187 + 188 + int lima_sched_context_init(struct lima_sched_pipe *pipe, 189 + struct lima_sched_context *context, 190 + atomic_t *guilty) 191 + { 192 + struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL; 193 + 194 + return drm_sched_entity_init(&context->base, &rq, 1, guilty); 195 + } 196 + 197 + void lima_sched_context_fini(struct lima_sched_pipe *pipe, 198 + struct lima_sched_context *context) 199 + { 200 + drm_sched_entity_fini(&context->base); 201 + } 202 + 203 + struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context, 204 + struct lima_sched_task *task) 205 + { 206 + struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished); 207 + 208 + drm_sched_entity_push_job(&task->base, &context->base); 209 + return fence; 210 + } 211 + 212 + static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job, 213 + struct drm_sched_entity *entity) 214 + { 215 + struct lima_sched_task *task = to_lima_task(job); 216 + int i; 217 + 218 + for (i = 0; i < task->num_dep; 
i++) { 219 + struct dma_fence *fence = task->dep[i]; 220 + 221 + if (!task->dep[i]) 222 + continue; 223 + 224 + task->dep[i] = NULL; 225 + 226 + if (!dma_fence_is_signaled(fence)) 227 + return fence; 228 + 229 + dma_fence_put(fence); 230 + } 231 + 232 + return NULL; 233 + } 234 + 235 + static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job) 236 + { 237 + struct lima_sched_task *task = to_lima_task(job); 238 + struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); 239 + struct lima_fence *fence; 240 + struct dma_fence *ret; 241 + struct lima_vm *vm = NULL, *last_vm = NULL; 242 + int i; 243 + 244 + /* after GPU reset */ 245 + if (job->s_fence->finished.error < 0) 246 + return NULL; 247 + 248 + fence = lima_fence_create(pipe); 249 + if (!fence) 250 + return NULL; 251 + task->fence = &fence->base; 252 + 253 + /* for caller usage of the fence, otherwise irq handler 254 + * may consume the fence before caller use it 255 + */ 256 + ret = dma_fence_get(task->fence); 257 + 258 + pipe->current_task = task; 259 + 260 + /* this is needed for MMU to work correctly, otherwise GP/PP 261 + * will hang or page fault for unknown reason after running for 262 + * a while. 263 + * 264 + * Need to investigate: 265 + * 1. is it related to TLB 266 + * 2. how much performance will be affected by L2 cache flush 267 + * 3. can we reduce the calling of this function because all 268 + * GP/PP use the same L2 cache on mali400 269 + * 270 + * TODO: 271 + * 1. move this to task fini to save some wait time? 272 + * 2. when GP/PP use different l2 cache, need PP wait GP l2 273 + * cache flush? 
274 + */ 275 + for (i = 0; i < pipe->num_l2_cache; i++) 276 + lima_l2_cache_flush(pipe->l2_cache[i]); 277 + 278 + if (task->vm != pipe->current_vm) { 279 + vm = lima_vm_get(task->vm); 280 + last_vm = pipe->current_vm; 281 + pipe->current_vm = task->vm; 282 + } 283 + 284 + if (pipe->bcast_mmu) 285 + lima_mmu_switch_vm(pipe->bcast_mmu, vm); 286 + else { 287 + for (i = 0; i < pipe->num_mmu; i++) 288 + lima_mmu_switch_vm(pipe->mmu[i], vm); 289 + } 290 + 291 + if (last_vm) 292 + lima_vm_put(last_vm); 293 + 294 + pipe->error = false; 295 + pipe->task_run(pipe, task); 296 + 297 + return task->fence; 298 + } 299 + 300 + static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe, 301 + struct lima_sched_task *task) 302 + { 303 + drm_sched_stop(&pipe->base); 304 + 305 + if (task) 306 + drm_sched_increase_karma(&task->base); 307 + 308 + pipe->task_error(pipe); 309 + 310 + if (pipe->bcast_mmu) 311 + lima_mmu_page_fault_resume(pipe->bcast_mmu); 312 + else { 313 + int i; 314 + 315 + for (i = 0; i < pipe->num_mmu; i++) 316 + lima_mmu_page_fault_resume(pipe->mmu[i]); 317 + } 318 + 319 + if (pipe->current_vm) 320 + lima_vm_put(pipe->current_vm); 321 + 322 + pipe->current_vm = NULL; 323 + pipe->current_task = NULL; 324 + 325 + drm_sched_resubmit_jobs(&pipe->base); 326 + drm_sched_start(&pipe->base, true); 327 + } 328 + 329 + static void lima_sched_timedout_job(struct drm_sched_job *job) 330 + { 331 + struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); 332 + struct lima_sched_task *task = to_lima_task(job); 333 + 334 + DRM_ERROR("lima job timeout\n"); 335 + 336 + lima_sched_handle_error_task(pipe, task); 337 + } 338 + 339 + static void lima_sched_free_job(struct drm_sched_job *job) 340 + { 341 + struct lima_sched_task *task = to_lima_task(job); 342 + struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); 343 + struct lima_vm *vm = task->vm; 344 + struct lima_bo **bos = task->bos; 345 + int i; 346 + 347 + dma_fence_put(task->fence); 348 + 349 + for (i = 0; i < 
task->num_bos; i++) 350 + lima_vm_bo_del(vm, bos[i]); 351 + 352 + lima_sched_task_fini(task); 353 + kmem_cache_free(pipe->task_slab, task); 354 + } 355 + 356 + const struct drm_sched_backend_ops lima_sched_ops = { 357 + .dependency = lima_sched_dependency, 358 + .run_job = lima_sched_run_job, 359 + .timedout_job = lima_sched_timedout_job, 360 + .free_job = lima_sched_free_job, 361 + }; 362 + 363 + static void lima_sched_error_work(struct work_struct *work) 364 + { 365 + struct lima_sched_pipe *pipe = 366 + container_of(work, struct lima_sched_pipe, error_work); 367 + struct lima_sched_task *task = pipe->current_task; 368 + 369 + lima_sched_handle_error_task(pipe, task); 370 + } 371 + 372 + int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) 373 + { 374 + long timeout; 375 + 376 + if (lima_sched_timeout_ms <= 0) 377 + timeout = MAX_SCHEDULE_TIMEOUT; 378 + else 379 + timeout = msecs_to_jiffies(lima_sched_timeout_ms); 380 + 381 + pipe->fence_context = dma_fence_context_alloc(1); 382 + spin_lock_init(&pipe->fence_lock); 383 + 384 + INIT_WORK(&pipe->error_work, lima_sched_error_work); 385 + 386 + return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0, timeout, name); 387 + } 388 + 389 + void lima_sched_pipe_fini(struct lima_sched_pipe *pipe) 390 + { 391 + drm_sched_fini(&pipe->base); 392 + } 393 + 394 + void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe) 395 + { 396 + if (pipe->error) 397 + schedule_work(&pipe->error_work); 398 + else { 399 + struct lima_sched_task *task = pipe->current_task; 400 + 401 + pipe->task_fini(pipe); 402 + dma_fence_signal(task->fence); 403 + } 404 + }
+104
drivers/gpu/drm/lima/lima_sched.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_SCHED_H__ 5 + #define __LIMA_SCHED_H__ 6 + 7 + #include <drm/gpu_scheduler.h> 8 + 9 + struct lima_vm; 10 + 11 + struct lima_sched_task { 12 + struct drm_sched_job base; 13 + 14 + struct lima_vm *vm; 15 + void *frame; 16 + 17 + struct dma_fence **dep; 18 + int num_dep; 19 + int max_dep; 20 + 21 + struct lima_bo **bos; 22 + int num_bos; 23 + 24 + /* pipe fence */ 25 + struct dma_fence *fence; 26 + }; 27 + 28 + struct lima_sched_context { 29 + struct drm_sched_entity base; 30 + }; 31 + 32 + #define LIMA_SCHED_PIPE_MAX_MMU 8 33 + #define LIMA_SCHED_PIPE_MAX_L2_CACHE 2 34 + #define LIMA_SCHED_PIPE_MAX_PROCESSOR 8 35 + 36 + struct lima_ip; 37 + 38 + struct lima_sched_pipe { 39 + struct drm_gpu_scheduler base; 40 + 41 + u64 fence_context; 42 + u32 fence_seqno; 43 + spinlock_t fence_lock; 44 + 45 + struct lima_sched_task *current_task; 46 + struct lima_vm *current_vm; 47 + 48 + struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU]; 49 + int num_mmu; 50 + 51 + struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE]; 52 + int num_l2_cache; 53 + 54 + struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR]; 55 + int num_processor; 56 + 57 + struct lima_ip *bcast_processor; 58 + struct lima_ip *bcast_mmu; 59 + 60 + u32 done; 61 + bool error; 62 + atomic_t task; 63 + 64 + int frame_size; 65 + struct kmem_cache *task_slab; 66 + 67 + int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task); 68 + void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task); 69 + void (*task_fini)(struct lima_sched_pipe *pipe); 70 + void (*task_error)(struct lima_sched_pipe *pipe); 71 + void (*task_mmu_error)(struct lima_sched_pipe *pipe); 72 + 73 + struct work_struct error_work; 74 + }; 75 + 76 + int lima_sched_task_init(struct lima_sched_task *task, 77 + struct lima_sched_context *context, 78 + struct lima_bo **bos, int num_bos, 79 + 
struct lima_vm *vm); 80 + void lima_sched_task_fini(struct lima_sched_task *task); 81 + int lima_sched_task_add_dep(struct lima_sched_task *task, struct dma_fence *fence); 82 + 83 + int lima_sched_context_init(struct lima_sched_pipe *pipe, 84 + struct lima_sched_context *context, 85 + atomic_t *guilty); 86 + void lima_sched_context_fini(struct lima_sched_pipe *pipe, 87 + struct lima_sched_context *context); 88 + struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context, 89 + struct lima_sched_task *task); 90 + 91 + int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name); 92 + void lima_sched_pipe_fini(struct lima_sched_pipe *pipe); 93 + void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe); 94 + 95 + static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe) 96 + { 97 + pipe->error = true; 98 + pipe->task_mmu_error(pipe); 99 + } 100 + 101 + int lima_sched_slab_init(void); 102 + void lima_sched_slab_fini(void); 103 + 104 + #endif
+282
drivers/gpu/drm/lima/lima_vm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #include <linux/slab.h> 5 + #include <linux/dma-mapping.h> 6 + 7 + #include "lima_device.h" 8 + #include "lima_vm.h" 9 + #include "lima_object.h" 10 + #include "lima_regs.h" 11 + 12 + struct lima_bo_va { 13 + struct list_head list; 14 + unsigned int ref_count; 15 + 16 + struct drm_mm_node node; 17 + 18 + struct lima_vm *vm; 19 + }; 20 + 21 + #define LIMA_VM_PD_SHIFT 22 22 + #define LIMA_VM_PT_SHIFT 12 23 + #define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT) 24 + #define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT 25 + 26 + #define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1) 27 + #define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1) 28 + 29 + #define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT) 30 + #define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT) 31 + #define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT) 32 + #define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT) 33 + 34 + 35 + static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end) 36 + { 37 + u32 addr; 38 + 39 + for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) { 40 + u32 pbe = LIMA_PBE(addr); 41 + u32 bte = LIMA_BTE(addr); 42 + 43 + vm->bts[pbe].cpu[bte] = 0; 44 + } 45 + } 46 + 47 + static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma, 48 + u32 start, u32 end) 49 + { 50 + u64 addr; 51 + int i = 0; 52 + 53 + for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) { 54 + u32 pbe = LIMA_PBE(addr); 55 + u32 bte = LIMA_BTE(addr); 56 + 57 + if (!vm->bts[pbe].cpu) { 58 + dma_addr_t pts; 59 + u32 *pd; 60 + int j; 61 + 62 + vm->bts[pbe].cpu = dma_alloc_wc( 63 + vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, 64 + &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO); 65 + if (!vm->bts[pbe].cpu) { 66 + if (addr != start) 67 + lima_vm_unmap_page_table(vm, start, addr - 1); 68 + return -ENOMEM; 69 + } 70 + 71 + pts = vm->bts[pbe].dma; 
72 + pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT); 73 + for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) { 74 + pd[j] = pts | LIMA_VM_FLAG_PRESENT; 75 + pts += LIMA_PAGE_SIZE; 76 + } 77 + } 78 + 79 + vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE; 80 + } 81 + 82 + return 0; 83 + } 84 + 85 + static struct lima_bo_va * 86 + lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo) 87 + { 88 + struct lima_bo_va *bo_va, *ret = NULL; 89 + 90 + list_for_each_entry(bo_va, &bo->va, list) { 91 + if (bo_va->vm == vm) { 92 + ret = bo_va; 93 + break; 94 + } 95 + } 96 + 97 + return ret; 98 + } 99 + 100 + int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) 101 + { 102 + struct lima_bo_va *bo_va; 103 + int err; 104 + 105 + mutex_lock(&bo->lock); 106 + 107 + bo_va = lima_vm_bo_find(vm, bo); 108 + if (bo_va) { 109 + bo_va->ref_count++; 110 + mutex_unlock(&bo->lock); 111 + return 0; 112 + } 113 + 114 + /* should not create new bo_va if not asked by caller */ 115 + if (!create) { 116 + mutex_unlock(&bo->lock); 117 + return -ENOENT; 118 + } 119 + 120 + bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL); 121 + if (!bo_va) { 122 + err = -ENOMEM; 123 + goto err_out0; 124 + } 125 + 126 + bo_va->vm = vm; 127 + bo_va->ref_count = 1; 128 + 129 + mutex_lock(&vm->lock); 130 + 131 + err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size); 132 + if (err) 133 + goto err_out1; 134 + 135 + err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start, 136 + bo_va->node.start + bo_va->node.size - 1); 137 + if (err) 138 + goto err_out2; 139 + 140 + mutex_unlock(&vm->lock); 141 + 142 + list_add_tail(&bo_va->list, &bo->va); 143 + 144 + mutex_unlock(&bo->lock); 145 + return 0; 146 + 147 + err_out2: 148 + drm_mm_remove_node(&bo_va->node); 149 + err_out1: 150 + mutex_unlock(&vm->lock); 151 + kfree(bo_va); 152 + err_out0: 153 + mutex_unlock(&bo->lock); 154 + return err; 155 + } 156 + 157 + void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo) 158 + { 159 + 
struct lima_bo_va *bo_va; 160 + 161 + mutex_lock(&bo->lock); 162 + 163 + bo_va = lima_vm_bo_find(vm, bo); 164 + if (--bo_va->ref_count > 0) { 165 + mutex_unlock(&bo->lock); 166 + return; 167 + } 168 + 169 + mutex_lock(&vm->lock); 170 + 171 + lima_vm_unmap_page_table(vm, bo_va->node.start, 172 + bo_va->node.start + bo_va->node.size - 1); 173 + 174 + drm_mm_remove_node(&bo_va->node); 175 + 176 + mutex_unlock(&vm->lock); 177 + 178 + list_del(&bo_va->list); 179 + 180 + mutex_unlock(&bo->lock); 181 + 182 + kfree(bo_va); 183 + } 184 + 185 + u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo) 186 + { 187 + struct lima_bo_va *bo_va; 188 + u32 ret; 189 + 190 + mutex_lock(&bo->lock); 191 + 192 + bo_va = lima_vm_bo_find(vm, bo); 193 + ret = bo_va->node.start; 194 + 195 + mutex_unlock(&bo->lock); 196 + 197 + return ret; 198 + } 199 + 200 + struct lima_vm *lima_vm_create(struct lima_device *dev) 201 + { 202 + struct lima_vm *vm; 203 + 204 + vm = kzalloc(sizeof(*vm), GFP_KERNEL); 205 + if (!vm) 206 + return NULL; 207 + 208 + vm->dev = dev; 209 + mutex_init(&vm->lock); 210 + kref_init(&vm->refcount); 211 + 212 + vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma, 213 + GFP_KERNEL | __GFP_ZERO); 214 + if (!vm->pd.cpu) 215 + goto err_out0; 216 + 217 + if (dev->dlbu_cpu) { 218 + int err = lima_vm_map_page_table( 219 + vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU, 220 + LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1); 221 + if (err) 222 + goto err_out1; 223 + } 224 + 225 + drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start); 226 + 227 + return vm; 228 + 229 + err_out1: 230 + dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma); 231 + err_out0: 232 + kfree(vm); 233 + return NULL; 234 + } 235 + 236 + void lima_vm_release(struct kref *kref) 237 + { 238 + struct lima_vm *vm = container_of(kref, struct lima_vm, refcount); 239 + int i; 240 + 241 + drm_mm_takedown(&vm->mm); 242 + 243 + for (i = 0; i < LIMA_VM_NUM_BT; i++) { 244 + if (vm->bts[i].cpu) 245 + 
dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, 246 + vm->bts[i].cpu, vm->bts[i].dma); 247 + } 248 + 249 + if (vm->pd.cpu) 250 + dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma); 251 + 252 + kfree(vm); 253 + } 254 + 255 + void lima_vm_print(struct lima_vm *vm) 256 + { 257 + int i, j, k; 258 + u32 *pd, *pt; 259 + 260 + if (!vm->pd.cpu) 261 + return; 262 + 263 + pd = vm->pd.cpu; 264 + for (i = 0; i < LIMA_VM_NUM_BT; i++) { 265 + if (!vm->bts[i].cpu) 266 + continue; 267 + 268 + pt = vm->bts[i].cpu; 269 + for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) { 270 + int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j; 271 + 272 + printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]); 273 + 274 + for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) { 275 + u32 pte = *pt++; 276 + 277 + if (pte) 278 + printk(KERN_INFO " pt %03x:%08x\n", k, pte); 279 + } 280 + } 281 + } 282 + }
+62
drivers/gpu/drm/lima/lima_vm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_VM_H__ 5 + #define __LIMA_VM_H__ 6 + 7 + #include <drm/drm_mm.h> 8 + #include <linux/kref.h> 9 + 10 + #define LIMA_PAGE_SIZE 4096 11 + #define LIMA_PAGE_MASK (LIMA_PAGE_SIZE - 1) 12 + #define LIMA_PAGE_ENT_NUM (LIMA_PAGE_SIZE / sizeof(u32)) 13 + 14 + #define LIMA_VM_NUM_PT_PER_BT_SHIFT 3 15 + #define LIMA_VM_NUM_PT_PER_BT (1 << LIMA_VM_NUM_PT_PER_BT_SHIFT) 16 + #define LIMA_VM_NUM_BT (LIMA_PAGE_ENT_NUM >> LIMA_VM_NUM_PT_PER_BT_SHIFT) 17 + 18 + #define LIMA_VA_RESERVE_START 0xFFF00000 19 + #define LIMA_VA_RESERVE_DLBU LIMA_VA_RESERVE_START 20 + #define LIMA_VA_RESERVE_END 0x100000000 21 + 22 + struct lima_device; 23 + 24 + struct lima_vm_page { 25 + u32 *cpu; 26 + dma_addr_t dma; 27 + }; 28 + 29 + struct lima_vm { 30 + struct mutex lock; 31 + struct kref refcount; 32 + 33 + struct drm_mm mm; 34 + 35 + struct lima_device *dev; 36 + 37 + struct lima_vm_page pd; 38 + struct lima_vm_page bts[LIMA_VM_NUM_BT]; 39 + }; 40 + 41 + int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create); 42 + void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo); 43 + 44 + u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo); 45 + 46 + struct lima_vm *lima_vm_create(struct lima_device *dev); 47 + void lima_vm_release(struct kref *kref); 48 + 49 + static inline struct lima_vm *lima_vm_get(struct lima_vm *vm) 50 + { 51 + kref_get(&vm->refcount); 52 + return vm; 53 + } 54 + 55 + static inline void lima_vm_put(struct lima_vm *vm) 56 + { 57 + kref_put(&vm->refcount, lima_vm_release); 58 + } 59 + 60 + void lima_vm_print(struct lima_vm *vm); 61 + 62 + #endif
+22
drivers/gpu/drm/panel/Kconfig
··· 38 38 that it can be automatically turned off when the panel goes into a 39 39 low power state. 40 40 41 + config DRM_PANEL_FEIYANG_FY07024DI26A30D 42 + tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel" 43 + depends on OF 44 + depends on DRM_MIPI_DSI 45 + depends on BACKLIGHT_CLASS_DEVICE 46 + help 47 + Say Y if you want to enable support for panels based on the 48 + Feiyang FY07024DI26A30-D MIPI-DSI interface. 49 + 41 50 config DRM_PANEL_ILITEK_IL9322 42 51 tristate "Ilitek ILI9322 320x240 QVGA panels" 43 52 depends on OF && SPI ··· 157 148 help 158 149 Say Y here if you want to enable support for Raydium RM68200 159 150 720x1280 DSI video mode panel. 151 + 152 + config DRM_PANEL_ROCKTECH_JH057N00900 153 + tristate "Rocktech JH057N00900 MIPI touchscreen panel" 154 + depends on OF 155 + depends on DRM_MIPI_DSI 156 + depends on BACKLIGHT_CLASS_DEVICE 157 + help 158 + Say Y here if you want to enable support for Rocktech JH057N00900 159 + MIPI DSI panel as e.g. used in the Librem 5 devkit. It has a 160 + resolution of 720x1440 pixels, a built in backlight and touch 161 + controller. 162 + Touch input support is provided by the goodix driver and needs to be 163 + selected separately. 160 164 161 165 config DRM_PANEL_RONBO_RB070D30 162 166 tristate "Ronbo Electronics RB070D30 panel"
+2
drivers/gpu/drm/panel/Makefile
··· 2 2 obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o 3 3 obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o 4 4 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o 5 + obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o 5 6 obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o 6 7 obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o 7 8 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o ··· 14 13 obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o 15 14 obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o 16 15 obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o 16 + obj-$(CONFIG_DRM_PANEL_ROCKTECH_JH057N00900) += panel-rocktech-jh057n00900.o 17 17 obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o 18 18 obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o 19 19 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
+272
drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright (C) 2018 Amarula Solutions 4 + * Author: Jagan Teki <jagan@amarulasolutions.com> 5 + */ 6 + 7 + #include <drm/drm_mipi_dsi.h> 8 + #include <drm/drm_modes.h> 9 + #include <drm/drm_panel.h> 10 + #include <drm/drm_print.h> 11 + 12 + #include <linux/backlight.h> 13 + #include <linux/gpio/consumer.h> 14 + #include <linux/delay.h> 15 + #include <linux/module.h> 16 + #include <linux/of_device.h> 17 + #include <linux/regulator/consumer.h> 18 + 19 + #define FEIYANG_INIT_CMD_LEN 2 20 + 21 + struct feiyang { 22 + struct drm_panel panel; 23 + struct mipi_dsi_device *dsi; 24 + 25 + struct backlight_device *backlight; 26 + struct regulator *dvdd; 27 + struct regulator *avdd; 28 + struct gpio_desc *reset; 29 + }; 30 + 31 + static inline struct feiyang *panel_to_feiyang(struct drm_panel *panel) 32 + { 33 + return container_of(panel, struct feiyang, panel); 34 + } 35 + 36 + struct feiyang_init_cmd { 37 + u8 data[FEIYANG_INIT_CMD_LEN]; 38 + }; 39 + 40 + static const struct feiyang_init_cmd feiyang_init_cmds[] = { 41 + { .data = { 0x80, 0x58 } }, 42 + { .data = { 0x81, 0x47 } }, 43 + { .data = { 0x82, 0xD4 } }, 44 + { .data = { 0x83, 0x88 } }, 45 + { .data = { 0x84, 0xA9 } }, 46 + { .data = { 0x85, 0xC3 } }, 47 + { .data = { 0x86, 0x82 } }, 48 + }; 49 + 50 + static int feiyang_prepare(struct drm_panel *panel) 51 + { 52 + struct feiyang *ctx = panel_to_feiyang(panel); 53 + struct mipi_dsi_device *dsi = ctx->dsi; 54 + unsigned int i; 55 + int ret; 56 + 57 + ret = regulator_enable(ctx->dvdd); 58 + if (ret) 59 + return ret; 60 + 61 + /* T1 (dvdd start + dvdd rise) 0 < T1 <= 10ms */ 62 + msleep(10); 63 + 64 + ret = regulator_enable(ctx->avdd); 65 + if (ret) 66 + return ret; 67 + 68 + /* T3 (dvdd rise + avdd start + avdd rise) T3 >= 20ms */ 69 + msleep(20); 70 + 71 + gpiod_set_value(ctx->reset, 0); 72 + 73 + /* 74 + * T5 + T6 (avdd rise + video & logic signal rise) 75 + * T5 >= 10ms, 0 < T6 <= 10ms 76 + */ 77 + msleep(20); 
78 + 79 + gpiod_set_value(ctx->reset, 1); 80 + 81 + /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */ 82 + msleep(200); 83 + 84 + for (i = 0; i < ARRAY_SIZE(feiyang_init_cmds); i++) { 85 + const struct feiyang_init_cmd *cmd = 86 + &feiyang_init_cmds[i]; 87 + 88 + ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, 89 + FEIYANG_INIT_CMD_LEN); 90 + if (ret < 0) 91 + return ret; 92 + } 93 + 94 + return 0; 95 + } 96 + 97 + static int feiyang_enable(struct drm_panel *panel) 98 + { 99 + struct feiyang *ctx = panel_to_feiyang(panel); 100 + 101 + /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */ 102 + msleep(200); 103 + 104 + mipi_dsi_dcs_set_display_on(ctx->dsi); 105 + backlight_enable(ctx->backlight); 106 + 107 + return 0; 108 + } 109 + 110 + static int feiyang_disable(struct drm_panel *panel) 111 + { 112 + struct feiyang *ctx = panel_to_feiyang(panel); 113 + 114 + backlight_disable(ctx->backlight); 115 + return mipi_dsi_dcs_set_display_off(ctx->dsi); 116 + } 117 + 118 + static int feiyang_unprepare(struct drm_panel *panel) 119 + { 120 + struct feiyang *ctx = panel_to_feiyang(panel); 121 + int ret; 122 + 123 + ret = mipi_dsi_dcs_set_display_off(ctx->dsi); 124 + if (ret < 0) 125 + DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n", 126 + ret); 127 + 128 + ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); 129 + if (ret < 0) 130 + DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n", 131 + ret); 132 + 133 + /* T13 (backlight fall + video & logic signal fall) T13 >= 200ms */ 134 + msleep(200); 135 + 136 + gpiod_set_value(ctx->reset, 0); 137 + 138 + regulator_disable(ctx->avdd); 139 + 140 + /* T11 (dvdd rise to fall) 0 < T11 <= 10ms */ 141 + msleep(10); 142 + 143 + regulator_disable(ctx->dvdd); 144 + 145 + return 0; 146 + } 147 + 148 + static const struct drm_display_mode feiyang_default_mode = { 149 + .clock = 55000, 150 + 151 + .hdisplay = 1024, 152 + .hsync_start = 1024 + 310, 153 + .hsync_end = 1024 + 310 + 20, 154 + .htotal = 
1024 + 310 + 20 + 90, 155 + 156 + .vdisplay = 600, 157 + .vsync_start = 600 + 12, 158 + .vsync_end = 600 + 12 + 2, 159 + .vtotal = 600 + 12 + 2 + 21, 160 + .vrefresh = 60, 161 + 162 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 163 + }; 164 + 165 + static int feiyang_get_modes(struct drm_panel *panel) 166 + { 167 + struct drm_connector *connector = panel->connector; 168 + struct feiyang *ctx = panel_to_feiyang(panel); 169 + struct drm_display_mode *mode; 170 + 171 + mode = drm_mode_duplicate(panel->drm, &feiyang_default_mode); 172 + if (!mode) { 173 + DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n", 174 + feiyang_default_mode.hdisplay, 175 + feiyang_default_mode.vdisplay, 176 + feiyang_default_mode.vrefresh); 177 + return -ENOMEM; 178 + } 179 + 180 + drm_mode_set_name(mode); 181 + 182 + drm_mode_probed_add(connector, mode); 183 + 184 + return 1; 185 + } 186 + 187 + static const struct drm_panel_funcs feiyang_funcs = { 188 + .disable = feiyang_disable, 189 + .unprepare = feiyang_unprepare, 190 + .prepare = feiyang_prepare, 191 + .enable = feiyang_enable, 192 + .get_modes = feiyang_get_modes, 193 + }; 194 + 195 + static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) 196 + { 197 + struct feiyang *ctx; 198 + int ret; 199 + 200 + ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); 201 + if (!ctx) 202 + return -ENOMEM; 203 + 204 + mipi_dsi_set_drvdata(dsi, ctx); 205 + ctx->dsi = dsi; 206 + 207 + drm_panel_init(&ctx->panel); 208 + ctx->panel.dev = &dsi->dev; 209 + ctx->panel.funcs = &feiyang_funcs; 210 + 211 + ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd"); 212 + if (IS_ERR(ctx->dvdd)) { 213 + DRM_DEV_ERROR(&dsi->dev, "Couldn't get dvdd regulator\n"); 214 + return PTR_ERR(ctx->dvdd); 215 + } 216 + 217 + ctx->avdd = devm_regulator_get(&dsi->dev, "avdd"); 218 + if (IS_ERR(ctx->avdd)) { 219 + DRM_DEV_ERROR(&dsi->dev, "Couldn't get avdd regulator\n"); 220 + return PTR_ERR(ctx->avdd); 221 + } 222 + 223 + ctx->reset = 
devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); 224 + if (IS_ERR(ctx->reset)) { 225 + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n"); 226 + return PTR_ERR(ctx->reset); 227 + } 228 + 229 + ctx->backlight = devm_of_find_backlight(&dsi->dev); 230 + if (IS_ERR(ctx->backlight)) 231 + return PTR_ERR(ctx->backlight); 232 + 233 + ret = drm_panel_add(&ctx->panel); 234 + if (ret < 0) 235 + return ret; 236 + 237 + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST; 238 + dsi->format = MIPI_DSI_FMT_RGB888; 239 + dsi->lanes = 4; 240 + 241 + return mipi_dsi_attach(dsi); 242 + } 243 + 244 + static int feiyang_dsi_remove(struct mipi_dsi_device *dsi) 245 + { 246 + struct feiyang *ctx = mipi_dsi_get_drvdata(dsi); 247 + 248 + mipi_dsi_detach(dsi); 249 + drm_panel_remove(&ctx->panel); 250 + 251 + return 0; 252 + } 253 + 254 + static const struct of_device_id feiyang_of_match[] = { 255 + { .compatible = "feiyang,fy07024di26a30d", }, 256 + { /* sentinel */ } 257 + }; 258 + MODULE_DEVICE_TABLE(of, feiyang_of_match); 259 + 260 + static struct mipi_dsi_driver feiyang_driver = { 261 + .probe = feiyang_dsi_probe, 262 + .remove = feiyang_dsi_remove, 263 + .driver = { 264 + .name = "feiyang-fy07024di26a30d", 265 + .of_match_table = feiyang_of_match, 266 + }, 267 + }; 268 + module_mipi_dsi_driver(feiyang_driver); 269 + 270 + MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>"); 271 + MODULE_DESCRIPTION("Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"); 272 + MODULE_LICENSE("GPL");
+12 -8
drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
··· 67 67 }; 68 68 69 69 static const struct drm_display_mode default_mode = { 70 - .clock = 32729, 70 + .clock = 29700, 71 71 .hdisplay = 480, 72 - .hsync_start = 480 + 120, 73 - .hsync_end = 480 + 120 + 63, 74 - .htotal = 480 + 120 + 63 + 120, 72 + .hsync_start = 480 + 98, 73 + .hsync_end = 480 + 98 + 32, 74 + .htotal = 480 + 98 + 32 + 98, 75 75 .vdisplay = 800, 76 - .vsync_start = 800 + 12, 77 - .vsync_end = 800 + 12 + 12, 78 - .vtotal = 800 + 12 + 12 + 12, 76 + .vsync_start = 800 + 15, 77 + .vsync_end = 800 + 15 + 10, 78 + .vtotal = 800 + 15 + 10 + 14, 79 79 .vrefresh = 50, 80 80 .flags = 0, 81 81 .width_mm = 52, ··· 247 247 248 248 /* Send Command GRAM memory write (no parameters) */ 249 249 dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START); 250 + 251 + /* Wait a short while to let the panel be ready before the 1st frame */ 252 + mdelay(10); 250 253 251 254 return 0; 252 255 } ··· 436 433 ctx->supply = devm_regulator_get(dev, "power"); 437 434 if (IS_ERR(ctx->supply)) { 438 435 ret = PTR_ERR(ctx->supply); 439 - dev_err(dev, "failed to request regulator: %d\n", ret); 436 + if (ret != -EPROBE_DEFER) 437 + dev_err(dev, "failed to request regulator: %d\n", ret); 440 438 return ret; 441 439 } 442 440
+2 -1
drivers/gpu/drm/panel/panel-raydium-rm68200.c
··· 383 383 ctx->supply = devm_regulator_get(dev, "power"); 384 384 if (IS_ERR(ctx->supply)) { 385 385 ret = PTR_ERR(ctx->supply); 386 - dev_err(dev, "cannot get regulator: %d\n", ret); 386 + if (ret != -EPROBE_DEFER) 387 + dev_err(dev, "cannot get regulator: %d\n", ret); 387 388 return ret; 388 389 } 389 390
+386
drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Rockteck jh057n00900 5.5" MIPI-DSI panel driver 4 + * 5 + * Copyright (C) Purism SPC 2019 6 + */ 7 + 8 + #include <drm/drm_mipi_dsi.h> 9 + #include <drm/drm_modes.h> 10 + #include <drm/drm_panel.h> 11 + #include <drm/drm_print.h> 12 + #include <linux/backlight.h> 13 + #include <linux/debugfs.h> 14 + #include <linux/delay.h> 15 + #include <linux/gpio/consumer.h> 16 + #include <linux/media-bus-format.h> 17 + #include <linux/module.h> 18 + #include <video/display_timing.h> 19 + #include <video/mipi_display.h> 20 + 21 + #define DRV_NAME "panel-rocktech-jh057n00900" 22 + 23 + /* Manufacturer specific Commands send via DSI */ 24 + #define ST7703_CMD_ALL_PIXEL_OFF 0x22 25 + #define ST7703_CMD_ALL_PIXEL_ON 0x23 26 + #define ST7703_CMD_SETDISP 0xB2 27 + #define ST7703_CMD_SETRGBIF 0xB3 28 + #define ST7703_CMD_SETCYC 0xB4 29 + #define ST7703_CMD_SETBGP 0xB5 30 + #define ST7703_CMD_SETVCOM 0xB6 31 + #define ST7703_CMD_SETOTP 0xB7 32 + #define ST7703_CMD_SETPOWER_EXT 0xB8 33 + #define ST7703_CMD_SETEXTC 0xB9 34 + #define ST7703_CMD_SETMIPI 0xBA 35 + #define ST7703_CMD_SETVDC 0xBC 36 + #define ST7703_CMD_SETSCR 0xC0 37 + #define ST7703_CMD_SETPOWER 0xC1 38 + #define ST7703_CMD_SETPANEL 0xCC 39 + #define ST7703_CMD_SETGAMMA 0xE0 40 + #define ST7703_CMD_SETEQ 0xE3 41 + #define ST7703_CMD_SETGIP1 0xE9 42 + #define ST7703_CMD_SETGIP2 0xEA 43 + 44 + struct jh057n { 45 + struct device *dev; 46 + struct drm_panel panel; 47 + struct gpio_desc *reset_gpio; 48 + struct backlight_device *backlight; 49 + bool prepared; 50 + 51 + struct dentry *debugfs; 52 + }; 53 + 54 + static inline struct jh057n *panel_to_jh057n(struct drm_panel *panel) 55 + { 56 + return container_of(panel, struct jh057n, panel); 57 + } 58 + 59 + #define dsi_generic_write_seq(dsi, seq...) 
do { \ 60 + static const u8 d[] = { seq }; \ 61 + int ret; \ 62 + ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ 63 + if (ret < 0) \ 64 + return ret; \ 65 + } while (0) 66 + 67 + static int jh057n_init_sequence(struct jh057n *ctx) 68 + { 69 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 70 + struct device *dev = ctx->dev; 71 + int ret; 72 + 73 + /* 74 + * Init sequence was supplied by the panel vendor. Most of the commands 75 + * resemble the ST7703 but the number of parameters often don't match 76 + * so it's likely a clone. 77 + */ 78 + dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC, 79 + 0xF1, 0x12, 0x83); 80 + dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF, 81 + 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00, 82 + 0x00, 0x00); 83 + dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR, 84 + 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70, 85 + 0x00); 86 + dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E); 87 + dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B); 88 + dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80); 89 + dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30); 90 + dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ, 91 + 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00, 92 + 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10); 93 + dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08); 94 + msleep(20); 95 + 96 + dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F); 97 + dsi_generic_write_seq(dsi, 0xBF, 0x02, 0x11, 0x00); 98 + dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1, 99 + 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12, 100 + 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38, 101 + 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00, 102 + 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88, 103 + 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64, 104 + 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 105 + 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 106 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); 107 + dsi_generic_write_seq(dsi, 
ST7703_CMD_SETGIP2, 108 + 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 109 + 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88, 110 + 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13, 111 + 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 112 + 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00, 113 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 114 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0A, 115 + 0xA5, 0x00, 0x00, 0x00, 0x00); 116 + dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA, 117 + 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37, 118 + 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11, 119 + 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 120 + 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 121 + 0x11, 0x18); 122 + msleep(20); 123 + 124 + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); 125 + if (ret < 0) { 126 + DRM_DEV_ERROR(dev, "Failed to exit sleep mode"); 127 + return ret; 128 + } 129 + /* Panel is operational 120 msec after reset */ 130 + msleep(60); 131 + ret = mipi_dsi_dcs_set_display_on(dsi); 132 + if (ret) 133 + return ret; 134 + 135 + DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done"); 136 + return 0; 137 + } 138 + 139 + static int jh057n_enable(struct drm_panel *panel) 140 + { 141 + struct jh057n *ctx = panel_to_jh057n(panel); 142 + 143 + return backlight_enable(ctx->backlight); 144 + } 145 + 146 + static int jh057n_disable(struct drm_panel *panel) 147 + { 148 + struct jh057n *ctx = panel_to_jh057n(panel); 149 + 150 + return backlight_disable(ctx->backlight); 151 + } 152 + 153 + static int jh057n_unprepare(struct drm_panel *panel) 154 + { 155 + struct jh057n *ctx = panel_to_jh057n(panel); 156 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 157 + 158 + if (!ctx->prepared) 159 + return 0; 160 + 161 + mipi_dsi_dcs_set_display_off(dsi); 162 + ctx->prepared = false; 163 + 164 + return 0; 165 + } 166 + 167 + static int jh057n_prepare(struct drm_panel *panel) 168 + { 169 + struct jh057n *ctx = panel_to_jh057n(panel); 170 + int ret; 171 + 172 + 
if (ctx->prepared) 173 + return 0; 174 + 175 + DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel."); 176 + gpiod_set_value_cansleep(ctx->reset_gpio, 1); 177 + usleep_range(20, 40); 178 + gpiod_set_value_cansleep(ctx->reset_gpio, 0); 179 + msleep(20); 180 + 181 + ret = jh057n_init_sequence(ctx); 182 + if (ret < 0) { 183 + DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d", ret); 184 + return ret; 185 + } 186 + 187 + ctx->prepared = true; 188 + 189 + return 0; 190 + } 191 + 192 + static const struct drm_display_mode default_mode = { 193 + .hdisplay = 720, 194 + .hsync_start = 720 + 90, 195 + .hsync_end = 720 + 90 + 20, 196 + .htotal = 720 + 90 + 20 + 20, 197 + .vdisplay = 1440, 198 + .vsync_start = 1440 + 20, 199 + .vsync_end = 1440 + 20 + 4, 200 + .vtotal = 1440 + 20 + 4 + 12, 201 + .vrefresh = 60, 202 + .clock = 75276, 203 + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 204 + .width_mm = 65, 205 + .height_mm = 130, 206 + }; 207 + 208 + static int jh057n_get_modes(struct drm_panel *panel) 209 + { 210 + struct jh057n *ctx = panel_to_jh057n(panel); 211 + struct drm_display_mode *mode; 212 + 213 + mode = drm_mode_duplicate(panel->drm, &default_mode); 214 + if (!mode) { 215 + DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u", 216 + default_mode.hdisplay, default_mode.vdisplay, 217 + default_mode.vrefresh); 218 + return -ENOMEM; 219 + } 220 + 221 + drm_mode_set_name(mode); 222 + 223 + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 224 + panel->connector->display_info.width_mm = mode->width_mm; 225 + panel->connector->display_info.height_mm = mode->height_mm; 226 + drm_mode_probed_add(panel->connector, mode); 227 + 228 + return 1; 229 + } 230 + 231 + static const struct drm_panel_funcs jh057n_drm_funcs = { 232 + .disable = jh057n_disable, 233 + .unprepare = jh057n_unprepare, 234 + .prepare = jh057n_prepare, 235 + .enable = jh057n_enable, 236 + .get_modes = jh057n_get_modes, 237 + }; 238 + 239 + static int allpixelson_set(void *data, u64 
val) 240 + { 241 + struct jh057n *ctx = data; 242 + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 243 + 244 + DRM_DEV_DEBUG_DRIVER(ctx->dev, "Setting all pixels on"); 245 + dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON); 246 + msleep(val * 1000); 247 + /* Reset the panel to get video back */ 248 + drm_panel_disable(&ctx->panel); 249 + drm_panel_unprepare(&ctx->panel); 250 + drm_panel_prepare(&ctx->panel); 251 + drm_panel_enable(&ctx->panel); 252 + 253 + return 0; 254 + } 255 + 256 + DEFINE_SIMPLE_ATTRIBUTE(allpixelson_fops, NULL, 257 + allpixelson_set, "%llu\n"); 258 + 259 + static int jh057n_debugfs_init(struct jh057n *ctx) 260 + { 261 + struct dentry *f; 262 + 263 + ctx->debugfs = debugfs_create_dir(DRV_NAME, NULL); 264 + if (!ctx->debugfs) 265 + return -ENOMEM; 266 + 267 + f = debugfs_create_file("allpixelson", 0600, 268 + ctx->debugfs, ctx, &allpixelson_fops); 269 + if (!f) 270 + return -ENOMEM; 271 + 272 + return 0; 273 + } 274 + 275 + static void jh057n_debugfs_remove(struct jh057n *ctx) 276 + { 277 + debugfs_remove_recursive(ctx->debugfs); 278 + ctx->debugfs = NULL; 279 + } 280 + 281 + static int jh057n_probe(struct mipi_dsi_device *dsi) 282 + { 283 + struct device *dev = &dsi->dev; 284 + struct jh057n *ctx; 285 + int ret; 286 + 287 + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 288 + if (!ctx) 289 + return -ENOMEM; 290 + 291 + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); 292 + if (IS_ERR(ctx->reset_gpio)) { 293 + DRM_DEV_ERROR(dev, "cannot get reset gpio"); 294 + return PTR_ERR(ctx->reset_gpio); 295 + } 296 + 297 + mipi_dsi_set_drvdata(dsi, ctx); 298 + 299 + ctx->dev = dev; 300 + 301 + dsi->lanes = 4; 302 + dsi->format = MIPI_DSI_FMT_RGB888; 303 + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | 304 + MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE; 305 + 306 + ctx->backlight = devm_of_find_backlight(dev); 307 + if (IS_ERR(ctx->backlight)) 308 + return PTR_ERR(ctx->backlight); 309 + 310 + 
drm_panel_init(&ctx->panel); 311 + ctx->panel.dev = dev; 312 + ctx->panel.funcs = &jh057n_drm_funcs; 313 + 314 + drm_panel_add(&ctx->panel); 315 + 316 + ret = mipi_dsi_attach(dsi); 317 + if (ret < 0) { 318 + DRM_DEV_ERROR(dev, "mipi_dsi_attach failed. Is host ready?"); 319 + drm_panel_remove(&ctx->panel); 320 + return ret; 321 + } 322 + 323 + DRM_DEV_INFO(dev, "%ux%u@%u %ubpp dsi %udl - ready", 324 + default_mode.hdisplay, default_mode.vdisplay, 325 + default_mode.vrefresh, 326 + mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes); 327 + 328 + jh057n_debugfs_init(ctx); 329 + return 0; 330 + } 331 + 332 + static void jh057n_shutdown(struct mipi_dsi_device *dsi) 333 + { 334 + struct jh057n *ctx = mipi_dsi_get_drvdata(dsi); 335 + int ret; 336 + 337 + ret = jh057n_unprepare(&ctx->panel); 338 + if (ret < 0) 339 + DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n", 340 + ret); 341 + 342 + ret = jh057n_disable(&ctx->panel); 343 + if (ret < 0) 344 + DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n", 345 + ret); 346 + } 347 + 348 + static int jh057n_remove(struct mipi_dsi_device *dsi) 349 + { 350 + struct jh057n *ctx = mipi_dsi_get_drvdata(dsi); 351 + int ret; 352 + 353 + jh057n_shutdown(dsi); 354 + 355 + ret = mipi_dsi_detach(dsi); 356 + if (ret < 0) 357 + DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n", 358 + ret); 359 + 360 + drm_panel_remove(&ctx->panel); 361 + 362 + jh057n_debugfs_remove(ctx); 363 + 364 + return 0; 365 + } 366 + 367 + static const struct of_device_id jh057n_of_match[] = { 368 + { .compatible = "rocktech,jh057n00900" }, 369 + { /* sentinel */ } 370 + }; 371 + MODULE_DEVICE_TABLE(of, jh057n_of_match); 372 + 373 + static struct mipi_dsi_driver jh057n_driver = { 374 + .probe = jh057n_probe, 375 + .remove = jh057n_remove, 376 + .shutdown = jh057n_shutdown, 377 + .driver = { 378 + .name = DRV_NAME, 379 + .of_match_table = jh057n_of_match, 380 + }, 381 + }; 382 + module_mipi_dsi_driver(jh057n_driver); 383 + 384 + 
/* Module metadata. */
MODULE_AUTHOR("Guido Günther <agx@sigxcpu.org>");
MODULE_DESCRIPTION("DRM driver for Rocktech JH057N00900 MIPI DSI panel");
MODULE_LICENSE("GPL v2");
+8
drivers/gpu/drm/rockchip/Kconfig
··· 77 77 Some Rockchip CRTCs, like rv1108, can directly output parallel 78 78 and serial RGB format to panel or connect to a conversion chip. 79 79 say Y to enable its driver. 80 + 81 + config ROCKCHIP_RK3066_HDMI 82 + bool "Rockchip specific extensions for RK3066 HDMI" 83 + depends on DRM_ROCKCHIP 84 + help 85 + This selects support for Rockchip SoC specific extensions 86 + for the RK3066 HDMI driver. If you want to enable 87 + HDMI on RK3066 based SoC, you should select this option. 80 88 endif
+1
drivers/gpu/drm/rockchip/Makefile
··· 15 15 rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o 16 16 rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o 17 17 rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o 18 + rockchipdrm-$(CONFIG_ROCKCHIP_RK3066_HDMI) += rk3066_hdmi.o 18 19 19 20 obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o
+876
drivers/gpu/drm/rockchip/rk3066_hdmi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Zheng Yang <zhengyang@rock-chips.com>
 */

#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>

#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#include "rk3066_hdmi.h"

#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"

/* PLLA rate used as the DDC reference while video timing is unconfigured. */
#define DEFAULT_PLLA_RATE 30000000

struct hdmi_data_info {
	int vic; /* The CEA Video ID (VIC) of the current drm display mode. */
	bool sink_is_hdmi;		/* set from EDID detection */
	unsigned int enc_out_format;	/* HDMI_COLORSPACE_* output format */
	unsigned int colorimetry;	/* HDMI_COLORIMETRY_* for AVI frames */
};

/* State of the controller's built-in DDC (EDID) I2C master. */
struct rk3066_hdmi_i2c {
	struct i2c_adapter adap;

	u8 ddc_addr;		/* last EDID word address written */
	u8 segment_addr;	/* last EDID segment pointer written */
	u8 stat;		/* interrupt status captured in hardirq */

	struct mutex i2c_lock; /* For i2c operation. */
	struct completion cmpltn; /* signalled by the EDID interrupt */
};

/* Per-device driver state. */
struct rk3066_hdmi {
	struct device *dev;
	struct drm_device *drm_dev;
	struct regmap *grf_regmap;	/* GRF syscon for VOP mux selection */
	int irq;
	struct clk *hclk;
	void __iomem *regs;

	struct drm_connector connector;
	struct drm_encoder encoder;

	struct rk3066_hdmi_i2c *i2c;
	struct i2c_adapter *ddc;

	unsigned int tmdsclk;		/* current TMDS clock rate, Hz */

	struct hdmi_data_info hdmi_data;
	struct drm_display_mode previous_mode;
};

/*
 * Works for both embedded members because the macro argument doubles as
 * the member name (x = connector or encoder).
 */
#define to_rk3066_hdmi(x) container_of(x, struct rk3066_hdmi, x)

/* Registers are 32-bit aligned but hold byte-wide values. */
static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
{
	return readl_relaxed(hdmi->regs + offset);
}

static inline void hdmi_writeb(struct rk3066_hdmi *hdmi, u16 offset, u32 val)
{
	writel_relaxed(val, hdmi->regs + offset);
}

/* Read-modify-write: replace the bits in @msk with those from @val. */
static inline void hdmi_modb(struct rk3066_hdmi *hdmi, u16 offset,
			     u32 msk, u32 val)
{
	u8 temp = hdmi_readb(hdmi, offset) & ~msk;

	temp |= val & msk;
	hdmi_writeb(hdmi, offset, temp);
}
/* Program the DDC bus divider from the current TMDS clock and quiesce
 * the EDID interrupt (mask it, then clear any stale status).
 */
static void rk3066_hdmi_i2c_init(struct rk3066_hdmi *hdmi)
{
	int ddc_bus_freq;

	ddc_bus_freq = (hdmi->tmdsclk >> 2) / HDMI_SCL_RATE;

	hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
	hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);

	/* Clear the EDID interrupt flag and mute the interrupt. */
	hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
	hdmi_writeb(hdmi, HDMI_INTR_STATUS1, HDMI_INTR_EDID_MASK);
}

static inline u8 rk3066_hdmi_get_power_mode(struct rk3066_hdmi *hdmi)
{
	return hdmi_readb(hdmi, HDMI_SYS_CTRL) & HDMI_SYS_POWER_MODE_MASK;
}

/*
 * Step the controller from its current power mode to @mode.
 * The hardware only supports moving one mode at a time (modes are
 * one-hot encoded, so halving/doubling walks adjacent modes); the loop
 * is bounded at 5 iterations. Entering mode D additionally requires a
 * PLL reset dance with settle delays.
 */
static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
{
	u8 current_mode, next_mode;
	u8 i = 0;

	current_mode = rk3066_hdmi_get_power_mode(hdmi);

	DRM_DEV_DEBUG(hdmi->dev, "mode :%d\n", mode);
	DRM_DEV_DEBUG(hdmi->dev, "current_mode :%d\n", current_mode);

	if (current_mode == mode)
		return;

	do {
		if (current_mode > mode) {
			next_mode = current_mode / 2;
		} else {
			if (current_mode < HDMI_SYS_POWER_MODE_A)
				next_mode = HDMI_SYS_POWER_MODE_A;
			else
				next_mode = current_mode * 2;
		}

		DRM_DEV_DEBUG(hdmi->dev, "%d: next_mode :%d\n", i, next_mode);

		if (next_mode != HDMI_SYS_POWER_MODE_D) {
			hdmi_modb(hdmi, HDMI_SYS_CTRL,
				  HDMI_SYS_POWER_MODE_MASK, next_mode);
		} else {
			hdmi_writeb(hdmi, HDMI_SYS_CTRL,
				    HDMI_SYS_POWER_MODE_D |
				    HDMI_SYS_PLL_RESET_MASK);
			usleep_range(90, 100);
			hdmi_writeb(hdmi, HDMI_SYS_CTRL,
				    HDMI_SYS_POWER_MODE_D |
				    HDMI_SYS_PLLB_RESET);
			usleep_range(90, 100);
			hdmi_writeb(hdmi, HDMI_SYS_CTRL,
				    HDMI_SYS_POWER_MODE_D);
		}
		current_mode = next_mode;
		i = i + 1;
	} while ((next_mode != mode) && (i < 5));

	/*
	 * When the IP controller isn't configured with accurate video timing,
	 * DDC_CLK should be equal to the PLLA frequency, which is 30MHz,
	 * so we need to init the TMDS rate to the PCLK rate and reconfigure
	 * the DDC clock.
	 */
	if (mode < HDMI_SYS_POWER_MODE_D)
		hdmi->tmdsclk = DEFAULT_PLLA_RATE;
}

/*
 * Pack @frame into the control-packet buffer selected by @frame_index.
 * Auto-send is disabled (via @mask/@disable) before writing and re-enabled
 * (@enable) afterwards. A negative @setup_rc from the caller's infoframe
 * setup skips the upload and is passed straight back.
 */
static int
rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
			 union hdmi_infoframe *frame, u32 frame_index,
			 u32 mask, u32 disable, u32 enable)
{
	if (mask)
		hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);

	hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);

	if (setup_rc >= 0) {
		u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
		ssize_t rc, i;

		rc = hdmi_infoframe_pack(frame, packed_frame,
					 sizeof(packed_frame));
		if (rc < 0)
			return rc;

		/* Packet buffer registers are spaced 4 bytes apart. */
		for (i = 0; i < rc; i++)
			hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
				    packed_frame[i]);

		if (mask)
			hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
	}

	return setup_rc;
}

/* Build and upload the AVI infoframe for @mode. */
static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
				  struct drm_display_mode *mode)
{
	union hdmi_infoframe frame;
	int rc;

	rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
						      &hdmi->connector, mode);

	if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
		frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
	else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
		frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
	else
		frame.avi.colorspace = HDMI_COLORSPACE_RGB;

	frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
	frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;

	return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
					HDMI_INFOFRAME_AVI, 0, 0, 0);
}

/* Program the "external" video timing registers from @mode. */
static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
					   struct drm_display_mode *mode)
{
	int value, vsync_offset;

	/* Set the details for the external polarity and interlace mode. */
	value = HDMI_EXT_VIDEO_SET_EN;
	value |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
		 HDMI_VIDEO_HSYNC_ACTIVE_HIGH : HDMI_VIDEO_HSYNC_ACTIVE_LOW;
	value |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
		 HDMI_VIDEO_VSYNC_ACTIVE_HIGH : HDMI_VIDEO_VSYNC_ACTIVE_LOW;
	value |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
		 HDMI_VIDEO_MODE_INTERLACE : HDMI_VIDEO_MODE_PROGRESSIVE;

	/* VICs 2/3 (480p) need an extra vsync offset of 6 lines. */
	if (hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3)
		vsync_offset = 6;
	else
		vsync_offset = 0;

	value |= vsync_offset << HDMI_VIDEO_VSYNC_OFFSET_SHIFT;
	hdmi_writeb(hdmi, HDMI_EXT_VIDEO_PARA, value);

	/* Set the details for the external video timing. */
	value = mode->htotal;
	hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_H, (value >> 8) & 0xFF);

	value = mode->htotal - mode->hdisplay;
	hdmi_writeb(hdmi, HDMI_EXT_HBLANK_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_HBLANK_H, (value >> 8) & 0xFF);

	value = mode->htotal - mode->hsync_start;
	hdmi_writeb(hdmi, HDMI_EXT_HDELAY_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_HDELAY_H, (value >> 8) & 0xFF);

	value = mode->hsync_end - mode->hsync_start;
	hdmi_writeb(hdmi, HDMI_EXT_HDURATION_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_HDURATION_H, (value >> 8) & 0xFF);

	value = mode->vtotal;
	hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_H, (value >> 8) & 0xFF);

	value = mode->vtotal - mode->vdisplay;
	hdmi_writeb(hdmi, HDMI_EXT_VBLANK_L, value & 0xFF);

	value = mode->vtotal - mode->vsync_start + vsync_offset;
	hdmi_writeb(hdmi, HDMI_EXT_VDELAY, value & 0xFF);

	value = mode->vsync_end - mode->vsync_start;
	hdmi_writeb(hdmi, HDMI_EXT_VDURATION, value & 0xFF);

	return 0;
}

/* Write a PHY register, then pulse the PLL reset and wait for it to lock. */
static void
rk3066_hdmi_phy_write(struct rk3066_hdmi *hdmi, u16 offset, u8 value)
{
	hdmi_writeb(hdmi, offset, value);
	hdmi_modb(hdmi, HDMI_SYS_CTRL,
		  HDMI_SYS_PLL_RESET_MASK, HDMI_SYS_PLL_RESET);
	usleep_range(90, 100);
	hdmi_modb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_PLL_RESET_MASK, 0);
	usleep_range(900, 1000);
}

/* Configure the PHY for the current TMDS clock (three rate bands). */
static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
{
	/* TMDS uses the same frequency as dclk. */
	hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x22);

	/*
	 * The semi-public documentation does not describe the hdmi registers
	 * used by the function rk3066_hdmi_phy_write(), so we keep using
	 * these magic values for now.
	 */
	if (hdmi->tmdsclk > 100000000) {
		rk3066_hdmi_phy_write(hdmi, 0x158, 0x0E);
		rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
		rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x168, 0xDA);
		rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA1);
		rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
		rk3066_hdmi_phy_write(hdmi, 0x174, 0x22);
		rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
	} else if (hdmi->tmdsclk > 50000000) {
		rk3066_hdmi_phy_write(hdmi, 0x158, 0x06);
		rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
		rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x168, 0xCA);
		rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA3);
		rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
		rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
		rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
	} else {
		rk3066_hdmi_phy_write(hdmi, 0x158, 0x02);
		rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
		rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x168, 0xC2);
		rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA2);
		rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
		rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
		rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
	}
}

/*
 * Full mode-set: derive VIC/colorimetry, mute output, program timing,
 * infoframes and PHY, then power up to mode E and unmute video.
 */
static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
			     struct drm_display_mode *mode)
{
	hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
	hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;

	/* SD-class VICs (480i/480p/576i/576p) use ITU-601 colorimetry. */
	if (hdmi->hdmi_data.vic == 6 || hdmi->hdmi_data.vic == 7 ||
	    hdmi->hdmi_data.vic == 21 || hdmi->hdmi_data.vic == 22 ||
	    hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3 ||
	    hdmi->hdmi_data.vic == 17 || hdmi->hdmi_data.vic == 18)
		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
	else
		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;

	hdmi->tmdsclk = mode->clock * 1000;

	/* Mute video and audio output. */
	hdmi_modb(hdmi, HDMI_VIDEO_CTRL2, HDMI_VIDEO_AUDIO_DISABLE_MASK,
		  HDMI_AUDIO_DISABLE | HDMI_VIDEO_DISABLE);

	/* Set power state to mode B. */
	if (rk3066_hdmi_get_power_mode(hdmi) != HDMI_SYS_POWER_MODE_B)
		rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);

	/* Input video mode is RGB 24 bit. Use external data enable signal. */
	hdmi_modb(hdmi, HDMI_AV_CTRL1,
		  HDMI_VIDEO_DE_MASK, HDMI_VIDEO_EXTERNAL_DE);
	hdmi_writeb(hdmi, HDMI_VIDEO_CTRL1,
		    HDMI_VIDEO_OUTPUT_RGB444 |
		    HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT |
		    HDMI_VIDEO_INPUT_COLOR_RGB);
	hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x20);

	rk3066_hdmi_config_video_timing(hdmi, mode);

	if (hdmi->hdmi_data.sink_is_hdmi) {
		hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
			  HDMI_VIDEO_MODE_HDMI);
		rk3066_hdmi_config_avi(hdmi, mode);
	} else {
		/* DVI sink: plain video mode, no infoframes. */
		hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
	}

	rk3066_hdmi_config_phy(hdmi);

	rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_E);

	/*
	 * When the IP controller is configured with accurate video
	 * timing, the TMDS clock source should be switched to
	 * DCLK_LCDC, so we need to init the TMDS rate to the pixel mode
	 * clock rate and reconfigure the DDC clock.
	 */
	rk3066_hdmi_i2c_init(hdmi);

	/* Unmute video output. */
	hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
		  HDMI_VIDEO_AUDIO_DISABLE_MASK, HDMI_AUDIO_DISABLE);
	return 0;
}

static void
rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adj_mode)
{
	struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);

	/* Store the display mode for plugin/DPMS poweron events. */
	memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
}

/* Select the driving VOP via GRF, then run the full mode-set. */
static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
{
	struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
	int mux, val;

	mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
	/* High halfword is the write-enable mask for the GRF bit. */
	if (mux)
		val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
	else
		val = HDMI_VIDEO_SEL << 16;

	regmap_write(hdmi->grf_regmap, GRF_SOC_CON0, val);

	DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
		      (mux) ? "1" : "0");

	rk3066_hdmi_setup(hdmi, &hdmi->previous_mode);
}

static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
{
	struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);

	DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");

	if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_E) {
		/*
		 * NOTE(review): writes the mask constant as the register
		 * value (disables both audio and video); looks intentional
		 * but mirrors vendor code — confirm against the TRM.
		 */
		hdmi_writeb(hdmi, HDMI_VIDEO_CTRL2,
			    HDMI_VIDEO_AUDIO_DISABLE_MASK);
		hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
			  HDMI_AUDIO_CP_LOGIC_RESET_MASK,
			  HDMI_AUDIO_CP_LOGIC_RESET);
		usleep_range(500, 510);
	}
	rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
}

static bool
rk3066_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
			       const struct drm_display_mode *mode,
			       struct drm_display_mode *adj_mode)
{
	return true;
}

/* Tell the VOP which output mode/type this encoder needs. */
static int
rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
				 struct drm_crtc_state *crtc_state,
				 struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_P888;
	s->output_type = DRM_MODE_CONNECTOR_HDMIA;

	return 0;
}

static const
struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
	.enable       = rk3066_hdmi_encoder_enable,
	.disable      = rk3066_hdmi_encoder_disable,
	.mode_fixup   = rk3066_hdmi_encoder_mode_fixup,
	.mode_set     = rk3066_hdmi_encoder_mode_set,
	.atomic_check = rk3066_hdmi_encoder_atomic_check,
};

static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

/* Hot-plug state comes straight from the HPD status register. */
static enum drm_connector_status
rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
	struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);

	return (hdmi_readb(hdmi, HDMI_HPG_MENS_STA) & HDMI_HPG_IN_STATUS_HIGH) ?
		connector_status_connected : connector_status_disconnected;
}

/* Read EDID over the built-in DDC master and add the sink's modes. */
static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
{
	struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
	struct edid *edid;
	int ret = 0;

	if (!hdmi->ddc)
		return 0;

	edid = drm_get_edid(connector, hdmi->ddc);
	if (edid) {
		hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
		drm_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return ret;
}

/* Only CEA modes (VIC > 1) are accepted by this controller. */
static enum drm_mode_status
rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	u32 vic = drm_match_cea_mode(mode);

	if (vic > 1)
		return MODE_OK;
	else
		return MODE_BAD;
}

static struct drm_encoder *
rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
{
	struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);

	return &hdmi->encoder;
}

/* Cap probing at 1920x1080, the controller's maximum resolution. */
static int
rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
					 uint32_t maxX, uint32_t maxY)
{
	if (maxX > 1920)
		maxX = 1920;
	if (maxY > 1080)
		maxY = 1080;

	return drm_helper_probe_single_connector_modes(connector, maxX, maxY);
}

static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
	.fill_modes = rk3066_hdmi_probe_single_connector_modes,
	.detect = rk3066_hdmi_connector_detect,
	.destroy = rk3066_hdmi_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const
struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
	.get_modes = rk3066_hdmi_connector_get_modes,
	.mode_valid = rk3066_hdmi_connector_mode_valid,
	.best_encoder = rk3066_hdmi_connector_best_encoder,
};

/* Register encoder and connector with the DRM device. */
static int
rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
{
	struct drm_encoder *encoder = &hdmi->encoder;
	struct device *dev = hdmi->dev;

	encoder->possible_crtcs =
		drm_of_find_possible_crtcs(drm, dev->of_node);

	/*
	 * If we failed to find the CRTC(s) which this encoder is
	 * supposed to be connected to, it's because the CRTC has
	 * not been registered yet. Defer probing, and hope that
	 * the required CRTC is added later.
	 */
	if (encoder->possible_crtcs == 0)
		return -EPROBE_DEFER;

	drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
	drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
			 DRM_MODE_ENCODER_TMDS, NULL);

	hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;

	drm_connector_helper_add(&hdmi->connector,
				 &rk3066_hdmi_connector_helper_funcs);
	drm_connector_init(drm, &hdmi->connector,
			   &rk3066_hdmi_connector_funcs,
			   DRM_MODE_CONNECTOR_HDMIA);

	drm_connector_attach_encoder(&hdmi->connector, encoder);

	return 0;
}

/*
 * Hard IRQ: ack status, hand EDID completion to the waiting DDC transfer,
 * and punt hotplug/MSENS events to the threaded handler.
 */
static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
{
	struct rk3066_hdmi *hdmi = dev_id;
	irqreturn_t ret = IRQ_NONE;
	u8 interrupt;

	/* Interrupts are only delivered at mode B or higher. */
	if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_A)
		hdmi_writeb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_POWER_MODE_B);

	interrupt = hdmi_readb(hdmi, HDMI_INTR_STATUS1);
	if (interrupt)
		hdmi_writeb(hdmi, HDMI_INTR_STATUS1, interrupt);

	if (interrupt & HDMI_INTR_EDID_MASK) {
		hdmi->i2c->stat = interrupt;
		complete(&hdmi->i2c->cmpltn);
	}

	if (interrupt & (HDMI_INTR_HOTPLUG | HDMI_INTR_MSENS))
		ret = IRQ_WAKE_THREAD;

	return ret;
}

/* Threaded IRQ: report the hotplug change to DRM. */
static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
{
	struct rk3066_hdmi *hdmi = dev_id;

	drm_helper_hpd_irq_event(hdmi->connector.dev);

	return IRQ_HANDLED;
}

/* Wait for the EDID interrupt, then drain the read FIFO into msgs->buf. */
static int rk3066_hdmi_i2c_read(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
{
	int length = msgs->len;
	u8 *buf = msgs->buf;
	int ret;

	ret = wait_for_completion_timeout(&hdmi->i2c->cmpltn, HZ / 10);
	if (!ret || hdmi->i2c->stat & HDMI_INTR_EDID_ERR)
		return -EAGAIN;

	while (length--)
		*buf++ = hdmi_readb(hdmi, HDMI_DDC_READ_FIFO_ADDR);

	return 0;
}

static int rk3066_hdmi_i2c_write(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
{
	/*
	 * The DDC module only supports read EDID message, so
	 * we assume that each word write to this i2c adapter
	 * should be the offset of the EDID word address.
	 */
	if (msgs->len != 1 ||
	    (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR))
		return -EINVAL;

	reinit_completion(&hdmi->i2c->cmpltn);

	if (msgs->addr == DDC_SEGMENT_ADDR)
		hdmi->i2c->segment_addr = msgs->buf[0];
	if (msgs->addr == DDC_ADDR)
		hdmi->i2c->ddc_addr = msgs->buf[0];

	/* Set edid word address 0x00/0x80. */
	hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);

	/* Set edid segment pointer. */
	hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr);

	return 0;
}

/* i2c_algorithm .master_xfer for the built-in DDC master. */
static int rk3066_hdmi_i2c_xfer(struct i2c_adapter *adap,
				struct i2c_msg *msgs, int num)
{
	struct rk3066_hdmi *hdmi = i2c_get_adapdata(adap);
	struct rk3066_hdmi_i2c *i2c = hdmi->i2c;
	int i, ret = 0;

	mutex_lock(&i2c->i2c_lock);

	rk3066_hdmi_i2c_init(hdmi);

	/* Unmute HDMI EDID interrupt. */
	hdmi_modb(hdmi, HDMI_INTR_MASK1,
		  HDMI_INTR_EDID_MASK, HDMI_INTR_EDID_MASK);
	i2c->stat = 0;

	for (i = 0; i < num; i++) {
		DRM_DEV_DEBUG(hdmi->dev,
			      "xfer: num: %d/%d, len: %d, flags: %#x\n",
			      i + 1, num, msgs[i].len, msgs[i].flags);

		if (msgs[i].flags & I2C_M_RD)
			ret = rk3066_hdmi_i2c_read(hdmi, &msgs[i]);
		else
			ret = rk3066_hdmi_i2c_write(hdmi, &msgs[i]);

		if (ret < 0)
			break;
	}

	if (!ret)
		ret = num;

	/* Mute HDMI EDID interrupt. */
	hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);

	mutex_unlock(&i2c->i2c_lock);

	return ret;
}

static u32 rk3066_hdmi_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm rk3066_hdmi_algorithm = {
	.master_xfer   = rk3066_hdmi_i2c_xfer,
	.functionality = rk3066_hdmi_i2c_func,
};

/* Allocate and register the DDC i2c adapter backed by the controller. */
static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
{
	struct i2c_adapter *adap;
	struct rk3066_hdmi_i2c *i2c;
	int ret;

	i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return ERR_PTR(-ENOMEM);

	mutex_init(&i2c->i2c_lock);
	init_completion(&i2c->cmpltn);

	adap = &i2c->adap;
	adap->class = I2C_CLASS_DDC;
	adap->owner = THIS_MODULE;
	adap->dev.parent = hdmi->dev;
	adap->dev.of_node = hdmi->dev->of_node;
	adap->algo = &rk3066_hdmi_algorithm;
	strlcpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
	i2c_set_adapdata(adap, hdmi);

	ret = i2c_add_adapter(adap);
	if (ret) {
		DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
			      adap->name);
		devm_kfree(hdmi->dev, i2c);
		return ERR_PTR(ret);
	}

	hdmi->i2c = i2c;

	DRM_DEV_DEBUG(hdmi->dev, "registered %s I2C bus driver\n", adap->name);

	return adap;
}

/* Component bind: map resources, bring up clocks/irq, register with DRM. */
static int rk3066_hdmi_bind(struct device *dev, struct device *master,
			    void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct rk3066_hdmi *hdmi;
	struct resource *iores;
	int irq;
	int ret;

	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
	if (!hdmi)
		return -ENOMEM;

	hdmi->dev = dev;
	hdmi->drm_dev = drm;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores)
		return -ENXIO;

	hdmi->regs = devm_ioremap_resource(dev, iores);
	if (IS_ERR(hdmi->regs))
		return PTR_ERR(hdmi->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	hdmi->hclk = devm_clk_get(dev, "hclk");
	if (IS_ERR(hdmi->hclk)) {
		DRM_DEV_ERROR(dev, "unable to get HDMI hclk clock\n");
		return PTR_ERR(hdmi->hclk);
	}

	ret = clk_prepare_enable(hdmi->hclk);
	if (ret) {
		DRM_DEV_ERROR(dev, "cannot enable HDMI hclk clock: %d\n", ret);
		return ret;
	}

	hdmi->grf_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
							   "rockchip,grf");
	if (IS_ERR(hdmi->grf_regmap)) {
		DRM_DEV_ERROR(dev, "unable to get rockchip,grf\n");
		ret = PTR_ERR(hdmi->grf_regmap);
		goto err_disable_hclk;
	}

	/* internal hclk = hdmi_hclk / 25 */
	hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);

	hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
	if (IS_ERR(hdmi->ddc)) {
		ret = PTR_ERR(hdmi->ddc);
		hdmi->ddc = NULL;
		goto err_disable_hclk;
	}

	/* Briefly enter mode B so the interrupt masks can be programmed. */
	rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
	usleep_range(999, 1000);
	hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
	hdmi_writeb(hdmi, HDMI_INTR_MASK2, 0);
	hdmi_writeb(hdmi, HDMI_INTR_MASK3, 0);
	hdmi_writeb(hdmi, HDMI_INTR_MASK4, 0);
	rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);

	ret = rk3066_hdmi_register(drm, hdmi);
	if (ret)
		goto err_disable_i2c;

	dev_set_drvdata(dev, hdmi);

	ret = devm_request_threaded_irq(dev, irq, rk3066_hdmi_hardirq,
					rk3066_hdmi_irq, IRQF_SHARED,
					dev_name(dev), hdmi);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to request hdmi irq: %d\n", ret);
		goto err_cleanup_hdmi;
	}

	return 0;

err_cleanup_hdmi:
	hdmi->connector.funcs->destroy(&hdmi->connector);
	hdmi->encoder.funcs->destroy(&hdmi->encoder);
err_disable_i2c:
	i2c_put_adapter(hdmi->ddc);
err_disable_hclk:
	clk_disable_unprepare(hdmi->hclk);

	return ret;
}

static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
			       void *data)
{
	struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);

	hdmi->connector.funcs->destroy(&hdmi->connector);
	hdmi->encoder.funcs->destroy(&hdmi->encoder);

	i2c_put_adapter(hdmi->ddc);
	clk_disable_unprepare(hdmi->hclk);
}

static const struct component_ops rk3066_hdmi_ops = {
	.bind   = rk3066_hdmi_bind,
	.unbind = rk3066_hdmi_unbind,
};

static int rk3066_hdmi_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &rk3066_hdmi_ops);
}

static int rk3066_hdmi_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &rk3066_hdmi_ops);

	return 0;
}

static const struct of_device_id rk3066_hdmi_dt_ids[] = {
	{ .compatible = "rockchip,rk3066-hdmi" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids);

struct platform_driver rk3066_hdmi_driver = {
	.probe  = rk3066_hdmi_probe,
	.remove = rk3066_hdmi_remove,
	.driver = {
		.name = "rockchip-rk3066-hdmi",
		.of_match_table = rk3066_hdmi_dt_ids,
	},
};
+229
drivers/gpu/drm/rockchip/rk3066_hdmi.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 4 + * Zheng Yang <zhengyang@rock-chips.com> 5 + */ 6 + 7 + #ifndef __RK3066_HDMI_H__ 8 + #define __RK3066_HDMI_H__ 9 + 10 + #define GRF_SOC_CON0 0x150 11 + #define HDMI_VIDEO_SEL BIT(14) 12 + 13 + #define DDC_SEGMENT_ADDR 0x30 14 + #define HDMI_SCL_RATE (50 * 1000) 15 + #define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11 16 + 17 + #define N_32K 0x1000 18 + #define N_441K 0x1880 19 + #define N_882K 0x3100 20 + #define N_1764K 0x6200 21 + #define N_48K 0x1800 22 + #define N_96K 0x3000 23 + #define N_192K 0x6000 24 + 25 + #define HDMI_SYS_CTRL 0x000 26 + #define HDMI_LR_SWAP_N3 0x004 27 + #define HDMI_N2 0x008 28 + #define HDMI_N1 0x00c 29 + #define HDMI_SPDIF_FS_CTS_INT3 0x010 30 + #define HDMI_CTS_INT2 0x014 31 + #define HDMI_CTS_INT1 0x018 32 + #define HDMI_CTS_EXT3 0x01c 33 + #define HDMI_CTS_EXT2 0x020 34 + #define HDMI_CTS_EXT1 0x024 35 + #define HDMI_AUDIO_CTRL1 0x028 36 + #define HDMI_AUDIO_CTRL2 0x02c 37 + #define HDMI_I2S_AUDIO_CTRL 0x030 38 + #define HDMI_I2S_SWAP 0x040 39 + #define HDMI_AUDIO_STA_BIT_CTRL1 0x044 40 + #define HDMI_AUDIO_STA_BIT_CTRL2 0x048 41 + #define HDMI_AUDIO_SRC_NUM_AND_LENGTH 0x050 42 + #define HDMI_AV_CTRL1 0x054 43 + #define HDMI_VIDEO_CTRL1 0x058 44 + #define HDMI_DEEP_COLOR_MODE 0x05c 45 + 46 + #define HDMI_EXT_VIDEO_PARA 0x0c0 47 + #define HDMI_EXT_HTOTAL_L 0x0c4 48 + #define HDMI_EXT_HTOTAL_H 0x0c8 49 + #define HDMI_EXT_HBLANK_L 0x0cc 50 + #define HDMI_EXT_HBLANK_H 0x0d0 51 + #define HDMI_EXT_HDELAY_L 0x0d4 52 + #define HDMI_EXT_HDELAY_H 0x0d8 53 + #define HDMI_EXT_HDURATION_L 0x0dc 54 + #define HDMI_EXT_HDURATION_H 0x0e0 55 + #define HDMI_EXT_VTOTAL_L 0x0e4 56 + #define HDMI_EXT_VTOTAL_H 0x0e8 57 + #define HDMI_AV_CTRL2 0x0ec 58 + #define HDMI_EXT_VBLANK_L 0x0f4 59 + #define HDMI_EXT_VBLANK_H 0x10c 60 + #define HDMI_EXT_VDELAY 0x0f8 61 + #define HDMI_EXT_VDURATION 0x0fc 62 + 63 + #define HDMI_CP_MANU_SEND_CTRL 0x100 64 + #define 
HDMI_CP_AUTO_SEND_CTRL 0x104 65 + #define HDMI_AUTO_CHECKSUM_OPT 0x108 66 + 67 + #define HDMI_VIDEO_CTRL2 0x114 68 + 69 + #define HDMI_PHY_OPTION 0x144 70 + 71 + #define HDMI_CP_BUF_INDEX 0x17c 72 + #define HDMI_CP_BUF_ACC_HB0 0x180 73 + #define HDMI_CP_BUF_ACC_HB1 0x184 74 + #define HDMI_CP_BUF_ACC_HB2 0x188 75 + #define HDMI_CP_BUF_ACC_PB0 0x18c 76 + 77 + #define HDMI_DDC_READ_FIFO_ADDR 0x200 78 + #define HDMI_DDC_BUS_FREQ_L 0x204 79 + #define HDMI_DDC_BUS_FREQ_H 0x208 80 + #define HDMI_DDC_BUS_CTRL 0x2dc 81 + #define HDMI_DDC_I2C_LEN 0x278 82 + #define HDMI_DDC_I2C_OFFSET 0x280 83 + #define HDMI_DDC_I2C_CTRL 0x284 84 + #define HDMI_DDC_I2C_READ_BUF0 0x288 85 + #define HDMI_DDC_I2C_READ_BUF1 0x28c 86 + #define HDMI_DDC_I2C_READ_BUF2 0x290 87 + #define HDMI_DDC_I2C_READ_BUF3 0x294 88 + #define HDMI_DDC_I2C_WRITE_BUF0 0x298 89 + #define HDMI_DDC_I2C_WRITE_BUF1 0x29c 90 + #define HDMI_DDC_I2C_WRITE_BUF2 0x2a0 91 + #define HDMI_DDC_I2C_WRITE_BUF3 0x2a4 92 + #define HDMI_DDC_I2C_WRITE_BUF4 0x2ac 93 + #define HDMI_DDC_I2C_WRITE_BUF5 0x2b0 94 + #define HDMI_DDC_I2C_WRITE_BUF6 0x2b4 95 + 96 + #define HDMI_INTR_MASK1 0x248 97 + #define HDMI_INTR_MASK2 0x24c 98 + #define HDMI_INTR_STATUS1 0x250 99 + #define HDMI_INTR_STATUS2 0x254 100 + #define HDMI_INTR_MASK3 0x258 101 + #define HDMI_INTR_MASK4 0x25c 102 + #define HDMI_INTR_STATUS3 0x260 103 + #define HDMI_INTR_STATUS4 0x264 104 + 105 + #define HDMI_HDCP_CTRL 0x2bc 106 + 107 + #define HDMI_EDID_SEGMENT_POINTER 0x310 108 + #define HDMI_EDID_WORD_ADDR 0x314 109 + #define HDMI_EDID_FIFO_ADDR 0x318 110 + 111 + #define HDMI_HPG_MENS_STA 0x37c 112 + 113 + #define HDMI_INTERNAL_CLK_DIVIDER 0x800 114 + 115 + enum { 116 + /* HDMI_SYS_CTRL */ 117 + HDMI_SYS_POWER_MODE_MASK = 0xf0, 118 + HDMI_SYS_POWER_MODE_A = 0x10, 119 + HDMI_SYS_POWER_MODE_B = 0x20, 120 + HDMI_SYS_POWER_MODE_D = 0x40, 121 + HDMI_SYS_POWER_MODE_E = 0x80, 122 + HDMI_SYS_PLL_RESET_MASK = 0x0c, 123 + HDMI_SYS_PLL_RESET = 0x0c, 124 + HDMI_SYS_PLLB_RESET = 0x08, 125 + 
126 + /* HDMI_LR_SWAP_N3 */ 127 + HDMI_AUDIO_LR_SWAP_MASK = 0xf0, 128 + HDMI_AUDIO_LR_SWAP_SUBPACKET0 = 0x10, 129 + HDMI_AUDIO_LR_SWAP_SUBPACKET1 = 0x20, 130 + HDMI_AUDIO_LR_SWAP_SUBPACKET2 = 0x40, 131 + HDMI_AUDIO_LR_SWAP_SUBPACKET3 = 0x80, 132 + HDMI_AUDIO_N_19_16_MASK = 0x0f, 133 + 134 + /* HDMI_AUDIO_CTRL1 */ 135 + HDMI_AUDIO_EXTERNAL_CTS = BIT(7), 136 + HDMI_AUDIO_INPUT_IIS = 0, 137 + HDMI_AUDIO_INPUT_SPDIF = 0x08, 138 + HDMI_AUDIO_INPUT_MCLK_ACTIVE = 0x04, 139 + HDMI_AUDIO_INPUT_MCLK_DEACTIVE = 0, 140 + HDMI_AUDIO_INPUT_MCLK_RATE_128X = 0, 141 + HDMI_AUDIO_INPUT_MCLK_RATE_256X = 1, 142 + HDMI_AUDIO_INPUT_MCLK_RATE_384X = 2, 143 + HDMI_AUDIO_INPUT_MCLK_RATE_512X = 3, 144 + 145 + /* HDMI_I2S_AUDIO_CTRL */ 146 + HDMI_AUDIO_I2S_FORMAT_STANDARD = 0, 147 + HDMI_AUDIO_I2S_CHANNEL_1_2 = 0x04, 148 + HDMI_AUDIO_I2S_CHANNEL_3_4 = 0x0c, 149 + HDMI_AUDIO_I2S_CHANNEL_5_6 = 0x1c, 150 + HDMI_AUDIO_I2S_CHANNEL_7_8 = 0x3c, 151 + 152 + /* HDMI_AV_CTRL1 */ 153 + HDMI_AUDIO_SAMPLE_FRE_MASK = 0xf0, 154 + HDMI_AUDIO_SAMPLE_FRE_32000 = 0x30, 155 + HDMI_AUDIO_SAMPLE_FRE_44100 = 0, 156 + HDMI_AUDIO_SAMPLE_FRE_48000 = 0x20, 157 + HDMI_AUDIO_SAMPLE_FRE_88200 = 0x80, 158 + HDMI_AUDIO_SAMPLE_FRE_96000 = 0xa0, 159 + HDMI_AUDIO_SAMPLE_FRE_176400 = 0xc0, 160 + HDMI_AUDIO_SAMPLE_FRE_192000 = 0xe0, 161 + HDMI_AUDIO_SAMPLE_FRE_768000 = 0x90, 162 + 163 + HDMI_VIDEO_INPUT_FORMAT_MASK = 0x0e, 164 + HDMI_VIDEO_INPUT_RGB_YCBCR444 = 0, 165 + HDMI_VIDEO_INPUT_YCBCR422 = 0x02, 166 + HDMI_VIDEO_DE_MASK = 0x1, 167 + HDMI_VIDEO_INTERNAL_DE = 0, 168 + HDMI_VIDEO_EXTERNAL_DE = 0x01, 169 + 170 + /* HDMI_VIDEO_CTRL1 */ 171 + HDMI_VIDEO_OUTPUT_FORMAT_MASK = 0xc0, 172 + HDMI_VIDEO_OUTPUT_RGB444 = 0, 173 + HDMI_VIDEO_OUTPUT_YCBCR444 = 0x40, 174 + HDMI_VIDEO_OUTPUT_YCBCR422 = 0x80, 175 + HDMI_VIDEO_INPUT_DATA_DEPTH_MASK = 0x30, 176 + HDMI_VIDEO_INPUT_DATA_DEPTH_12BIT = 0, 177 + HDMI_VIDEO_INPUT_DATA_DEPTH_10BIT = 0x10, 178 + HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT = 0x30, 179 + HDMI_VIDEO_INPUT_COLOR_MASK = 1, 180 + 
HDMI_VIDEO_INPUT_COLOR_RGB = 0, 181 + HDMI_VIDEO_INPUT_COLOR_YCBCR = 1, 182 + 183 + /* HDMI_EXT_VIDEO_PARA */ 184 + HDMI_VIDEO_VSYNC_OFFSET_SHIFT = 4, 185 + HDMI_VIDEO_VSYNC_ACTIVE_HIGH = BIT(3), 186 + HDMI_VIDEO_VSYNC_ACTIVE_LOW = 0, 187 + HDMI_VIDEO_HSYNC_ACTIVE_HIGH = BIT(2), 188 + HDMI_VIDEO_HSYNC_ACTIVE_LOW = 0, 189 + HDMI_VIDEO_MODE_INTERLACE = BIT(1), 190 + HDMI_VIDEO_MODE_PROGRESSIVE = 0, 191 + HDMI_EXT_VIDEO_SET_EN = BIT(0), 192 + 193 + /* HDMI_CP_AUTO_SEND_CTRL */ 194 + 195 + /* HDMI_VIDEO_CTRL2 */ 196 + HDMI_VIDEO_AV_MUTE_MASK = 0xc0, 197 + HDMI_VIDEO_CLR_AV_MUTE = BIT(7), 198 + HDMI_VIDEO_SET_AV_MUTE = BIT(6), 199 + HDMI_AUDIO_CP_LOGIC_RESET_MASK = BIT(2), 200 + HDMI_AUDIO_CP_LOGIC_RESET = BIT(2), 201 + HDMI_VIDEO_AUDIO_DISABLE_MASK = 0x3, 202 + HDMI_AUDIO_DISABLE = BIT(1), 203 + HDMI_VIDEO_DISABLE = BIT(0), 204 + 205 + /* HDMI_CP_BUF_INDEX */ 206 + HDMI_INFOFRAME_VSI = 0x05, 207 + HDMI_INFOFRAME_AVI = 0x06, 208 + HDMI_INFOFRAME_AAI = 0x08, 209 + 210 + /* HDMI_INTR_MASK1 */ 211 + /* HDMI_INTR_STATUS1 */ 212 + HDMI_INTR_HOTPLUG = BIT(7), 213 + HDMI_INTR_MSENS = BIT(6), 214 + HDMI_INTR_VSYNC = BIT(5), 215 + HDMI_INTR_AUDIO_FIFO_FULL = BIT(4), 216 + HDMI_INTR_EDID_MASK = 0x6, 217 + HDMI_INTR_EDID_READY = BIT(2), 218 + HDMI_INTR_EDID_ERR = BIT(1), 219 + 220 + /* HDMI_HDCP_CTRL */ 221 + HDMI_VIDEO_MODE_MASK = BIT(1), 222 + HDMI_VIDEO_MODE_HDMI = BIT(1), 223 + 224 + /* HDMI_HPG_MENS_STA */ 225 + HDMI_HPG_IN_STATUS_HIGH = BIT(7), 226 + HDMI_MSENS_IN_STATUS_HIGH = BIT(6), 227 + }; 228 + 229 + #endif /* __RK3066_HDMI_H__ */
+2
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 486 486 ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver, 487 487 CONFIG_ROCKCHIP_DW_MIPI_DSI); 488 488 ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI); 489 + ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver, 490 + CONFIG_ROCKCHIP_RK3066_HDMI); 489 491 490 492 ret = platform_register_drivers(rockchip_sub_drivers, 491 493 num_rockchip_sub_drivers);
+1
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
··· 73 73 extern struct platform_driver rockchip_dp_driver; 74 74 extern struct platform_driver rockchip_lvds_driver; 75 75 extern struct platform_driver vop_platform_driver; 76 + extern struct platform_driver rk3066_hdmi_driver; 76 77 #endif /* _ROCKCHIP_DRM_DRV_H_ */
+10 -1
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 1029 1029 u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; 1030 1030 u16 vact_end = vact_st + vdisplay; 1031 1031 uint32_t pin_pol, val; 1032 + int dither_bpc = s->output_bpc ? s->output_bpc : 10; 1032 1033 int ret; 1033 1034 1034 1035 mutex_lock(&vop->vop_lock); ··· 1087 1086 !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10)) 1088 1087 s->output_mode = ROCKCHIP_OUT_MODE_P888; 1089 1088 1090 - if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8) 1089 + if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && dither_bpc <= 8) 1091 1090 VOP_REG_SET(vop, common, pre_dither_down, 1); 1092 1091 else 1093 1092 VOP_REG_SET(vop, common, pre_dither_down, 0); 1093 + 1094 + if (dither_bpc == 6) { 1095 + VOP_REG_SET(vop, common, dither_down_sel, DITHER_DOWN_ALLEGRO); 1096 + VOP_REG_SET(vop, common, dither_down_mode, RGB888_TO_RGB666); 1097 + VOP_REG_SET(vop, common, dither_down_en, 1); 1098 + } else { 1099 + VOP_REG_SET(vop, common, dither_down_en, 0); 1100 + } 1094 1101 1095 1102 VOP_REG_SET(vop, common, out_mode, s->output_mode); 1096 1103
+13 -1
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
··· 71 71 struct vop_reg dsp_blank; 72 72 struct vop_reg data_blank; 73 73 struct vop_reg pre_dither_down; 74 - struct vop_reg dither_down; 74 + struct vop_reg dither_down_sel; 75 + struct vop_reg dither_down_mode; 76 + struct vop_reg dither_down_en; 75 77 struct vop_reg dither_up; 76 78 struct vop_reg gate_en; 77 79 struct vop_reg mmu_en; ··· 287 285 enum scale_down_mode { 288 286 SCALE_DOWN_BIL = 0x0, 289 287 SCALE_DOWN_AVG = 0x1 288 + }; 289 + 290 + enum dither_down_mode { 291 + RGB888_TO_RGB565 = 0x0, 292 + RGB888_TO_RGB666 = 0x1 293 + }; 294 + 295 + enum dither_down_mode_sel { 296 + DITHER_DOWN_ALLEGRO = 0x0, 297 + DITHER_DOWN_FRC = 0x1 290 298 }; 291 299 292 300 enum vop_pol {
+18 -2
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
··· 137 137 .standby = VOP_REG_SYNC(RK3036_SYS_CTRL, 0x1, 30), 138 138 .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0), 139 139 .dsp_blank = VOP_REG(RK3036_DSP_CTRL1, 0x1, 24), 140 + .dither_down_sel = VOP_REG(RK3036_DSP_CTRL0, 0x1, 27), 141 + .dither_down_en = VOP_REG(RK3036_DSP_CTRL0, 0x1, 11), 142 + .dither_down_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 10), 140 143 .cfg_done = VOP_REG_SYNC(RK3036_REG_CFG_DONE, 0x1, 0), 141 144 }; 142 145 ··· 203 200 .standby = VOP_REG_SYNC(PX30_SYS_CTRL2, 0x1, 1), 204 201 .out_mode = VOP_REG(PX30_DSP_CTRL2, 0xf, 16), 205 202 .dsp_blank = VOP_REG(PX30_DSP_CTRL2, 0x1, 14), 203 + .dither_down_en = VOP_REG(PX30_DSP_CTRL2, 0x1, 8), 204 + .dither_down_sel = VOP_REG(PX30_DSP_CTRL2, 0x1, 7), 205 + .dither_down_mode = VOP_REG(PX30_DSP_CTRL2, 0x1, 6), 206 206 .cfg_done = VOP_REG_SYNC(PX30_REG_CFG_DONE, 0x1, 0), 207 207 }; 208 208 ··· 371 365 .standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1), 372 366 .out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0), 373 367 .cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0), 368 + .dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11), 369 + .dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10), 374 370 .dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24), 375 371 }; 376 372 ··· 466 458 .standby = VOP_REG(RK3188_SYS_CTRL, 0x1, 30), 467 459 .out_mode = VOP_REG(RK3188_DSP_CTRL0, 0xf, 0), 468 460 .cfg_done = VOP_REG(RK3188_REG_CFG_DONE, 0x1, 0), 461 + .dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27), 462 + .dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11), 463 + .dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10), 469 464 .dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24), 470 465 }; 471 466 ··· 596 585 .standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22), 597 586 .gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23), 598 587 .mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20), 588 + .dither_down_sel = VOP_REG(RK3288_DSP_CTRL1, 0x1, 4), 589 + .dither_down_mode = VOP_REG(RK3288_DSP_CTRL1, 0x1, 3), 590 + .dither_down_en = 
VOP_REG(RK3288_DSP_CTRL1, 0x1, 2), 599 591 .pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1), 600 - .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1), 601 592 .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6), 602 593 .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19), 603 594 .dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18), ··· 891 878 892 879 static const struct vop_common rk3328_common = { 893 880 .standby = VOP_REG_SYNC(RK3328_SYS_CTRL, 0x1, 22), 894 - .dither_down = VOP_REG(RK3328_DSP_CTRL1, 0xf, 1), 881 + .dither_down_sel = VOP_REG(RK3328_DSP_CTRL1, 0x1, 4), 882 + .dither_down_mode = VOP_REG(RK3328_DSP_CTRL1, 0x1, 3), 883 + .dither_down_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 2), 884 + .pre_dither_down = VOP_REG(RK3328_DSP_CTRL1, 0x1, 1), 895 885 .dither_up = VOP_REG(RK3328_DSP_CTRL1, 0x1, 6), 896 886 .dsp_blank = VOP_REG(RK3328_DSP_CTRL0, 0x3, 18), 897 887 .out_mode = VOP_REG(RK3328_DSP_CTRL0, 0xf, 0),
+35
drivers/gpu/drm/stm/drv.c
··· 129 129 drm_mode_config_cleanup(ddev); 130 130 } 131 131 132 + static __maybe_unused int drv_suspend(struct device *dev) 133 + { 134 + struct drm_device *ddev = dev_get_drvdata(dev); 135 + struct ltdc_device *ldev = ddev->dev_private; 136 + struct drm_atomic_state *state; 137 + 138 + drm_kms_helper_poll_disable(ddev); 139 + state = drm_atomic_helper_suspend(ddev); 140 + if (IS_ERR(state)) { 141 + drm_kms_helper_poll_enable(ddev); 142 + return PTR_ERR(state); 143 + } 144 + ldev->suspend_state = state; 145 + ltdc_suspend(ddev); 146 + 147 + return 0; 148 + } 149 + 150 + static __maybe_unused int drv_resume(struct device *dev) 151 + { 152 + struct drm_device *ddev = dev_get_drvdata(dev); 153 + struct ltdc_device *ldev = ddev->dev_private; 154 + 155 + ltdc_resume(ddev); 156 + drm_atomic_helper_resume(ddev, ldev->suspend_state); 157 + drm_kms_helper_poll_enable(ddev); 158 + 159 + return 0; 160 + } 161 + 162 + static const struct dev_pm_ops drv_pm_ops = { 163 + SET_SYSTEM_SLEEP_PM_OPS(drv_suspend, drv_resume) 164 + }; 165 + 132 166 static int stm_drm_platform_probe(struct platform_device *pdev) 133 167 { 134 168 struct device *dev = &pdev->dev; ··· 220 186 .driver = { 221 187 .name = "stm32-display", 222 188 .of_match_table = drv_dt_ids, 189 + .pm = &drv_pm_ops, 223 190 }, 224 191 }; 225 192
+28
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
··· 356 356 return 0; 357 357 } 358 358 359 + static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev) 360 + { 361 + struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data; 362 + 363 + DRM_DEBUG_DRIVER("\n"); 364 + 365 + clk_disable_unprepare(dsi->pllref_clk); 366 + 367 + return 0; 368 + } 369 + 370 + static int __maybe_unused dw_mipi_dsi_stm_resume(struct device *dev) 371 + { 372 + struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data; 373 + 374 + DRM_DEBUG_DRIVER("\n"); 375 + 376 + clk_prepare_enable(dsi->pllref_clk); 377 + 378 + return 0; 379 + } 380 + 381 + static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = { 382 + SET_SYSTEM_SLEEP_PM_OPS(dw_mipi_dsi_stm_suspend, 383 + dw_mipi_dsi_stm_resume) 384 + }; 385 + 359 386 static struct platform_driver dw_mipi_dsi_stm_driver = { 360 387 .probe = dw_mipi_dsi_stm_probe, 361 388 .remove = dw_mipi_dsi_stm_remove, 362 389 .driver = { 363 390 .of_match_table = dw_mipi_dsi_stm_dt_ids, 364 391 .name = "stm32-display-dsi", 392 + .pm = &dw_mipi_dsi_stm_pm_ops, 365 393 }, 366 394 }; 367 395
+24
drivers/gpu/drm/stm/ltdc.c
··· 1062 1062 return 0; 1063 1063 } 1064 1064 1065 + void ltdc_suspend(struct drm_device *ddev) 1066 + { 1067 + struct ltdc_device *ldev = ddev->dev_private; 1068 + 1069 + DRM_DEBUG_DRIVER("\n"); 1070 + clk_disable_unprepare(ldev->pixel_clk); 1071 + } 1072 + 1073 + int ltdc_resume(struct drm_device *ddev) 1074 + { 1075 + struct ltdc_device *ldev = ddev->dev_private; 1076 + int ret; 1077 + 1078 + DRM_DEBUG_DRIVER("\n"); 1079 + 1080 + ret = clk_prepare_enable(ldev->pixel_clk); 1081 + if (ret) { 1082 + DRM_ERROR("failed to enable pixel clock (%d)\n", ret); 1083 + return ret; 1084 + } 1085 + 1086 + return 0; 1087 + } 1088 + 1065 1089 int ltdc_load(struct drm_device *ddev) 1066 1090 { 1067 1091 struct platform_device *pdev = to_platform_device(ddev->dev);
+3
drivers/gpu/drm/stm/ltdc.h
··· 36 36 u32 error_status; 37 37 u32 irq_status; 38 38 struct fps_info plane_fpsi[LTDC_MAX_LAYER]; 39 + struct drm_atomic_state *suspend_state; 39 40 }; 40 41 41 42 bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, ··· 46 45 47 46 int ltdc_load(struct drm_device *ddev); 48 47 void ltdc_unload(struct drm_device *ddev); 48 + void ltdc_suspend(struct drm_device *ddev); 49 + int ltdc_resume(struct drm_device *ddev); 49 50 50 51 #endif
+1
drivers/gpu/drm/sun4i/sun4i_hdmi.h
··· 269 269 struct clk *tmds_clk; 270 270 271 271 struct i2c_adapter *i2c; 272 + struct i2c_adapter *ddc_i2c; 272 273 273 274 /* Regmap fields for I2C adapter */ 274 275 struct regmap_field *field_ddc_en;
+37 -3
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
··· 217 217 struct edid *edid; 218 218 int ret; 219 219 220 - edid = drm_get_edid(connector, hdmi->i2c); 220 + edid = drm_get_edid(connector, hdmi->ddc_i2c ?: hdmi->i2c); 221 221 if (!edid) 222 222 return 0; 223 223 ··· 231 231 kfree(edid); 232 232 233 233 return ret; 234 + } 235 + 236 + static struct i2c_adapter *sun4i_hdmi_get_ddc(struct device *dev) 237 + { 238 + struct device_node *phandle, *remote; 239 + struct i2c_adapter *ddc; 240 + 241 + remote = of_graph_get_remote_node(dev->of_node, 1, -1); 242 + if (!remote) 243 + return ERR_PTR(-EINVAL); 244 + 245 + phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0); 246 + of_node_put(remote); 247 + if (!phandle) 248 + return ERR_PTR(-ENODEV); 249 + 250 + ddc = of_get_i2c_adapter_by_node(phandle); 251 + of_node_put(phandle); 252 + if (!ddc) 253 + return ERR_PTR(-EPROBE_DEFER); 254 + 255 + return ddc; 234 256 } 235 257 236 258 static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = { ··· 602 580 goto err_disable_mod_clk; 603 581 } 604 582 583 + hdmi->ddc_i2c = sun4i_hdmi_get_ddc(dev); 584 + if (IS_ERR(hdmi->ddc_i2c)) { 585 + ret = PTR_ERR(hdmi->ddc_i2c); 586 + if (ret == -ENODEV) 587 + hdmi->ddc_i2c = NULL; 588 + else 589 + goto err_del_i2c_adapter; 590 + } 591 + 605 592 drm_encoder_helper_add(&hdmi->encoder, 606 593 &sun4i_hdmi_helper_funcs); 607 594 ret = drm_encoder_init(drm, ··· 620 589 NULL); 621 590 if (ret) { 622 591 dev_err(dev, "Couldn't initialise the HDMI encoder\n"); 623 - goto err_del_i2c_adapter; 592 + goto err_put_ddc_i2c; 624 593 } 625 594 626 595 hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm, 627 596 dev->of_node); 628 597 if (!hdmi->encoder.possible_crtcs) { 629 598 ret = -EPROBE_DEFER; 630 - goto err_del_i2c_adapter; 599 + goto err_put_ddc_i2c; 631 600 } 632 601 633 602 #ifdef CONFIG_DRM_SUN4I_HDMI_CEC ··· 666 635 err_cleanup_connector: 667 636 cec_delete_adapter(hdmi->cec_adap); 668 637 drm_encoder_cleanup(&hdmi->encoder); 638 + err_put_ddc_i2c: 639 + 
i2c_put_adapter(hdmi->ddc_i2c); 669 640 err_del_i2c_adapter: 670 641 i2c_del_adapter(hdmi->i2c); 671 642 err_disable_mod_clk: ··· 688 655 drm_connector_cleanup(&hdmi->connector); 689 656 drm_encoder_cleanup(&hdmi->encoder); 690 657 i2c_del_adapter(hdmi->i2c); 658 + i2c_put_adapter(hdmi->ddc_i2c); 691 659 clk_disable_unprepare(hdmi->mod_clk); 692 660 clk_disable_unprepare(hdmi->bus_clk); 693 661 }
+2 -2
drivers/gpu/drm/v3d/v3d_bo.c
··· 130 130 int ret; 131 131 132 132 shmem_obj = drm_gem_shmem_create(dev, unaligned_size); 133 - if (!shmem_obj) 134 - return NULL; 133 + if (IS_ERR(shmem_obj)) 134 + return ERR_CAST(shmem_obj); 135 135 bo = to_v3d_bo(&shmem_obj->base); 136 136 137 137 ret = v3d_bo_create_finish(&shmem_obj->base);
+2
drivers/gpu/drm/v3d/v3d_drv.c
··· 102 102 return -EINVAL; 103 103 104 104 ret = pm_runtime_get_sync(v3d->dev); 105 + if (ret < 0) 106 + return ret; 105 107 if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 && 106 108 args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) { 107 109 args->value = V3D_CORE_READ(0, offset);
+2 -2
drivers/gpu/drm/v3d/v3d_drv.h
··· 163 163 struct dma_fence *in_fence; 164 164 165 165 /* v3d fence to be signaled by IRQ handler when the job is complete. */ 166 - struct dma_fence *done_fence; 166 + struct dma_fence *irq_fence; 167 167 168 168 /* GPU virtual addresses of the start/end of the CL job. */ 169 169 u32 start, end; ··· 210 210 struct dma_fence *in_fence; 211 211 212 212 /* v3d fence to be signaled by IRQ handler when the job is complete. */ 213 - struct dma_fence *done_fence; 213 + struct dma_fence *irq_fence; 214 214 215 215 struct v3d_dev *v3d; 216 216
+3 -3
drivers/gpu/drm/v3d/v3d_gem.c
··· 340 340 dma_fence_put(exec->bin.in_fence); 341 341 dma_fence_put(exec->render.in_fence); 342 342 343 - dma_fence_put(exec->bin.done_fence); 344 - dma_fence_put(exec->render.done_fence); 343 + dma_fence_put(exec->bin.irq_fence); 344 + dma_fence_put(exec->render.irq_fence); 345 345 346 346 dma_fence_put(exec->bin_done_fence); 347 347 dma_fence_put(exec->render_done_fence); ··· 374 374 unsigned int i; 375 375 376 376 dma_fence_put(job->in_fence); 377 - dma_fence_put(job->done_fence); 377 + dma_fence_put(job->irq_fence); 378 378 379 379 for (i = 0; i < ARRAY_SIZE(job->bo); i++) { 380 380 if (job->bo[i])
+5 -4
drivers/gpu/drm/v3d/v3d_irq.c
··· 87 87 if (intsts & V3D_INT_OUTOMEM) { 88 88 /* Note that the OOM status is edge signaled, so the 89 89 * interrupt won't happen again until the we actually 90 - * add more memory. 90 + * add more memory. Also, as of V3D 4.1, FLDONE won't 91 + * be reported until any OOM state has been cleared. 91 92 */ 92 93 schedule_work(&v3d->overflow_mem_work); 93 94 status = IRQ_HANDLED; ··· 96 95 97 96 if (intsts & V3D_INT_FLDONE) { 98 97 struct v3d_fence *fence = 99 - to_v3d_fence(v3d->bin_job->bin.done_fence); 98 + to_v3d_fence(v3d->bin_job->bin.irq_fence); 100 99 101 100 trace_v3d_bcl_irq(&v3d->drm, fence->seqno); 102 101 dma_fence_signal(&fence->base); ··· 105 104 106 105 if (intsts & V3D_INT_FRDONE) { 107 106 struct v3d_fence *fence = 108 - to_v3d_fence(v3d->render_job->render.done_fence); 107 + to_v3d_fence(v3d->render_job->render.irq_fence); 109 108 110 109 trace_v3d_rcl_irq(&v3d->drm, fence->seqno); 111 110 dma_fence_signal(&fence->base); ··· 141 140 142 141 if (intsts & V3D_HUB_INT_TFUC) { 143 142 struct v3d_fence *fence = 144 - to_v3d_fence(v3d->tfu_job->done_fence); 143 + to_v3d_fence(v3d->tfu_job->irq_fence); 145 144 146 145 trace_v3d_tfu_irq(&v3d->drm, fence->seqno); 147 146 dma_fence_signal(&fence->base);
+6 -6
drivers/gpu/drm/v3d/v3d_sched.c
··· 156 156 if (IS_ERR(fence)) 157 157 return NULL; 158 158 159 - if (job->done_fence) 160 - dma_fence_put(job->done_fence); 161 - job->done_fence = dma_fence_get(fence); 159 + if (job->irq_fence) 160 + dma_fence_put(job->irq_fence); 161 + job->irq_fence = dma_fence_get(fence); 162 162 163 163 trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno, 164 164 job->start, job->end); ··· 199 199 return NULL; 200 200 201 201 v3d->tfu_job = job; 202 - if (job->done_fence) 203 - dma_fence_put(job->done_fence); 204 - job->done_fence = dma_fence_get(fence); 202 + if (job->irq_fence) 203 + dma_fence_put(job->irq_fence); 204 + job->irq_fence = dma_fence_get(fence); 205 205 206 206 trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno); 207 207
+18 -38
drivers/gpu/drm/vc4/vc4_bo.c
··· 40 40 return label >= VC4_BO_TYPE_COUNT; 41 41 } 42 42 43 - static void vc4_bo_stats_dump(struct vc4_dev *vc4) 43 + static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4) 44 44 { 45 45 int i; 46 46 ··· 48 48 if (!vc4->bo_labels[i].num_allocated) 49 49 continue; 50 50 51 - DRM_INFO("%30s: %6dkb BOs (%d)\n", 52 - vc4->bo_labels[i].name, 53 - vc4->bo_labels[i].size_allocated / 1024, 54 - vc4->bo_labels[i].num_allocated); 55 - } 56 - 57 - mutex_lock(&vc4->purgeable.lock); 58 - if (vc4->purgeable.num) 59 - DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache", 60 - vc4->purgeable.size / 1024, vc4->purgeable.num); 61 - 62 - if (vc4->purgeable.purged_num) 63 - DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO", 64 - vc4->purgeable.purged_size / 1024, 65 - vc4->purgeable.purged_num); 66 - mutex_unlock(&vc4->purgeable.lock); 67 - } 68 - 69 - #ifdef CONFIG_DEBUG_FS 70 - int vc4_bo_stats_debugfs(struct seq_file *m, void *unused) 71 - { 72 - struct drm_info_node *node = (struct drm_info_node *)m->private; 73 - struct drm_device *dev = node->minor->dev; 74 - struct vc4_dev *vc4 = to_vc4_dev(dev); 75 - int i; 76 - 77 - mutex_lock(&vc4->bo_lock); 78 - for (i = 0; i < vc4->num_labels; i++) { 79 - if (!vc4->bo_labels[i].num_allocated) 80 - continue; 81 - 82 - seq_printf(m, "%30s: %6dkb BOs (%d)\n", 51 + drm_printf(p, "%30s: %6dkb BOs (%d)\n", 83 52 vc4->bo_labels[i].name, 84 53 vc4->bo_labels[i].size_allocated / 1024, 85 54 vc4->bo_labels[i].num_allocated); 86 55 } 87 - mutex_unlock(&vc4->bo_lock); 88 56 89 57 mutex_lock(&vc4->purgeable.lock); 90 58 if (vc4->purgeable.num) 91 - seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache", 59 + drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache", 92 60 vc4->purgeable.size / 1024, vc4->purgeable.num); 93 61 94 62 if (vc4->purgeable.purged_num) 95 - seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO", 63 + drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO", 96 64 
vc4->purgeable.purged_size / 1024, 97 65 vc4->purgeable.purged_num); 98 66 mutex_unlock(&vc4->purgeable.lock); 67 + } 68 + 69 + static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused) 70 + { 71 + struct drm_info_node *node = (struct drm_info_node *)m->private; 72 + struct drm_device *dev = node->minor->dev; 73 + struct vc4_dev *vc4 = to_vc4_dev(dev); 74 + struct drm_printer p = drm_seq_file_printer(m); 75 + 76 + vc4_bo_stats_print(&p, vc4); 99 77 100 78 return 0; 101 79 } 102 - #endif 103 80 104 81 /* Takes ownership of *name and returns the appropriate slot for it in 105 82 * the bo_labels[] array, extending it as necessary. ··· 452 475 } 453 476 454 477 if (IS_ERR(cma_obj)) { 478 + struct drm_printer p = drm_info_printer(vc4->dev->dev); 455 479 DRM_ERROR("Failed to allocate from CMA:\n"); 456 - vc4_bo_stats_dump(vc4); 480 + vc4_bo_stats_print(&p, vc4); 457 481 return ERR_PTR(-ENOMEM); 458 482 } 459 483 bo = to_vc4_bo(&cma_obj->base); ··· 1002 1024 vc4->bo_labels[i].name = bo_type_names[i]; 1003 1025 1004 1026 mutex_init(&vc4->bo_lock); 1027 + 1028 + vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL); 1005 1029 1006 1030 INIT_LIST_HEAD(&vc4->bo_cache.time_list); 1007 1031
+34 -63
drivers/gpu/drm/vc4/vc4_crtc.c
··· 35 35 #include <drm/drm_atomic.h> 36 36 #include <drm/drm_atomic_helper.h> 37 37 #include <drm/drm_atomic_uapi.h> 38 + #include <drm/drm_print.h> 38 39 #include <drm/drm_probe_helper.h> 39 40 #include <linux/clk.h> 40 41 #include <drm/drm_fb_cma_helper.h> ··· 68 67 #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset)) 69 68 #define CRTC_READ(offset) readl(vc4_crtc->regs + (offset)) 70 69 71 - #define CRTC_REG(reg) { reg, #reg } 72 - static const struct { 73 - u32 reg; 74 - const char *name; 75 - } crtc_regs[] = { 76 - CRTC_REG(PV_CONTROL), 77 - CRTC_REG(PV_V_CONTROL), 78 - CRTC_REG(PV_VSYNCD_EVEN), 79 - CRTC_REG(PV_HORZA), 80 - CRTC_REG(PV_HORZB), 81 - CRTC_REG(PV_VERTA), 82 - CRTC_REG(PV_VERTB), 83 - CRTC_REG(PV_VERTA_EVEN), 84 - CRTC_REG(PV_VERTB_EVEN), 85 - CRTC_REG(PV_INTEN), 86 - CRTC_REG(PV_INTSTAT), 87 - CRTC_REG(PV_STAT), 88 - CRTC_REG(PV_HACT_ACT), 70 + static const struct debugfs_reg32 crtc_regs[] = { 71 + VC4_REG32(PV_CONTROL), 72 + VC4_REG32(PV_V_CONTROL), 73 + VC4_REG32(PV_VSYNCD_EVEN), 74 + VC4_REG32(PV_HORZA), 75 + VC4_REG32(PV_HORZB), 76 + VC4_REG32(PV_VERTA), 77 + VC4_REG32(PV_VERTB), 78 + VC4_REG32(PV_VERTA_EVEN), 79 + VC4_REG32(PV_VERTB_EVEN), 80 + VC4_REG32(PV_INTEN), 81 + VC4_REG32(PV_INTSTAT), 82 + VC4_REG32(PV_STAT), 83 + VC4_REG32(PV_HACT_ACT), 89 84 }; 90 - 91 - static void vc4_crtc_dump_regs(struct vc4_crtc *vc4_crtc) 92 - { 93 - int i; 94 - 95 - for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) { 96 - DRM_INFO("0x%04x (%s): 0x%08x\n", 97 - crtc_regs[i].reg, crtc_regs[i].name, 98 - CRTC_READ(crtc_regs[i].reg)); 99 - } 100 - } 101 - 102 - #ifdef CONFIG_DEBUG_FS 103 - int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused) 104 - { 105 - struct drm_info_node *node = (struct drm_info_node *)m->private; 106 - struct drm_device *dev = node->minor->dev; 107 - int crtc_index = (uintptr_t)node->info_ent->data; 108 - struct drm_crtc *crtc; 109 - struct vc4_crtc *vc4_crtc; 110 - int i; 111 - 112 - i = 0; 113 - 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 114 - if (i == crtc_index) 115 - break; 116 - i++; 117 - } 118 - if (!crtc) 119 - return 0; 120 - vc4_crtc = to_vc4_crtc(crtc); 121 - 122 - for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) { 123 - seq_printf(m, "%s (0x%04x): 0x%08x\n", 124 - crtc_regs[i].name, crtc_regs[i].reg, 125 - CRTC_READ(crtc_regs[i].reg)); 126 - } 127 - 128 - return 0; 129 - } 130 - #endif 131 85 132 86 bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, 133 87 bool in_vblank_irq, int *vpos, int *hpos, ··· 390 434 bool debug_dump_regs = false; 391 435 392 436 if (debug_dump_regs) { 393 - DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc)); 394 - vc4_crtc_dump_regs(vc4_crtc); 437 + struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); 438 + dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n", 439 + drm_crtc_index(crtc)); 440 + drm_print_regset32(&p, &vc4_crtc->regset); 395 441 } 396 442 397 443 if (vc4_crtc->channel == 2) { ··· 434 476 vc4_crtc_lut_load(crtc); 435 477 436 478 if (debug_dump_regs) { 437 - DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc)); 438 - vc4_crtc_dump_regs(vc4_crtc); 479 + struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); 480 + dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs after:\n", 481 + drm_crtc_index(crtc)); 482 + drm_print_regset32(&p, &vc4_crtc->regset); 439 483 } 440 484 } 441 485 ··· 1043 1083 1044 1084 static const struct vc4_crtc_data pv0_data = { 1045 1085 .hvs_channel = 0, 1086 + .debugfs_name = "crtc0_regs", 1046 1087 .encoder_types = { 1047 1088 [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0, 1048 1089 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI, ··· 1052 1091 1053 1092 static const struct vc4_crtc_data pv1_data = { 1054 1093 .hvs_channel = 2, 1094 + .debugfs_name = "crtc1_regs", 1055 1095 .encoder_types = { 1056 1096 [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1, 1057 1097 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = 
VC4_ENCODER_TYPE_SMI, ··· 1061 1099 1062 1100 static const struct vc4_crtc_data pv2_data = { 1063 1101 .hvs_channel = 1, 1102 + .debugfs_name = "crtc2_regs", 1064 1103 .encoder_types = { 1065 1104 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI, 1066 1105 [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC, ··· 1140 1177 if (!match) 1141 1178 return -ENODEV; 1142 1179 vc4_crtc->data = match->data; 1180 + vc4_crtc->pdev = pdev; 1143 1181 1144 1182 vc4_crtc->regs = vc4_ioremap_regs(pdev, 0); 1145 1183 if (IS_ERR(vc4_crtc->regs)) 1146 1184 return PTR_ERR(vc4_crtc->regs); 1185 + 1186 + vc4_crtc->regset.base = vc4_crtc->regs; 1187 + vc4_crtc->regset.regs = crtc_regs; 1188 + vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs); 1147 1189 1148 1190 /* For now, we create just the primary and the legacy cursor 1149 1191 * planes. We should be able to stack more planes on easily, ··· 1222 1254 } 1223 1255 1224 1256 platform_set_drvdata(pdev, vc4_crtc); 1257 + 1258 + vc4_debugfs_add_regset32(drm, vc4_crtc->data->debugfs_name, 1259 + &vc4_crtc->regset); 1225 1260 1226 1261 return 0; 1227 1262
+64 -18
drivers/gpu/drm/vc4/vc4_debugfs.c
··· 15 15 #include "vc4_drv.h" 16 16 #include "vc4_regs.h" 17 17 18 - static const struct drm_info_list vc4_debugfs_list[] = { 19 - {"bo_stats", vc4_bo_stats_debugfs, 0}, 20 - {"dpi_regs", vc4_dpi_debugfs_regs, 0}, 21 - {"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1}, 22 - {"hdmi_regs", vc4_hdmi_debugfs_regs, 0}, 23 - {"vec_regs", vc4_vec_debugfs_regs, 0}, 24 - {"txp_regs", vc4_txp_debugfs_regs, 0}, 25 - {"hvs_regs", vc4_hvs_debugfs_regs, 0}, 26 - {"hvs_underrun", vc4_hvs_debugfs_underrun, 0}, 27 - {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, 28 - {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1}, 29 - {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2}, 30 - {"v3d_ident", vc4_v3d_debugfs_ident, 0}, 31 - {"v3d_regs", vc4_v3d_debugfs_regs, 0}, 18 + struct vc4_debugfs_info_entry { 19 + struct list_head link; 20 + struct drm_info_list info; 32 21 }; 33 22 34 - #define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list) 35 - 23 + /** 24 + * Called at drm_dev_register() time on each of the minors registered 25 + * by the DRM device, to attach the debugfs files. 
26 + */ 36 27 int 37 28 vc4_debugfs_init(struct drm_minor *minor) 38 29 { 39 30 struct vc4_dev *vc4 = to_vc4_dev(minor->dev); 31 + struct vc4_debugfs_info_entry *entry; 40 32 struct dentry *dentry; 41 33 42 34 dentry = debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, ··· 37 45 if (!dentry) 38 46 return -ENOMEM; 39 47 40 - return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES, 41 - minor->debugfs_root, minor); 48 + list_for_each_entry(entry, &vc4->debugfs_list, link) { 49 + int ret = drm_debugfs_create_files(&entry->info, 1, 50 + minor->debugfs_root, minor); 51 + 52 + if (ret) 53 + return ret; 54 + } 55 + 56 + return 0; 57 + } 58 + 59 + int vc4_debugfs_regset32(struct seq_file *m, void *unused) 60 + { 61 + struct drm_info_node *node = (struct drm_info_node *)m->private; 62 + struct debugfs_regset32 *regset = node->info_ent->data; 63 + struct drm_printer p = drm_seq_file_printer(m); 64 + 65 + drm_print_regset32(&p, regset); 66 + 67 + return 0; 68 + } 69 + 70 + /** 71 + * Registers a debugfs file with a callback function for a vc4 component. 72 + * 73 + * This is like drm_debugfs_create_files(), but that can only be 74 + * called a given DRM minor, while the various VC4 components want to 75 + * register their debugfs files during the component bind process. We 76 + * track the request and delay it to be called on each minor during 77 + * vc4_debugfs_init(). 
78 + */ 79 + void vc4_debugfs_add_file(struct drm_device *dev, 80 + const char *name, 81 + int (*show)(struct seq_file*, void*), 82 + void *data) 83 + { 84 + struct vc4_dev *vc4 = to_vc4_dev(dev); 85 + 86 + struct vc4_debugfs_info_entry *entry = 87 + devm_kzalloc(dev->dev, sizeof(*entry), GFP_KERNEL); 88 + 89 + if (!entry) 90 + return; 91 + 92 + entry->info.name = name; 93 + entry->info.show = show; 94 + entry->info.data = data; 95 + 96 + list_add(&entry->link, &vc4->debugfs_list); 97 + } 98 + 99 + void vc4_debugfs_add_regset32(struct drm_device *drm, 100 + const char *name, 101 + struct debugfs_regset32 *regset) 102 + { 103 + vc4_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset); 42 104 }
+10 -29
drivers/gpu/drm/vc4/vc4_dpi.c
··· 101 101 102 102 struct clk *pixel_clock; 103 103 struct clk *core_clock; 104 + 105 + struct debugfs_regset32 regset; 104 106 }; 105 107 106 108 #define DPI_READ(offset) readl(dpi->regs + (offset)) ··· 120 118 return container_of(encoder, struct vc4_dpi_encoder, base.base); 121 119 } 122 120 123 - #define DPI_REG(reg) { reg, #reg } 124 - static const struct { 125 - u32 reg; 126 - const char *name; 127 - } dpi_regs[] = { 128 - DPI_REG(DPI_C), 129 - DPI_REG(DPI_ID), 121 + static const struct debugfs_reg32 dpi_regs[] = { 122 + VC4_REG32(DPI_C), 123 + VC4_REG32(DPI_ID), 130 124 }; 131 - 132 - #ifdef CONFIG_DEBUG_FS 133 - int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused) 134 - { 135 - struct drm_info_node *node = (struct drm_info_node *)m->private; 136 - struct drm_device *dev = node->minor->dev; 137 - struct vc4_dev *vc4 = to_vc4_dev(dev); 138 - struct vc4_dpi *dpi = vc4->dpi; 139 - int i; 140 - 141 - if (!dpi) 142 - return 0; 143 - 144 - for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) { 145 - seq_printf(m, "%s (0x%04x): 0x%08x\n", 146 - dpi_regs[i].name, dpi_regs[i].reg, 147 - DPI_READ(dpi_regs[i].reg)); 148 - } 149 - 150 - return 0; 151 - } 152 - #endif 153 125 154 126 static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = { 155 127 .destroy = drm_encoder_cleanup, ··· 290 314 dpi->regs = vc4_ioremap_regs(pdev, 0); 291 315 if (IS_ERR(dpi->regs)) 292 316 return PTR_ERR(dpi->regs); 317 + dpi->regset.base = dpi->regs; 318 + dpi->regset.regs = dpi_regs; 319 + dpi->regset.nregs = ARRAY_SIZE(dpi_regs); 293 320 294 321 if (DPI_READ(DPI_ID) != DPI_ID_VALUE) { 295 322 dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n", ··· 330 351 dev_set_drvdata(dev, dpi); 331 352 332 353 vc4->dpi = dpi; 354 + 355 + vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset); 333 356 334 357 return 0; 335 358
+24 -14
drivers/gpu/drm/vc4/vc4_drv.c
··· 72 72 if (args->pad != 0) 73 73 return -EINVAL; 74 74 75 + if (!vc4->v3d) 76 + return -ENODEV; 77 + 75 78 switch (args->param) { 76 79 case DRM_VC4_PARAM_V3D_IDENT0: 77 - ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); 78 - if (ret < 0) 80 + ret = vc4_v3d_pm_get(vc4); 81 + if (ret) 79 82 return ret; 80 83 args->value = V3D_READ(V3D_IDENT0); 81 - pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); 82 - pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); 84 + vc4_v3d_pm_put(vc4); 83 85 break; 84 86 case DRM_VC4_PARAM_V3D_IDENT1: 85 - ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); 86 - if (ret < 0) 87 + ret = vc4_v3d_pm_get(vc4); 88 + if (ret) 87 89 return ret; 88 90 args->value = V3D_READ(V3D_IDENT1); 89 - pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); 90 - pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); 91 + vc4_v3d_pm_put(vc4); 91 92 break; 92 93 case DRM_VC4_PARAM_V3D_IDENT2: 93 - ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); 94 - if (ret < 0) 94 + ret = vc4_v3d_pm_get(vc4); 95 + if (ret) 95 96 return ret; 96 97 args->value = V3D_READ(V3D_IDENT2); 97 - pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); 98 - pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); 98 + vc4_v3d_pm_put(vc4); 99 99 break; 100 100 case DRM_VC4_PARAM_SUPPORTS_BRANCHES: 101 101 case DRM_VC4_PARAM_SUPPORTS_ETC1: ··· 251 251 struct platform_device *pdev = to_platform_device(dev); 252 252 struct drm_device *drm; 253 253 struct vc4_dev *vc4; 254 + struct device_node *node; 254 255 int ret = 0; 255 256 256 257 dev->coherent_dma_mask = DMA_BIT_MASK(32); ··· 260 259 if (!vc4) 261 260 return -ENOMEM; 262 261 262 + /* If VC4 V3D is missing, don't advertise render nodes. 
*/ 263 + node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL); 264 + if (!node || !of_device_is_available(node)) 265 + vc4_drm_driver.driver_features &= ~DRIVER_RENDER; 266 + of_node_put(node); 267 + 263 268 drm = drm_dev_alloc(&vc4_drm_driver, dev); 264 269 if (IS_ERR(drm)) 265 270 return PTR_ERR(drm); 266 271 platform_set_drvdata(pdev, drm); 267 272 vc4->dev = drm; 268 273 drm->dev_private = vc4; 274 + INIT_LIST_HEAD(&vc4->debugfs_list); 269 275 270 276 ret = vc4_bo_cache_init(drm); 271 277 if (ret) ··· 288 280 289 281 drm_fb_helper_remove_conflicting_framebuffers(NULL, "vc4drmfb", false); 290 282 291 - ret = drm_dev_register(drm, 0); 283 + ret = vc4_kms_load(drm); 292 284 if (ret < 0) 293 285 goto unbind_all; 294 286 295 - vc4_kms_load(drm); 287 + ret = drm_dev_register(drm, 0); 288 + if (ret < 0) 289 + goto unbind_all; 296 290 297 291 drm_fbdev_generic_setup(drm, 16); 298 292
+38 -11
drivers/gpu/drm/vc4/vc4_drv.h
··· 211 211 struct drm_modeset_lock ctm_state_lock; 212 212 struct drm_private_obj ctm_manager; 213 213 struct drm_private_obj load_tracker; 214 + 215 + /* List of vc4_debugfs_info_entry for adding to debugfs once 216 + * the minor is available (after drm_dev_register()). 217 + */ 218 + struct list_head debugfs_list; 214 219 }; 215 220 216 221 static inline struct vc4_dev * ··· 301 296 struct platform_device *pdev; 302 297 void __iomem *regs; 303 298 struct clk *clk; 299 + struct debugfs_regset32 regset; 304 300 }; 305 301 306 302 struct vc4_hvs { ··· 318 312 spinlock_t mm_lock; 319 313 320 314 struct drm_mm_node mitchell_netravali_filter; 315 + struct debugfs_regset32 regset; 321 316 }; 322 317 323 318 struct vc4_plane { ··· 434 427 int hvs_channel; 435 428 436 429 enum vc4_encoder_type encoder_types[4]; 430 + const char *debugfs_name; 437 431 }; 438 432 439 433 struct vc4_crtc { 440 434 struct drm_crtc base; 435 + struct platform_device *pdev; 441 436 const struct vc4_crtc_data *data; 442 437 void __iomem *regs; 443 438 ··· 456 447 u32 cob_size; 457 448 458 449 struct drm_pending_vblank_event *event; 450 + 451 + struct debugfs_regset32 regset; 459 452 }; 460 453 461 454 static inline struct vc4_crtc * ··· 470 459 #define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset) 471 460 #define HVS_READ(offset) readl(vc4->hvs->regs + offset) 472 461 #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset) 462 + 463 + #define VC4_REG32(reg) { .name = #reg, .offset = reg } 473 464 474 465 struct vc4_exec_info { 475 466 /* Sequence number for this bin/render job. 
*/ ··· 721 708 void *vc4_prime_vmap(struct drm_gem_object *obj); 722 709 int vc4_bo_cache_init(struct drm_device *dev); 723 710 void vc4_bo_cache_destroy(struct drm_device *dev); 724 - int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); 725 711 int vc4_bo_inc_usecnt(struct vc4_bo *bo); 726 712 void vc4_bo_dec_usecnt(struct vc4_bo *bo); 727 713 void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo); ··· 728 716 729 717 /* vc4_crtc.c */ 730 718 extern struct platform_driver vc4_crtc_driver; 731 - int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg); 732 719 bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, 733 720 bool in_vblank_irq, int *vpos, int *hpos, 734 721 ktime_t *stime, ktime_t *etime, ··· 740 729 741 730 /* vc4_debugfs.c */ 742 731 int vc4_debugfs_init(struct drm_minor *minor); 732 + #ifdef CONFIG_DEBUG_FS 733 + void vc4_debugfs_add_file(struct drm_device *drm, 734 + const char *filename, 735 + int (*show)(struct seq_file*, void*), 736 + void *data); 737 + void vc4_debugfs_add_regset32(struct drm_device *drm, 738 + const char *filename, 739 + struct debugfs_regset32 *regset); 740 + #else 741 + static inline void vc4_debugfs_add_file(struct drm_device *drm, 742 + const char *filename, 743 + int (*show)(struct seq_file*, void*), 744 + void *data) 745 + { 746 + } 747 + 748 + static inline void vc4_debugfs_add_regset32(struct drm_device *drm, 749 + const char *filename, 750 + struct debugfs_regset32 *regset) 751 + { 752 + } 753 + #endif 743 754 744 755 /* vc4_drv.c */ 745 756 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index); 746 757 747 758 /* vc4_dpi.c */ 748 759 extern struct platform_driver vc4_dpi_driver; 749 - int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused); 750 760 751 761 /* vc4_dsi.c */ 752 762 extern struct platform_driver vc4_dsi_driver; 753 - int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused); 754 763 755 764 /* vc4_fence.c */ 756 765 extern const struct dma_fence_ops 
vc4_fence_ops; ··· 798 767 799 768 /* vc4_hdmi.c */ 800 769 extern struct platform_driver vc4_hdmi_driver; 801 - int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused); 802 770 803 771 /* vc4_vec.c */ 804 772 extern struct platform_driver vc4_vec_driver; 805 - int vc4_vec_debugfs_regs(struct seq_file *m, void *unused); 806 773 807 774 /* vc4_txp.c */ 808 775 extern struct platform_driver vc4_txp_driver; 809 - int vc4_txp_debugfs_regs(struct seq_file *m, void *unused); 810 776 811 777 /* vc4_irq.c */ 812 778 irqreturn_t vc4_irq(int irq, void *arg); ··· 815 787 /* vc4_hvs.c */ 816 788 extern struct platform_driver vc4_hvs_driver; 817 789 void vc4_hvs_dump_state(struct drm_device *dev); 818 - int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused); 819 - int vc4_hvs_debugfs_underrun(struct seq_file *m, void *unused); 820 790 void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel); 821 791 void vc4_hvs_mask_underrun(struct drm_device *dev, int channel); 822 792 ··· 831 805 832 806 /* vc4_v3d.c */ 833 807 extern struct platform_driver vc4_v3d_driver; 834 - int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused); 835 - int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused); 808 + extern const struct of_device_id vc4_v3d_dt_match[]; 836 809 int vc4_v3d_get_bin_slot(struct vc4_dev *vc4); 810 + int vc4_v3d_pm_get(struct vc4_dev *vc4); 811 + void vc4_v3d_pm_put(struct vc4_dev *vc4); 837 812 838 813 /* vc4_validate.c */ 839 814 int
+68 -107
drivers/gpu/drm/vc4/vc4_dsi.c
··· 545 545 546 546 struct completion xfer_completion; 547 547 int xfer_result; 548 + 549 + struct debugfs_regset32 regset; 548 550 }; 549 551 550 552 #define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host) ··· 607 605 return container_of(encoder, struct vc4_dsi_encoder, base.base); 608 606 } 609 607 610 - #define DSI_REG(reg) { reg, #reg } 611 - static const struct { 612 - u32 reg; 613 - const char *name; 614 - } dsi0_regs[] = { 615 - DSI_REG(DSI0_CTRL), 616 - DSI_REG(DSI0_STAT), 617 - DSI_REG(DSI0_HSTX_TO_CNT), 618 - DSI_REG(DSI0_LPRX_TO_CNT), 619 - DSI_REG(DSI0_TA_TO_CNT), 620 - DSI_REG(DSI0_PR_TO_CNT), 621 - DSI_REG(DSI0_DISP0_CTRL), 622 - DSI_REG(DSI0_DISP1_CTRL), 623 - DSI_REG(DSI0_INT_STAT), 624 - DSI_REG(DSI0_INT_EN), 625 - DSI_REG(DSI0_PHYC), 626 - DSI_REG(DSI0_HS_CLT0), 627 - DSI_REG(DSI0_HS_CLT1), 628 - DSI_REG(DSI0_HS_CLT2), 629 - DSI_REG(DSI0_HS_DLT3), 630 - DSI_REG(DSI0_HS_DLT4), 631 - DSI_REG(DSI0_HS_DLT5), 632 - DSI_REG(DSI0_HS_DLT6), 633 - DSI_REG(DSI0_HS_DLT7), 634 - DSI_REG(DSI0_PHY_AFEC0), 635 - DSI_REG(DSI0_PHY_AFEC1), 636 - DSI_REG(DSI0_ID), 608 + static const struct debugfs_reg32 dsi0_regs[] = { 609 + VC4_REG32(DSI0_CTRL), 610 + VC4_REG32(DSI0_STAT), 611 + VC4_REG32(DSI0_HSTX_TO_CNT), 612 + VC4_REG32(DSI0_LPRX_TO_CNT), 613 + VC4_REG32(DSI0_TA_TO_CNT), 614 + VC4_REG32(DSI0_PR_TO_CNT), 615 + VC4_REG32(DSI0_DISP0_CTRL), 616 + VC4_REG32(DSI0_DISP1_CTRL), 617 + VC4_REG32(DSI0_INT_STAT), 618 + VC4_REG32(DSI0_INT_EN), 619 + VC4_REG32(DSI0_PHYC), 620 + VC4_REG32(DSI0_HS_CLT0), 621 + VC4_REG32(DSI0_HS_CLT1), 622 + VC4_REG32(DSI0_HS_CLT2), 623 + VC4_REG32(DSI0_HS_DLT3), 624 + VC4_REG32(DSI0_HS_DLT4), 625 + VC4_REG32(DSI0_HS_DLT5), 626 + VC4_REG32(DSI0_HS_DLT6), 627 + VC4_REG32(DSI0_HS_DLT7), 628 + VC4_REG32(DSI0_PHY_AFEC0), 629 + VC4_REG32(DSI0_PHY_AFEC1), 630 + VC4_REG32(DSI0_ID), 637 631 }; 638 632 639 - static const struct { 640 - u32 reg; 641 - const char *name; 642 - } dsi1_regs[] = { 643 - DSI_REG(DSI1_CTRL), 644 - 
DSI_REG(DSI1_STAT), 645 - DSI_REG(DSI1_HSTX_TO_CNT), 646 - DSI_REG(DSI1_LPRX_TO_CNT), 647 - DSI_REG(DSI1_TA_TO_CNT), 648 - DSI_REG(DSI1_PR_TO_CNT), 649 - DSI_REG(DSI1_DISP0_CTRL), 650 - DSI_REG(DSI1_DISP1_CTRL), 651 - DSI_REG(DSI1_INT_STAT), 652 - DSI_REG(DSI1_INT_EN), 653 - DSI_REG(DSI1_PHYC), 654 - DSI_REG(DSI1_HS_CLT0), 655 - DSI_REG(DSI1_HS_CLT1), 656 - DSI_REG(DSI1_HS_CLT2), 657 - DSI_REG(DSI1_HS_DLT3), 658 - DSI_REG(DSI1_HS_DLT4), 659 - DSI_REG(DSI1_HS_DLT5), 660 - DSI_REG(DSI1_HS_DLT6), 661 - DSI_REG(DSI1_HS_DLT7), 662 - DSI_REG(DSI1_PHY_AFEC0), 663 - DSI_REG(DSI1_PHY_AFEC1), 664 - DSI_REG(DSI1_ID), 633 + static const struct debugfs_reg32 dsi1_regs[] = { 634 + VC4_REG32(DSI1_CTRL), 635 + VC4_REG32(DSI1_STAT), 636 + VC4_REG32(DSI1_HSTX_TO_CNT), 637 + VC4_REG32(DSI1_LPRX_TO_CNT), 638 + VC4_REG32(DSI1_TA_TO_CNT), 639 + VC4_REG32(DSI1_PR_TO_CNT), 640 + VC4_REG32(DSI1_DISP0_CTRL), 641 + VC4_REG32(DSI1_DISP1_CTRL), 642 + VC4_REG32(DSI1_INT_STAT), 643 + VC4_REG32(DSI1_INT_EN), 644 + VC4_REG32(DSI1_PHYC), 645 + VC4_REG32(DSI1_HS_CLT0), 646 + VC4_REG32(DSI1_HS_CLT1), 647 + VC4_REG32(DSI1_HS_CLT2), 648 + VC4_REG32(DSI1_HS_DLT3), 649 + VC4_REG32(DSI1_HS_DLT4), 650 + VC4_REG32(DSI1_HS_DLT5), 651 + VC4_REG32(DSI1_HS_DLT6), 652 + VC4_REG32(DSI1_HS_DLT7), 653 + VC4_REG32(DSI1_PHY_AFEC0), 654 + VC4_REG32(DSI1_PHY_AFEC1), 655 + VC4_REG32(DSI1_ID), 665 656 }; 666 - 667 - static void vc4_dsi_dump_regs(struct vc4_dsi *dsi) 668 - { 669 - int i; 670 - 671 - if (dsi->port == 0) { 672 - for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) { 673 - DRM_INFO("0x%04x (%s): 0x%08x\n", 674 - dsi0_regs[i].reg, dsi0_regs[i].name, 675 - DSI_READ(dsi0_regs[i].reg)); 676 - } 677 - } else { 678 - for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) { 679 - DRM_INFO("0x%04x (%s): 0x%08x\n", 680 - dsi1_regs[i].reg, dsi1_regs[i].name, 681 - DSI_READ(dsi1_regs[i].reg)); 682 - } 683 - } 684 - } 685 - 686 - #ifdef CONFIG_DEBUG_FS 687 - int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused) 688 - { 689 - struct 
drm_info_node *node = (struct drm_info_node *)m->private; 690 - struct drm_device *drm = node->minor->dev; 691 - struct vc4_dev *vc4 = to_vc4_dev(drm); 692 - int dsi_index = (uintptr_t)node->info_ent->data; 693 - struct vc4_dsi *dsi = (dsi_index == 1 ? vc4->dsi1 : NULL); 694 - int i; 695 - 696 - if (!dsi) 697 - return 0; 698 - 699 - if (dsi->port == 0) { 700 - for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) { 701 - seq_printf(m, "0x%04x (%s): 0x%08x\n", 702 - dsi0_regs[i].reg, dsi0_regs[i].name, 703 - DSI_READ(dsi0_regs[i].reg)); 704 - } 705 - } else { 706 - for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) { 707 - seq_printf(m, "0x%04x (%s): 0x%08x\n", 708 - dsi1_regs[i].reg, dsi1_regs[i].name, 709 - DSI_READ(dsi1_regs[i].reg)); 710 - } 711 - } 712 - 713 - return 0; 714 - } 715 - #endif 716 657 717 658 static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder) 718 659 { ··· 845 900 } 846 901 847 902 if (debug_dump_regs) { 848 - DRM_INFO("DSI regs before:\n"); 849 - vc4_dsi_dump_regs(dsi); 903 + struct drm_printer p = drm_info_printer(&dsi->pdev->dev); 904 + dev_info(&dsi->pdev->dev, "DSI regs before:\n"); 905 + drm_print_regset32(&p, &dsi->regset); 850 906 } 851 907 852 908 /* Round up the clk_set_rate() request slightly, since ··· 1081 1135 drm_bridge_enable(dsi->bridge); 1082 1136 1083 1137 if (debug_dump_regs) { 1084 - DRM_INFO("DSI regs after:\n"); 1085 - vc4_dsi_dump_regs(dsi); 1138 + struct drm_printer p = drm_info_printer(&dsi->pdev->dev); 1139 + dev_info(&dsi->pdev->dev, "DSI regs after:\n"); 1140 + drm_print_regset32(&p, &dsi->regset); 1086 1141 } 1087 1142 } 1088 1143 ··· 1474 1527 if (IS_ERR(dsi->regs)) 1475 1528 return PTR_ERR(dsi->regs); 1476 1529 1530 + dsi->regset.base = dsi->regs; 1531 + if (dsi->port == 0) { 1532 + dsi->regset.regs = dsi0_regs; 1533 + dsi->regset.nregs = ARRAY_SIZE(dsi0_regs); 1534 + } else { 1535 + dsi->regset.regs = dsi1_regs; 1536 + dsi->regset.nregs = ARRAY_SIZE(dsi1_regs); 1537 + } 1538 + 1477 1539 if (DSI_PORT_READ(ID) != 
DSI_ID_VALUE) { 1478 1540 dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n", 1479 1541 DSI_PORT_READ(ID), DSI_ID_VALUE); ··· 1617 1661 * encoder's enable/disable paths. 1618 1662 */ 1619 1663 dsi->encoder->bridge = NULL; 1664 + 1665 + if (dsi->port == 0) 1666 + vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset); 1667 + else 1668 + vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset); 1620 1669 1621 1670 pm_runtime_enable(dev); 1622 1671
+15 -16
drivers/gpu/drm/vc4/vc4_gem.c
··· 74 74 u32 i; 75 75 int ret = 0; 76 76 77 + if (!vc4->v3d) { 78 + DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n"); 79 + return -ENODEV; 80 + } 81 + 77 82 spin_lock_irqsave(&vc4->job_lock, irqflags); 78 83 kernel_state = vc4->hang_state; 79 84 if (!kernel_state) { ··· 969 964 /* Release the reference we had on the perf monitor. */ 970 965 vc4_perfmon_put(exec->perfmon); 971 966 972 - mutex_lock(&vc4->power_lock); 973 - if (--vc4->power_refcount == 0) { 974 - pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); 975 - pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); 976 - } 977 - mutex_unlock(&vc4->power_lock); 967 + vc4_v3d_pm_put(vc4); 978 968 979 969 kfree(exec); 980 970 } ··· 1124 1124 struct dma_fence *in_fence; 1125 1125 int ret = 0; 1126 1126 1127 + if (!vc4->v3d) { 1128 + DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n"); 1129 + return -ENODEV; 1130 + } 1131 + 1127 1132 if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR | 1128 1133 VC4_SUBMIT_CL_FIXED_RCL_ORDER | 1129 1134 VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X | ··· 1148 1143 return -ENOMEM; 1149 1144 } 1150 1145 1151 - mutex_lock(&vc4->power_lock); 1152 - if (vc4->power_refcount++ == 0) { 1153 - ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); 1154 - if (ret < 0) { 1155 - mutex_unlock(&vc4->power_lock); 1156 - vc4->power_refcount--; 1157 - kfree(exec); 1158 - return ret; 1159 - } 1146 + ret = vc4_v3d_pm_get(vc4); 1147 + if (ret) { 1148 + kfree(exec); 1149 + return ret; 1160 1150 } 1161 - mutex_unlock(&vc4->power_lock); 1162 1151 1163 1152 exec->args = args; 1164 1153 INIT_LIST_HEAD(&exec->unref_list);
+71 -87
drivers/gpu/drm/vc4/vc4_hdmi.c
··· 97 97 98 98 struct clk *pixel_clock; 99 99 struct clk *hsm_clock; 100 + 101 + struct debugfs_regset32 hdmi_regset; 102 + struct debugfs_regset32 hd_regset; 100 103 }; 101 104 102 105 #define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + offset) ··· 137 134 return container_of(connector, struct vc4_hdmi_connector, base); 138 135 } 139 136 140 - #define HDMI_REG(reg) { reg, #reg } 141 - static const struct { 142 - u32 reg; 143 - const char *name; 144 - } hdmi_regs[] = { 145 - HDMI_REG(VC4_HDMI_CORE_REV), 146 - HDMI_REG(VC4_HDMI_SW_RESET_CONTROL), 147 - HDMI_REG(VC4_HDMI_HOTPLUG_INT), 148 - HDMI_REG(VC4_HDMI_HOTPLUG), 149 - HDMI_REG(VC4_HDMI_MAI_CHANNEL_MAP), 150 - HDMI_REG(VC4_HDMI_MAI_CONFIG), 151 - HDMI_REG(VC4_HDMI_MAI_FORMAT), 152 - HDMI_REG(VC4_HDMI_AUDIO_PACKET_CONFIG), 153 - HDMI_REG(VC4_HDMI_RAM_PACKET_CONFIG), 154 - HDMI_REG(VC4_HDMI_HORZA), 155 - HDMI_REG(VC4_HDMI_HORZB), 156 - HDMI_REG(VC4_HDMI_FIFO_CTL), 157 - HDMI_REG(VC4_HDMI_SCHEDULER_CONTROL), 158 - HDMI_REG(VC4_HDMI_VERTA0), 159 - HDMI_REG(VC4_HDMI_VERTA1), 160 - HDMI_REG(VC4_HDMI_VERTB0), 161 - HDMI_REG(VC4_HDMI_VERTB1), 162 - HDMI_REG(VC4_HDMI_TX_PHY_RESET_CTL), 163 - HDMI_REG(VC4_HDMI_TX_PHY_CTL0), 137 + static const struct debugfs_reg32 hdmi_regs[] = { 138 + VC4_REG32(VC4_HDMI_CORE_REV), 139 + VC4_REG32(VC4_HDMI_SW_RESET_CONTROL), 140 + VC4_REG32(VC4_HDMI_HOTPLUG_INT), 141 + VC4_REG32(VC4_HDMI_HOTPLUG), 142 + VC4_REG32(VC4_HDMI_MAI_CHANNEL_MAP), 143 + VC4_REG32(VC4_HDMI_MAI_CONFIG), 144 + VC4_REG32(VC4_HDMI_MAI_FORMAT), 145 + VC4_REG32(VC4_HDMI_AUDIO_PACKET_CONFIG), 146 + VC4_REG32(VC4_HDMI_RAM_PACKET_CONFIG), 147 + VC4_REG32(VC4_HDMI_HORZA), 148 + VC4_REG32(VC4_HDMI_HORZB), 149 + VC4_REG32(VC4_HDMI_FIFO_CTL), 150 + VC4_REG32(VC4_HDMI_SCHEDULER_CONTROL), 151 + VC4_REG32(VC4_HDMI_VERTA0), 152 + VC4_REG32(VC4_HDMI_VERTA1), 153 + VC4_REG32(VC4_HDMI_VERTB0), 154 + VC4_REG32(VC4_HDMI_VERTB1), 155 + VC4_REG32(VC4_HDMI_TX_PHY_RESET_CTL), 156 + VC4_REG32(VC4_HDMI_TX_PHY_CTL0), 164 157 165 - 
HDMI_REG(VC4_HDMI_CEC_CNTRL_1), 166 - HDMI_REG(VC4_HDMI_CEC_CNTRL_2), 167 - HDMI_REG(VC4_HDMI_CEC_CNTRL_3), 168 - HDMI_REG(VC4_HDMI_CEC_CNTRL_4), 169 - HDMI_REG(VC4_HDMI_CEC_CNTRL_5), 170 - HDMI_REG(VC4_HDMI_CPU_STATUS), 171 - HDMI_REG(VC4_HDMI_CPU_MASK_STATUS), 158 + VC4_REG32(VC4_HDMI_CEC_CNTRL_1), 159 + VC4_REG32(VC4_HDMI_CEC_CNTRL_2), 160 + VC4_REG32(VC4_HDMI_CEC_CNTRL_3), 161 + VC4_REG32(VC4_HDMI_CEC_CNTRL_4), 162 + VC4_REG32(VC4_HDMI_CEC_CNTRL_5), 163 + VC4_REG32(VC4_HDMI_CPU_STATUS), 164 + VC4_REG32(VC4_HDMI_CPU_MASK_STATUS), 172 165 173 - HDMI_REG(VC4_HDMI_CEC_RX_DATA_1), 174 - HDMI_REG(VC4_HDMI_CEC_RX_DATA_2), 175 - HDMI_REG(VC4_HDMI_CEC_RX_DATA_3), 176 - HDMI_REG(VC4_HDMI_CEC_RX_DATA_4), 177 - HDMI_REG(VC4_HDMI_CEC_TX_DATA_1), 178 - HDMI_REG(VC4_HDMI_CEC_TX_DATA_2), 179 - HDMI_REG(VC4_HDMI_CEC_TX_DATA_3), 180 - HDMI_REG(VC4_HDMI_CEC_TX_DATA_4), 166 + VC4_REG32(VC4_HDMI_CEC_RX_DATA_1), 167 + VC4_REG32(VC4_HDMI_CEC_RX_DATA_2), 168 + VC4_REG32(VC4_HDMI_CEC_RX_DATA_3), 169 + VC4_REG32(VC4_HDMI_CEC_RX_DATA_4), 170 + VC4_REG32(VC4_HDMI_CEC_TX_DATA_1), 171 + VC4_REG32(VC4_HDMI_CEC_TX_DATA_2), 172 + VC4_REG32(VC4_HDMI_CEC_TX_DATA_3), 173 + VC4_REG32(VC4_HDMI_CEC_TX_DATA_4), 181 174 }; 182 175 183 - static const struct { 184 - u32 reg; 185 - const char *name; 186 - } hd_regs[] = { 187 - HDMI_REG(VC4_HD_M_CTL), 188 - HDMI_REG(VC4_HD_MAI_CTL), 189 - HDMI_REG(VC4_HD_MAI_THR), 190 - HDMI_REG(VC4_HD_MAI_FMT), 191 - HDMI_REG(VC4_HD_MAI_SMP), 192 - HDMI_REG(VC4_HD_VID_CTL), 193 - HDMI_REG(VC4_HD_CSC_CTL), 194 - HDMI_REG(VC4_HD_FRAME_COUNT), 176 + static const struct debugfs_reg32 hd_regs[] = { 177 + VC4_REG32(VC4_HD_M_CTL), 178 + VC4_REG32(VC4_HD_MAI_CTL), 179 + VC4_REG32(VC4_HD_MAI_THR), 180 + VC4_REG32(VC4_HD_MAI_FMT), 181 + VC4_REG32(VC4_HD_MAI_SMP), 182 + VC4_REG32(VC4_HD_VID_CTL), 183 + VC4_REG32(VC4_HD_CSC_CTL), 184 + VC4_REG32(VC4_HD_FRAME_COUNT), 195 185 }; 196 186 197 - #ifdef CONFIG_DEBUG_FS 198 - int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) 187 
+ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) 199 188 { 200 189 struct drm_info_node *node = (struct drm_info_node *)m->private; 201 190 struct drm_device *dev = node->minor->dev; 202 191 struct vc4_dev *vc4 = to_vc4_dev(dev); 203 - int i; 192 + struct vc4_hdmi *hdmi = vc4->hdmi; 193 + struct drm_printer p = drm_seq_file_printer(m); 204 194 205 - for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) { 206 - seq_printf(m, "%s (0x%04x): 0x%08x\n", 207 - hdmi_regs[i].name, hdmi_regs[i].reg, 208 - HDMI_READ(hdmi_regs[i].reg)); 209 - } 210 - 211 - for (i = 0; i < ARRAY_SIZE(hd_regs); i++) { 212 - seq_printf(m, "%s (0x%04x): 0x%08x\n", 213 - hd_regs[i].name, hd_regs[i].reg, 214 - HD_READ(hd_regs[i].reg)); 215 - } 195 + drm_print_regset32(&p, &hdmi->hdmi_regset); 196 + drm_print_regset32(&p, &hdmi->hd_regset); 216 197 217 198 return 0; 218 - } 219 - #endif /* CONFIG_DEBUG_FS */ 220 - 221 - static void vc4_hdmi_dump_regs(struct drm_device *dev) 222 - { 223 - struct vc4_dev *vc4 = to_vc4_dev(dev); 224 - int i; 225 - 226 - for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) { 227 - DRM_INFO("0x%04x (%s): 0x%08x\n", 228 - hdmi_regs[i].reg, hdmi_regs[i].name, 229 - HDMI_READ(hdmi_regs[i].reg)); 230 - } 231 - for (i = 0; i < ARRAY_SIZE(hd_regs); i++) { 232 - DRM_INFO("0x%04x (%s): 0x%08x\n", 233 - hd_regs[i].reg, hd_regs[i].name, 234 - HD_READ(hd_regs[i].reg)); 235 - } 236 199 } 237 200 238 201 static enum drm_connector_status ··· 530 561 HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0); 531 562 532 563 if (debug_dump_regs) { 533 - DRM_INFO("HDMI regs before:\n"); 534 - vc4_hdmi_dump_regs(dev); 564 + struct drm_printer p = drm_info_printer(&hdmi->pdev->dev); 565 + 566 + dev_info(&hdmi->pdev->dev, "HDMI regs before:\n"); 567 + drm_print_regset32(&p, &hdmi->hdmi_regset); 568 + drm_print_regset32(&p, &hdmi->hd_regset); 535 569 } 536 570 537 571 HD_WRITE(VC4_HD_VID_CTL, 0); ··· 609 637 HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N); 610 638 611 639 if 
(debug_dump_regs) { 612 - DRM_INFO("HDMI regs after:\n"); 613 - vc4_hdmi_dump_regs(dev); 640 + struct drm_printer p = drm_info_printer(&hdmi->pdev->dev); 641 + 642 + dev_info(&hdmi->pdev->dev, "HDMI regs after:\n"); 643 + drm_print_regset32(&p, &hdmi->hdmi_regset); 644 + drm_print_regset32(&p, &hdmi->hd_regset); 614 645 } 615 646 616 647 HD_WRITE(VC4_HD_VID_CTL, ··· 1308 1333 if (IS_ERR(hdmi->hd_regs)) 1309 1334 return PTR_ERR(hdmi->hd_regs); 1310 1335 1336 + hdmi->hdmi_regset.base = hdmi->hdmicore_regs; 1337 + hdmi->hdmi_regset.regs = hdmi_regs; 1338 + hdmi->hdmi_regset.nregs = ARRAY_SIZE(hdmi_regs); 1339 + hdmi->hd_regset.base = hdmi->hd_regs; 1340 + hdmi->hd_regset.regs = hd_regs; 1341 + hdmi->hd_regset.nregs = ARRAY_SIZE(hd_regs); 1342 + 1311 1343 hdmi->pixel_clock = devm_clk_get(dev, "pixel"); 1312 1344 if (IS_ERR(hdmi->pixel_clock)) { 1313 1345 DRM_ERROR("Failed to get pixel clock\n"); ··· 1429 1447 ret = vc4_hdmi_audio_init(hdmi); 1430 1448 if (ret) 1431 1449 goto err_destroy_encoder; 1450 + 1451 + vc4_debugfs_add_file(drm, "hdmi_regs", vc4_hdmi_debugfs_regs, hdmi); 1432 1452 1433 1453 return 0; 1434 1454
+43 -60
drivers/gpu/drm/vc4/vc4_hvs.c
··· 27 27 #include "vc4_drv.h" 28 28 #include "vc4_regs.h" 29 29 30 - #define HVS_REG(reg) { reg, #reg } 31 - static const struct { 32 - u32 reg; 33 - const char *name; 34 - } hvs_regs[] = { 35 - HVS_REG(SCALER_DISPCTRL), 36 - HVS_REG(SCALER_DISPSTAT), 37 - HVS_REG(SCALER_DISPID), 38 - HVS_REG(SCALER_DISPECTRL), 39 - HVS_REG(SCALER_DISPPROF), 40 - HVS_REG(SCALER_DISPDITHER), 41 - HVS_REG(SCALER_DISPEOLN), 42 - HVS_REG(SCALER_DISPLIST0), 43 - HVS_REG(SCALER_DISPLIST1), 44 - HVS_REG(SCALER_DISPLIST2), 45 - HVS_REG(SCALER_DISPLSTAT), 46 - HVS_REG(SCALER_DISPLACT0), 47 - HVS_REG(SCALER_DISPLACT1), 48 - HVS_REG(SCALER_DISPLACT2), 49 - HVS_REG(SCALER_DISPCTRL0), 50 - HVS_REG(SCALER_DISPBKGND0), 51 - HVS_REG(SCALER_DISPSTAT0), 52 - HVS_REG(SCALER_DISPBASE0), 53 - HVS_REG(SCALER_DISPCTRL1), 54 - HVS_REG(SCALER_DISPBKGND1), 55 - HVS_REG(SCALER_DISPSTAT1), 56 - HVS_REG(SCALER_DISPBASE1), 57 - HVS_REG(SCALER_DISPCTRL2), 58 - HVS_REG(SCALER_DISPBKGND2), 59 - HVS_REG(SCALER_DISPSTAT2), 60 - HVS_REG(SCALER_DISPBASE2), 61 - HVS_REG(SCALER_DISPALPHA2), 62 - HVS_REG(SCALER_OLEDOFFS), 63 - HVS_REG(SCALER_OLEDCOEF0), 64 - HVS_REG(SCALER_OLEDCOEF1), 65 - HVS_REG(SCALER_OLEDCOEF2), 30 + static const struct debugfs_reg32 hvs_regs[] = { 31 + VC4_REG32(SCALER_DISPCTRL), 32 + VC4_REG32(SCALER_DISPSTAT), 33 + VC4_REG32(SCALER_DISPID), 34 + VC4_REG32(SCALER_DISPECTRL), 35 + VC4_REG32(SCALER_DISPPROF), 36 + VC4_REG32(SCALER_DISPDITHER), 37 + VC4_REG32(SCALER_DISPEOLN), 38 + VC4_REG32(SCALER_DISPLIST0), 39 + VC4_REG32(SCALER_DISPLIST1), 40 + VC4_REG32(SCALER_DISPLIST2), 41 + VC4_REG32(SCALER_DISPLSTAT), 42 + VC4_REG32(SCALER_DISPLACT0), 43 + VC4_REG32(SCALER_DISPLACT1), 44 + VC4_REG32(SCALER_DISPLACT2), 45 + VC4_REG32(SCALER_DISPCTRL0), 46 + VC4_REG32(SCALER_DISPBKGND0), 47 + VC4_REG32(SCALER_DISPSTAT0), 48 + VC4_REG32(SCALER_DISPBASE0), 49 + VC4_REG32(SCALER_DISPCTRL1), 50 + VC4_REG32(SCALER_DISPBKGND1), 51 + VC4_REG32(SCALER_DISPSTAT1), 52 + VC4_REG32(SCALER_DISPBASE1), 53 + 
VC4_REG32(SCALER_DISPCTRL2), 54 + VC4_REG32(SCALER_DISPBKGND2), 55 + VC4_REG32(SCALER_DISPSTAT2), 56 + VC4_REG32(SCALER_DISPBASE2), 57 + VC4_REG32(SCALER_DISPALPHA2), 58 + VC4_REG32(SCALER_OLEDOFFS), 59 + VC4_REG32(SCALER_OLEDCOEF0), 60 + VC4_REG32(SCALER_OLEDCOEF1), 61 + VC4_REG32(SCALER_OLEDCOEF2), 66 62 }; 67 63 68 64 void vc4_hvs_dump_state(struct drm_device *dev) 69 65 { 70 66 struct vc4_dev *vc4 = to_vc4_dev(dev); 67 + struct drm_printer p = drm_info_printer(&vc4->hvs->pdev->dev); 71 68 int i; 72 69 73 - for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) { 74 - DRM_INFO("0x%04x (%s): 0x%08x\n", 75 - hvs_regs[i].reg, hvs_regs[i].name, 76 - HVS_READ(hvs_regs[i].reg)); 77 - } 70 + drm_print_regset32(&p, &vc4->hvs->regset); 78 71 79 72 DRM_INFO("HVS ctx:\n"); 80 73 for (i = 0; i < 64; i += 4) { ··· 80 87 } 81 88 } 82 89 83 - #ifdef CONFIG_DEBUG_FS 84 - int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused) 85 - { 86 - struct drm_info_node *node = (struct drm_info_node *)m->private; 87 - struct drm_device *dev = node->minor->dev; 88 - struct vc4_dev *vc4 = to_vc4_dev(dev); 89 - int i; 90 - 91 - for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) { 92 - seq_printf(m, "%s (0x%04x): 0x%08x\n", 93 - hvs_regs[i].name, hvs_regs[i].reg, 94 - HVS_READ(hvs_regs[i].reg)); 95 - } 96 - 97 - return 0; 98 - } 99 - 100 - int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data) 90 + static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data) 101 91 { 102 92 struct drm_info_node *node = m->private; 103 93 struct drm_device *dev = node->minor->dev; ··· 91 115 92 116 return 0; 93 117 } 94 - #endif 95 118 96 119 /* The filter kernel is composed of dwords each containing 3 9-bit 97 120 * signed integers packed next to each other. 
··· 234 259 if (IS_ERR(hvs->regs)) 235 260 return PTR_ERR(hvs->regs); 236 261 262 + hvs->regset.base = hvs->regs; 263 + hvs->regset.regs = hvs_regs; 264 + hvs->regset.nregs = ARRAY_SIZE(hvs_regs); 265 + 237 266 hvs->dlist = hvs->regs + SCALER_DLIST_START; 238 267 239 268 spin_lock_init(&hvs->mm_lock); ··· 301 322 vc4_hvs_irq_handler, 0, "vc4 hvs", drm); 302 323 if (ret) 303 324 return ret; 325 + 326 + vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset); 327 + vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, 328 + NULL); 304 329 305 330 return 0; 306 331 }
+9
drivers/gpu/drm/vc4/vc4_irq.c
··· 229 229 { 230 230 struct vc4_dev *vc4 = to_vc4_dev(dev); 231 231 232 + if (!vc4->v3d) 233 + return; 234 + 232 235 init_waitqueue_head(&vc4->job_wait_queue); 233 236 INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work); 234 237 ··· 246 243 { 247 244 struct vc4_dev *vc4 = to_vc4_dev(dev); 248 245 246 + if (!vc4->v3d) 247 + return 0; 248 + 249 249 /* Enable both the render done and out of memory interrupts. */ 250 250 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); 251 251 ··· 259 253 vc4_irq_uninstall(struct drm_device *dev) 260 254 { 261 255 struct vc4_dev *vc4 = to_vc4_dev(dev); 256 + 257 + if (!vc4->v3d) 258 + return; 262 259 263 260 /* Disable sending interrupts for our driver's IRQs. */ 264 261 V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
+1
drivers/gpu/drm/vc4/vc4_kms.c
··· 525 525 /* Set support for vblank irq fast disable, before drm_vblank_init() */ 526 526 dev->vblank_disable_immediate = true; 527 527 528 + dev->irq_enabled = true; 528 529 ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 529 530 if (ret < 0) { 530 531 dev_err(dev->dev, "failed to initialize vblank\n");
+18
drivers/gpu/drm/vc4/vc4_perfmon.c
··· 100 100 int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, 101 101 struct drm_file *file_priv) 102 102 { 103 + struct vc4_dev *vc4 = to_vc4_dev(dev); 103 104 struct vc4_file *vc4file = file_priv->driver_priv; 104 105 struct drm_vc4_perfmon_create *req = data; 105 106 struct vc4_perfmon *perfmon; 106 107 unsigned int i; 107 108 int ret; 109 + 110 + if (!vc4->v3d) { 111 + DRM_DEBUG("Creating perfmon no VC4 V3D probed\n"); 112 + return -ENODEV; 113 + } 108 114 109 115 /* Number of monitored counters cannot exceed HW limits. */ 110 116 if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS || ··· 152 146 int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, 153 147 struct drm_file *file_priv) 154 148 { 149 + struct vc4_dev *vc4 = to_vc4_dev(dev); 155 150 struct vc4_file *vc4file = file_priv->driver_priv; 156 151 struct drm_vc4_perfmon_destroy *req = data; 157 152 struct vc4_perfmon *perfmon; 153 + 154 + if (!vc4->v3d) { 155 + DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n"); 156 + return -ENODEV; 157 + } 158 158 159 159 mutex_lock(&vc4file->perfmon.lock); 160 160 perfmon = idr_remove(&vc4file->perfmon.idr, req->id); ··· 176 164 int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, 177 165 struct drm_file *file_priv) 178 166 { 167 + struct vc4_dev *vc4 = to_vc4_dev(dev); 179 168 struct vc4_file *vc4file = file_priv->driver_priv; 180 169 struct drm_vc4_perfmon_get_values *req = data; 181 170 struct vc4_perfmon *perfmon; 182 171 int ret; 172 + 173 + if (!vc4->v3d) { 174 + DRM_DEBUG("Getting perfmon no VC4 V3D probed\n"); 175 + return -ENODEV; 176 + } 183 177 184 178 mutex_lock(&vc4file->perfmon.lock); 185 179 perfmon = idr_find(&vc4file->perfmon.idr, req->id);
+11 -12
drivers/gpu/drm/vc4/vc4_render_cl.c
··· 148 148 } 149 149 150 150 if (setup->zs_read) { 151 + if (setup->color_read) { 152 + /* Exec previous load. */ 153 + vc4_tile_coordinates(setup, x, y); 154 + vc4_store_before_load(setup); 155 + } 156 + 151 157 if (args->zs_read.flags & 152 158 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { 153 159 rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER); ··· 162 156 &args->zs_read, x, y) | 163 157 VC4_LOADSTORE_FULL_RES_DISABLE_COLOR); 164 158 } else { 165 - if (setup->color_read) { 166 - /* Exec previous load. */ 167 - vc4_tile_coordinates(setup, x, y); 168 - vc4_store_before_load(setup); 169 - } 170 - 171 159 rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL); 172 160 rcl_u16(setup, args->zs_read.bits); 173 161 rcl_u32(setup, setup->zs_read->paddr + ··· 291 291 } 292 292 } 293 293 if (setup->zs_read) { 294 + if (setup->color_read) { 295 + loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE; 296 + loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE; 297 + } 298 + 294 299 if (args->zs_read.flags & 295 300 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { 296 301 loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE; 297 302 } else { 298 - if (setup->color_read && 299 - !(args->color_read.flags & 300 - VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) { 301 - loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE; 302 - loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE; 303 - } 304 303 loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE; 305 304 } 306 305 }
+12 -32
drivers/gpu/drm/vc4/vc4_txp.c
··· 148 148 struct drm_writeback_connector connector; 149 149 150 150 void __iomem *regs; 151 + struct debugfs_regset32 regset; 151 152 }; 152 153 153 154 static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder) ··· 161 160 return container_of(conn, struct vc4_txp, connector.base); 162 161 } 163 162 164 - #define TXP_REG(reg) { reg, #reg } 165 - static const struct { 166 - u32 reg; 167 - const char *name; 168 - } txp_regs[] = { 169 - TXP_REG(TXP_DST_PTR), 170 - TXP_REG(TXP_DST_PITCH), 171 - TXP_REG(TXP_DIM), 172 - TXP_REG(TXP_DST_CTRL), 173 - TXP_REG(TXP_PROGRESS), 163 + static const struct debugfs_reg32 txp_regs[] = { 164 + VC4_REG32(TXP_DST_PTR), 165 + VC4_REG32(TXP_DST_PITCH), 166 + VC4_REG32(TXP_DIM), 167 + VC4_REG32(TXP_DST_CTRL), 168 + VC4_REG32(TXP_PROGRESS), 174 169 }; 175 - 176 - #ifdef CONFIG_DEBUG_FS 177 - int vc4_txp_debugfs_regs(struct seq_file *m, void *unused) 178 - { 179 - struct drm_info_node *node = (struct drm_info_node *)m->private; 180 - struct drm_device *dev = node->minor->dev; 181 - struct vc4_dev *vc4 = to_vc4_dev(dev); 182 - struct vc4_txp *txp = vc4->txp; 183 - int i; 184 - 185 - if (!txp) 186 - return 0; 187 - 188 - for (i = 0; i < ARRAY_SIZE(txp_regs); i++) { 189 - seq_printf(m, "%s (0x%04x): 0x%08x\n", 190 - txp_regs[i].name, txp_regs[i].reg, 191 - TXP_READ(txp_regs[i].reg)); 192 - } 193 - 194 - return 0; 195 - } 196 - #endif 197 170 198 171 static int vc4_txp_connector_get_modes(struct drm_connector *connector) 199 172 { ··· 385 410 txp->regs = vc4_ioremap_regs(pdev, 0); 386 411 if (IS_ERR(txp->regs)) 387 412 return PTR_ERR(txp->regs); 413 + txp->regset.base = txp->regs; 414 + txp->regset.regs = txp_regs; 415 + txp->regset.nregs = ARRAY_SIZE(txp_regs); 388 416 389 417 drm_connector_helper_add(&txp->connector.base, 390 418 &vc4_txp_connector_helper_funcs); ··· 405 427 406 428 dev_set_drvdata(dev, txp); 407 429 vc4->txp = txp; 430 + 431 + vc4_debugfs_add_regset32(drm, "txp_regs", &txp->regset); 408 432 409 433 
return 0; 410 434 }
+129 -107
drivers/gpu/drm/vc4/vc4_v3d.c
··· 22 22 #include "vc4_drv.h" 23 23 #include "vc4_regs.h" 24 24 25 - #ifdef CONFIG_DEBUG_FS 26 - #define REGDEF(reg) { reg, #reg } 27 - static const struct { 28 - uint32_t reg; 29 - const char *name; 30 - } vc4_reg_defs[] = { 31 - REGDEF(V3D_IDENT0), 32 - REGDEF(V3D_IDENT1), 33 - REGDEF(V3D_IDENT2), 34 - REGDEF(V3D_SCRATCH), 35 - REGDEF(V3D_L2CACTL), 36 - REGDEF(V3D_SLCACTL), 37 - REGDEF(V3D_INTCTL), 38 - REGDEF(V3D_INTENA), 39 - REGDEF(V3D_INTDIS), 40 - REGDEF(V3D_CT0CS), 41 - REGDEF(V3D_CT1CS), 42 - REGDEF(V3D_CT0EA), 43 - REGDEF(V3D_CT1EA), 44 - REGDEF(V3D_CT0CA), 45 - REGDEF(V3D_CT1CA), 46 - REGDEF(V3D_CT00RA0), 47 - REGDEF(V3D_CT01RA0), 48 - REGDEF(V3D_CT0LC), 49 - REGDEF(V3D_CT1LC), 50 - REGDEF(V3D_CT0PC), 51 - REGDEF(V3D_CT1PC), 52 - REGDEF(V3D_PCS), 53 - REGDEF(V3D_BFC), 54 - REGDEF(V3D_RFC), 55 - REGDEF(V3D_BPCA), 56 - REGDEF(V3D_BPCS), 57 - REGDEF(V3D_BPOA), 58 - REGDEF(V3D_BPOS), 59 - REGDEF(V3D_BXCF), 60 - REGDEF(V3D_SQRSV0), 61 - REGDEF(V3D_SQRSV1), 62 - REGDEF(V3D_SQCNTL), 63 - REGDEF(V3D_SRQPC), 64 - REGDEF(V3D_SRQUA), 65 - REGDEF(V3D_SRQUL), 66 - REGDEF(V3D_SRQCS), 67 - REGDEF(V3D_VPACNTL), 68 - REGDEF(V3D_VPMBASE), 69 - REGDEF(V3D_PCTRC), 70 - REGDEF(V3D_PCTRE), 71 - REGDEF(V3D_PCTR(0)), 72 - REGDEF(V3D_PCTRS(0)), 73 - REGDEF(V3D_PCTR(1)), 74 - REGDEF(V3D_PCTRS(1)), 75 - REGDEF(V3D_PCTR(2)), 76 - REGDEF(V3D_PCTRS(2)), 77 - REGDEF(V3D_PCTR(3)), 78 - REGDEF(V3D_PCTRS(3)), 79 - REGDEF(V3D_PCTR(4)), 80 - REGDEF(V3D_PCTRS(4)), 81 - REGDEF(V3D_PCTR(5)), 82 - REGDEF(V3D_PCTRS(5)), 83 - REGDEF(V3D_PCTR(6)), 84 - REGDEF(V3D_PCTRS(6)), 85 - REGDEF(V3D_PCTR(7)), 86 - REGDEF(V3D_PCTRS(7)), 87 - REGDEF(V3D_PCTR(8)), 88 - REGDEF(V3D_PCTRS(8)), 89 - REGDEF(V3D_PCTR(9)), 90 - REGDEF(V3D_PCTRS(9)), 91 - REGDEF(V3D_PCTR(10)), 92 - REGDEF(V3D_PCTRS(10)), 93 - REGDEF(V3D_PCTR(11)), 94 - REGDEF(V3D_PCTRS(11)), 95 - REGDEF(V3D_PCTR(12)), 96 - REGDEF(V3D_PCTRS(12)), 97 - REGDEF(V3D_PCTR(13)), 98 - REGDEF(V3D_PCTRS(13)), 99 - REGDEF(V3D_PCTR(14)), 100 - 
REGDEF(V3D_PCTRS(14)), 101 - REGDEF(V3D_PCTR(15)), 102 - REGDEF(V3D_PCTRS(15)), 103 - REGDEF(V3D_DBGE), 104 - REGDEF(V3D_FDBGO), 105 - REGDEF(V3D_FDBGB), 106 - REGDEF(V3D_FDBGR), 107 - REGDEF(V3D_FDBGS), 108 - REGDEF(V3D_ERRSTAT), 25 + static const struct debugfs_reg32 v3d_regs[] = { 26 + VC4_REG32(V3D_IDENT0), 27 + VC4_REG32(V3D_IDENT1), 28 + VC4_REG32(V3D_IDENT2), 29 + VC4_REG32(V3D_SCRATCH), 30 + VC4_REG32(V3D_L2CACTL), 31 + VC4_REG32(V3D_SLCACTL), 32 + VC4_REG32(V3D_INTCTL), 33 + VC4_REG32(V3D_INTENA), 34 + VC4_REG32(V3D_INTDIS), 35 + VC4_REG32(V3D_CT0CS), 36 + VC4_REG32(V3D_CT1CS), 37 + VC4_REG32(V3D_CT0EA), 38 + VC4_REG32(V3D_CT1EA), 39 + VC4_REG32(V3D_CT0CA), 40 + VC4_REG32(V3D_CT1CA), 41 + VC4_REG32(V3D_CT00RA0), 42 + VC4_REG32(V3D_CT01RA0), 43 + VC4_REG32(V3D_CT0LC), 44 + VC4_REG32(V3D_CT1LC), 45 + VC4_REG32(V3D_CT0PC), 46 + VC4_REG32(V3D_CT1PC), 47 + VC4_REG32(V3D_PCS), 48 + VC4_REG32(V3D_BFC), 49 + VC4_REG32(V3D_RFC), 50 + VC4_REG32(V3D_BPCA), 51 + VC4_REG32(V3D_BPCS), 52 + VC4_REG32(V3D_BPOA), 53 + VC4_REG32(V3D_BPOS), 54 + VC4_REG32(V3D_BXCF), 55 + VC4_REG32(V3D_SQRSV0), 56 + VC4_REG32(V3D_SQRSV1), 57 + VC4_REG32(V3D_SQCNTL), 58 + VC4_REG32(V3D_SRQPC), 59 + VC4_REG32(V3D_SRQUA), 60 + VC4_REG32(V3D_SRQUL), 61 + VC4_REG32(V3D_SRQCS), 62 + VC4_REG32(V3D_VPACNTL), 63 + VC4_REG32(V3D_VPMBASE), 64 + VC4_REG32(V3D_PCTRC), 65 + VC4_REG32(V3D_PCTRE), 66 + VC4_REG32(V3D_PCTR(0)), 67 + VC4_REG32(V3D_PCTRS(0)), 68 + VC4_REG32(V3D_PCTR(1)), 69 + VC4_REG32(V3D_PCTRS(1)), 70 + VC4_REG32(V3D_PCTR(2)), 71 + VC4_REG32(V3D_PCTRS(2)), 72 + VC4_REG32(V3D_PCTR(3)), 73 + VC4_REG32(V3D_PCTRS(3)), 74 + VC4_REG32(V3D_PCTR(4)), 75 + VC4_REG32(V3D_PCTRS(4)), 76 + VC4_REG32(V3D_PCTR(5)), 77 + VC4_REG32(V3D_PCTRS(5)), 78 + VC4_REG32(V3D_PCTR(6)), 79 + VC4_REG32(V3D_PCTRS(6)), 80 + VC4_REG32(V3D_PCTR(7)), 81 + VC4_REG32(V3D_PCTRS(7)), 82 + VC4_REG32(V3D_PCTR(8)), 83 + VC4_REG32(V3D_PCTRS(8)), 84 + VC4_REG32(V3D_PCTR(9)), 85 + VC4_REG32(V3D_PCTRS(9)), 86 + VC4_REG32(V3D_PCTR(10)), 87 
+ VC4_REG32(V3D_PCTRS(10)), 88 + VC4_REG32(V3D_PCTR(11)), 89 + VC4_REG32(V3D_PCTRS(11)), 90 + VC4_REG32(V3D_PCTR(12)), 91 + VC4_REG32(V3D_PCTRS(12)), 92 + VC4_REG32(V3D_PCTR(13)), 93 + VC4_REG32(V3D_PCTRS(13)), 94 + VC4_REG32(V3D_PCTR(14)), 95 + VC4_REG32(V3D_PCTRS(14)), 96 + VC4_REG32(V3D_PCTR(15)), 97 + VC4_REG32(V3D_PCTRS(15)), 98 + VC4_REG32(V3D_DBGE), 99 + VC4_REG32(V3D_FDBGO), 100 + VC4_REG32(V3D_FDBGB), 101 + VC4_REG32(V3D_FDBGR), 102 + VC4_REG32(V3D_FDBGS), 103 + VC4_REG32(V3D_ERRSTAT), 109 104 }; 110 105 111 - int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused) 106 + static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) 112 107 { 113 108 struct drm_info_node *node = (struct drm_info_node *)m->private; 114 109 struct drm_device *dev = node->minor->dev; 115 110 struct vc4_dev *vc4 = to_vc4_dev(dev); 116 - int i; 111 + int ret = vc4_v3d_pm_get(vc4); 117 112 118 - for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) { 119 - seq_printf(m, "%s (0x%04x): 0x%08x\n", 120 - vc4_reg_defs[i].name, vc4_reg_defs[i].reg, 121 - V3D_READ(vc4_reg_defs[i].reg)); 113 + if (ret == 0) { 114 + uint32_t ident1 = V3D_READ(V3D_IDENT1); 115 + uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC); 116 + uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS); 117 + uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS); 118 + 119 + seq_printf(m, "Revision: %d\n", 120 + VC4_GET_FIELD(ident1, V3D_IDENT1_REV)); 121 + seq_printf(m, "Slices: %d\n", nslc); 122 + seq_printf(m, "TMUs: %d\n", nslc * tups); 123 + seq_printf(m, "QPUs: %d\n", nslc * qups); 124 + seq_printf(m, "Semaphores: %d\n", 125 + VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM)); 126 + vc4_v3d_pm_put(vc4); 122 127 } 123 128 124 129 return 0; 125 130 } 126 131 127 - int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) 132 + /** 133 + * Wraps pm_runtime_get_sync() in a refcount, so that we can reliably 134 + * get the pm_runtime refcount to 0 in vc4_reset(). 
135 + */ 136 + int 137 + vc4_v3d_pm_get(struct vc4_dev *vc4) 128 138 { 129 - struct drm_info_node *node = (struct drm_info_node *)m->private; 130 - struct drm_device *dev = node->minor->dev; 131 - struct vc4_dev *vc4 = to_vc4_dev(dev); 132 - uint32_t ident1 = V3D_READ(V3D_IDENT1); 133 - uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC); 134 - uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS); 135 - uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS); 139 + mutex_lock(&vc4->power_lock); 140 + if (vc4->power_refcount++ == 0) { 141 + int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); 136 142 137 - seq_printf(m, "Revision: %d\n", 138 - VC4_GET_FIELD(ident1, V3D_IDENT1_REV)); 139 - seq_printf(m, "Slices: %d\n", nslc); 140 - seq_printf(m, "TMUs: %d\n", nslc * tups); 141 - seq_printf(m, "QPUs: %d\n", nslc * qups); 142 - seq_printf(m, "Semaphores: %d\n", 143 - VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM)); 143 + if (ret < 0) { 144 + vc4->power_refcount--; 145 + mutex_unlock(&vc4->power_lock); 146 + return ret; 147 + } 148 + } 149 + mutex_unlock(&vc4->power_lock); 144 150 145 151 return 0; 146 152 } 147 - #endif /* CONFIG_DEBUG_FS */ 153 + 154 + void 155 + vc4_v3d_pm_put(struct vc4_dev *vc4) 156 + { 157 + mutex_lock(&vc4->power_lock); 158 + if (--vc4->power_refcount == 0) { 159 + pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); 160 + pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); 161 + } 162 + mutex_unlock(&vc4->power_lock); 163 + } 148 164 149 165 static void vc4_v3d_init_hw(struct drm_device *dev) 150 166 { ··· 370 354 v3d->regs = vc4_ioremap_regs(pdev, 0); 371 355 if (IS_ERR(v3d->regs)) 372 356 return PTR_ERR(v3d->regs); 357 + v3d->regset.base = v3d->regs; 358 + v3d->regset.regs = v3d_regs; 359 + v3d->regset.nregs = ARRAY_SIZE(v3d_regs); 373 360 374 361 vc4->v3d = v3d; 375 362 v3d->vc4 = vc4; ··· 428 409 pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. 
*/ 429 410 pm_runtime_enable(dev); 430 411 412 + vc4_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL); 413 + vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset); 414 + 431 415 return 0; 432 416 } 433 417 ··· 474 452 return 0; 475 453 } 476 454 477 - static const struct of_device_id vc4_v3d_dt_match[] = { 455 + const struct of_device_id vc4_v3d_dt_match[] = { 478 456 { .compatible = "brcm,bcm2835-v3d" }, 479 457 { .compatible = "brcm,cygnus-v3d" }, 480 458 { .compatible = "brcm,vc4-v3d" },
+32 -51
drivers/gpu/drm/vc4/vc4_vec.c
··· 176 176 struct clk *clock; 177 177 178 178 const struct vc4_vec_tv_mode *tv_mode; 179 + 180 + struct debugfs_regset32 regset; 179 181 }; 180 182 181 183 #define VEC_READ(offset) readl(vec->regs + (offset)) ··· 225 223 void (*mode_set)(struct vc4_vec *vec); 226 224 }; 227 225 228 - #define VEC_REG(reg) { reg, #reg } 229 - static const struct { 230 - u32 reg; 231 - const char *name; 232 - } vec_regs[] = { 233 - VEC_REG(VEC_WSE_CONTROL), 234 - VEC_REG(VEC_WSE_WSS_DATA), 235 - VEC_REG(VEC_WSE_VPS_DATA1), 236 - VEC_REG(VEC_WSE_VPS_CONTROL), 237 - VEC_REG(VEC_REVID), 238 - VEC_REG(VEC_CONFIG0), 239 - VEC_REG(VEC_SCHPH), 240 - VEC_REG(VEC_CLMP0_START), 241 - VEC_REG(VEC_CLMP0_END), 242 - VEC_REG(VEC_FREQ3_2), 243 - VEC_REG(VEC_FREQ1_0), 244 - VEC_REG(VEC_CONFIG1), 245 - VEC_REG(VEC_CONFIG2), 246 - VEC_REG(VEC_INTERRUPT_CONTROL), 247 - VEC_REG(VEC_INTERRUPT_STATUS), 248 - VEC_REG(VEC_FCW_SECAM_B), 249 - VEC_REG(VEC_SECAM_GAIN_VAL), 250 - VEC_REG(VEC_CONFIG3), 251 - VEC_REG(VEC_STATUS0), 252 - VEC_REG(VEC_MASK0), 253 - VEC_REG(VEC_CFG), 254 - VEC_REG(VEC_DAC_TEST), 255 - VEC_REG(VEC_DAC_CONFIG), 256 - VEC_REG(VEC_DAC_MISC), 226 + static const struct debugfs_reg32 vec_regs[] = { 227 + VC4_REG32(VEC_WSE_CONTROL), 228 + VC4_REG32(VEC_WSE_WSS_DATA), 229 + VC4_REG32(VEC_WSE_VPS_DATA1), 230 + VC4_REG32(VEC_WSE_VPS_CONTROL), 231 + VC4_REG32(VEC_REVID), 232 + VC4_REG32(VEC_CONFIG0), 233 + VC4_REG32(VEC_SCHPH), 234 + VC4_REG32(VEC_CLMP0_START), 235 + VC4_REG32(VEC_CLMP0_END), 236 + VC4_REG32(VEC_FREQ3_2), 237 + VC4_REG32(VEC_FREQ1_0), 238 + VC4_REG32(VEC_CONFIG1), 239 + VC4_REG32(VEC_CONFIG2), 240 + VC4_REG32(VEC_INTERRUPT_CONTROL), 241 + VC4_REG32(VEC_INTERRUPT_STATUS), 242 + VC4_REG32(VEC_FCW_SECAM_B), 243 + VC4_REG32(VEC_SECAM_GAIN_VAL), 244 + VC4_REG32(VEC_CONFIG3), 245 + VC4_REG32(VEC_STATUS0), 246 + VC4_REG32(VEC_MASK0), 247 + VC4_REG32(VEC_CFG), 248 + VC4_REG32(VEC_DAC_TEST), 249 + VC4_REG32(VEC_DAC_CONFIG), 250 + VC4_REG32(VEC_DAC_MISC), 257 251 }; 258 - 259 - #ifdef 
CONFIG_DEBUG_FS 260 - int vc4_vec_debugfs_regs(struct seq_file *m, void *unused) 261 - { 262 - struct drm_info_node *node = (struct drm_info_node *)m->private; 263 - struct drm_device *dev = node->minor->dev; 264 - struct vc4_dev *vc4 = to_vc4_dev(dev); 265 - struct vc4_vec *vec = vc4->vec; 266 - int i; 267 - 268 - if (!vec) 269 - return 0; 270 - 271 - for (i = 0; i < ARRAY_SIZE(vec_regs); i++) { 272 - seq_printf(m, "%s (0x%04x): 0x%08x\n", 273 - vec_regs[i].name, vec_regs[i].reg, 274 - VEC_READ(vec_regs[i].reg)); 275 - } 276 - 277 - return 0; 278 - } 279 - #endif 280 252 281 253 static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec) 282 254 { ··· 563 587 vec->regs = vc4_ioremap_regs(pdev, 0); 564 588 if (IS_ERR(vec->regs)) 565 589 return PTR_ERR(vec->regs); 590 + vec->regset.base = vec->regs; 591 + vec->regset.regs = vec_regs; 592 + vec->regset.nregs = ARRAY_SIZE(vec_regs); 566 593 567 594 vec->clock = devm_clk_get(dev, NULL); 568 595 if (IS_ERR(vec->clock)) { ··· 590 611 dev_set_drvdata(dev, vec); 591 612 592 613 vc4->vec = vec; 614 + 615 + vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset); 593 616 594 617 return 0; 595 618
+1
drivers/gpu/drm/virtio/virtgpu_display.c
··· 385 385 386 386 for (i = 0 ; i < vgdev->num_scanouts; ++i) 387 387 kfree(vgdev->outputs[i].edid); 388 + drm_atomic_helper_shutdown(vgdev->ddev); 388 389 drm_mode_config_cleanup(vgdev->ddev); 389 390 }
+2
include/drm/drm_print.h
··· 30 30 #include <linux/printk.h> 31 31 #include <linux/seq_file.h> 32 32 #include <linux/device.h> 33 + #include <linux/debugfs.h> 33 34 34 35 /** 35 36 * DOC: print ··· 85 84 __printf(2, 3) 86 85 void drm_printf(struct drm_printer *p, const char *f, ...); 87 86 void drm_puts(struct drm_printer *p, const char *str); 87 + void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset); 88 88 89 89 __printf(2, 0) 90 90 /**
+5
include/drm/drm_syncobj.h
··· 27 27 #define __DRM_SYNCOBJ_H__ 28 28 29 29 #include <linux/dma-fence.h> 30 + #include <linux/dma-fence-chain.h> 30 31 31 32 struct drm_file; 32 33 ··· 113 112 114 113 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, 115 114 u32 handle); 115 + void drm_syncobj_add_point(struct drm_syncobj *syncobj, 116 + struct dma_fence_chain *chain, 117 + struct dma_fence *fence, 118 + uint64_t point); 116 119 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, 117 120 struct dma_fence *fence); 118 121 int drm_syncobj_find_fence(struct drm_file *file_private,
+81
include/linux/dma-fence-chain.h
··· 1 + /* 2 + * fence-chain: chain fences together in a timeline 3 + * 4 + * Copyright (C) 2018 Advanced Micro Devices, Inc. 5 + * Authors: 6 + * Christian König <christian.koenig@amd.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License version 2 as published by 10 + * the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope that it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + */ 17 + 18 + #ifndef __LINUX_DMA_FENCE_CHAIN_H 19 + #define __LINUX_DMA_FENCE_CHAIN_H 20 + 21 + #include <linux/dma-fence.h> 22 + #include <linux/irq_work.h> 23 + 24 + /** 25 + * struct dma_fence_chain - fence to represent an node of a fence chain 26 + * @base: fence base class 27 + * @lock: spinlock for fence handling 28 + * @prev: previous fence of the chain 29 + * @prev_seqno: original previous seqno before garbage collection 30 + * @fence: encapsulated fence 31 + * @cb: callback structure for signaling 32 + * @work: irq work item for signaling 33 + */ 34 + struct dma_fence_chain { 35 + struct dma_fence base; 36 + spinlock_t lock; 37 + struct dma_fence __rcu *prev; 38 + u64 prev_seqno; 39 + struct dma_fence *fence; 40 + struct dma_fence_cb cb; 41 + struct irq_work work; 42 + }; 43 + 44 + extern const struct dma_fence_ops dma_fence_chain_ops; 45 + 46 + /** 47 + * to_dma_fence_chain - cast a fence to a dma_fence_chain 48 + * @fence: fence to cast to a dma_fence_array 49 + * 50 + * Returns NULL if the fence is not a dma_fence_chain, 51 + * or the dma_fence_chain otherwise. 
52 + */ 53 + static inline struct dma_fence_chain * 54 + to_dma_fence_chain(struct dma_fence *fence) 55 + { 56 + if (!fence || fence->ops != &dma_fence_chain_ops) 57 + return NULL; 58 + 59 + return container_of(fence, struct dma_fence_chain, base); 60 + } 61 + 62 + /** 63 + * dma_fence_chain_for_each - iterate over all fences in chain 64 + * @iter: current fence 65 + * @head: starting point 66 + * 67 + * Iterate over all fences in the chain. We keep a reference to the current 68 + * fence while inside the loop which must be dropped when breaking out. 69 + */ 70 + #define dma_fence_chain_for_each(iter, head) \ 71 + for (iter = dma_fence_get(head); iter; \ 72 + iter = dma_fence_chain_walk(iter)) 73 + 74 + struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence); 75 + int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno); 76 + void dma_fence_chain_init(struct dma_fence_chain *chain, 77 + struct dma_fence *prev, 78 + struct dma_fence *fence, 79 + uint64_t seqno); 80 + 81 + #endif /* __LINUX_DMA_FENCE_CHAIN_H */
+36
include/uapi/drm/drm.h
··· 735 735 __u32 pad; 736 736 }; 737 737 738 + struct drm_syncobj_transfer { 739 + __u32 src_handle; 740 + __u32 dst_handle; 741 + __u64 src_point; 742 + __u64 dst_point; 743 + __u32 flags; 744 + __u32 pad; 745 + }; 746 + 738 747 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) 739 748 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) 749 + #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */ 740 750 struct drm_syncobj_wait { 741 751 __u64 handles; 742 752 /* absolute timeout */ ··· 757 747 __u32 pad; 758 748 }; 759 749 750 + struct drm_syncobj_timeline_wait { 751 + __u64 handles; 752 + /* wait on specific timeline point for every handles*/ 753 + __u64 points; 754 + /* absolute timeout */ 755 + __s64 timeout_nsec; 756 + __u32 count_handles; 757 + __u32 flags; 758 + __u32 first_signaled; /* only valid when not waiting all */ 759 + __u32 pad; 760 + }; 761 + 762 + 760 763 struct drm_syncobj_array { 761 764 __u64 handles; 762 765 __u32 count_handles; 763 766 __u32 pad; 764 767 }; 768 + 769 + struct drm_syncobj_timeline_array { 770 + __u64 handles; 771 + __u64 points; 772 + __u32 count_handles; 773 + __u32 pad; 774 + }; 775 + 765 776 766 777 /* Query current scanout sequence number */ 767 778 struct drm_crtc_get_sequence { ··· 939 908 #define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees) 940 909 #define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease) 941 910 #define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease) 911 + 912 + #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait) 913 + #define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array) 914 + #define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer) 915 + #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array) 942 916 943 917 /** 944 918 * Device specific ioctls should only be 
in their respective headers
+2 -1
include/uapi/drm/drm_mode.h
··· 621 621 622 622 struct drm_color_lut { 623 623 /* 624 - * Data is U0.16 fixed point format. 624 + * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and 625 + * 0xffff == 1.0. 625 626 */ 626 627 __u16 red; 627 628 __u16 green;
+169
include/uapi/drm/lima_drm.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ 2 + /* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */ 3 + 4 + #ifndef __LIMA_DRM_H__ 5 + #define __LIMA_DRM_H__ 6 + 7 + #include "drm.h" 8 + 9 + #if defined(__cplusplus) 10 + extern "C" { 11 + #endif 12 + 13 + enum drm_lima_param_gpu_id { 14 + DRM_LIMA_PARAM_GPU_ID_UNKNOWN, 15 + DRM_LIMA_PARAM_GPU_ID_MALI400, 16 + DRM_LIMA_PARAM_GPU_ID_MALI450, 17 + }; 18 + 19 + enum drm_lima_param { 20 + DRM_LIMA_PARAM_GPU_ID, 21 + DRM_LIMA_PARAM_NUM_PP, 22 + DRM_LIMA_PARAM_GP_VERSION, 23 + DRM_LIMA_PARAM_PP_VERSION, 24 + }; 25 + 26 + /** 27 + * get various information of the GPU 28 + */ 29 + struct drm_lima_get_param { 30 + __u32 param; /* in, value in enum drm_lima_param */ 31 + __u32 pad; /* pad, must be zero */ 32 + __u64 value; /* out, parameter value */ 33 + }; 34 + 35 + /** 36 + * create a buffer for used by GPU 37 + */ 38 + struct drm_lima_gem_create { 39 + __u32 size; /* in, buffer size */ 40 + __u32 flags; /* in, currently no flags, must be zero */ 41 + __u32 handle; /* out, GEM buffer handle */ 42 + __u32 pad; /* pad, must be zero */ 43 + }; 44 + 45 + /** 46 + * get information of a buffer 47 + */ 48 + struct drm_lima_gem_info { 49 + __u32 handle; /* in, GEM buffer handle */ 50 + __u32 va; /* out, virtual address mapped into GPU MMU */ 51 + __u64 offset; /* out, used to mmap this buffer to CPU */ 52 + }; 53 + 54 + #define LIMA_SUBMIT_BO_READ 0x01 55 + #define LIMA_SUBMIT_BO_WRITE 0x02 56 + 57 + /* buffer information used by one task */ 58 + struct drm_lima_gem_submit_bo { 59 + __u32 handle; /* in, GEM buffer handle */ 60 + __u32 flags; /* in, buffer read/write by GPU */ 61 + }; 62 + 63 + #define LIMA_GP_FRAME_REG_NUM 6 64 + 65 + /* frame used to setup GP for each task */ 66 + struct drm_lima_gp_frame { 67 + __u32 frame[LIMA_GP_FRAME_REG_NUM]; 68 + }; 69 + 70 + #define LIMA_PP_FRAME_REG_NUM 23 71 + #define LIMA_PP_WB_REG_NUM 12 72 + 73 + /* frame used to setup mali400 GPU PP for each task 
*/ 74 + struct drm_lima_m400_pp_frame { 75 + __u32 frame[LIMA_PP_FRAME_REG_NUM]; 76 + __u32 num_pp; 77 + __u32 wb[3 * LIMA_PP_WB_REG_NUM]; 78 + __u32 plbu_array_address[4]; 79 + __u32 fragment_stack_address[4]; 80 + }; 81 + 82 + /* frame used to setup mali450 GPU PP for each task */ 83 + struct drm_lima_m450_pp_frame { 84 + __u32 frame[LIMA_PP_FRAME_REG_NUM]; 85 + __u32 num_pp; 86 + __u32 wb[3 * LIMA_PP_WB_REG_NUM]; 87 + __u32 use_dlbu; 88 + __u32 _pad; 89 + union { 90 + __u32 plbu_array_address[8]; 91 + __u32 dlbu_regs[4]; 92 + }; 93 + __u32 fragment_stack_address[8]; 94 + }; 95 + 96 + #define LIMA_PIPE_GP 0x00 97 + #define LIMA_PIPE_PP 0x01 98 + 99 + #define LIMA_SUBMIT_FLAG_EXPLICIT_FENCE (1 << 0) 100 + 101 + /** 102 + * submit a task to GPU 103 + * 104 + * User can always merge multi sync_file and drm_syncobj 105 + * into one drm_syncobj as in_sync[0], but we reserve 106 + * in_sync[1] for another task's out_sync to avoid the 107 + * export/import/merge pass when explicit sync. 108 + */ 109 + struct drm_lima_gem_submit { 110 + __u32 ctx; /* in, context handle task is submitted to */ 111 + __u32 pipe; /* in, which pipe to use, GP/PP */ 112 + __u32 nr_bos; /* in, array length of bos field */ 113 + __u32 frame_size; /* in, size of frame field */ 114 + __u64 bos; /* in, array of drm_lima_gem_submit_bo */ 115 + __u64 frame; /* in, GP/PP frame */ 116 + __u32 flags; /* in, submit flags */ 117 + __u32 out_sync; /* in, drm_syncobj handle used to wait task finish after submission */ 118 + __u32 in_sync[2]; /* in, drm_syncobj handle used to wait before start this task */ 119 + }; 120 + 121 + #define LIMA_GEM_WAIT_READ 0x01 122 + #define LIMA_GEM_WAIT_WRITE 0x02 123 + 124 + /** 125 + * wait pending GPU task finish of a buffer 126 + */ 127 + struct drm_lima_gem_wait { 128 + __u32 handle; /* in, GEM buffer handle */ 129 + __u32 op; /* in, CPU want to read/write this buffer */ 130 + __s64 timeout_ns; /* in, wait timeout in absulute time */ 131 + }; 132 + 133 + /** 134 + * 
create a context 135 + */ 136 + struct drm_lima_ctx_create { 137 + __u32 id; /* out, context handle */ 138 + __u32 _pad; /* pad, must be zero */ 139 + }; 140 + 141 + /** 142 + * free a context 143 + */ 144 + struct drm_lima_ctx_free { 145 + __u32 id; /* in, context handle */ 146 + __u32 _pad; /* pad, must be zero */ 147 + }; 148 + 149 + #define DRM_LIMA_GET_PARAM 0x00 150 + #define DRM_LIMA_GEM_CREATE 0x01 151 + #define DRM_LIMA_GEM_INFO 0x02 152 + #define DRM_LIMA_GEM_SUBMIT 0x03 153 + #define DRM_LIMA_GEM_WAIT 0x04 154 + #define DRM_LIMA_CTX_CREATE 0x05 155 + #define DRM_LIMA_CTX_FREE 0x06 156 + 157 + #define DRM_IOCTL_LIMA_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GET_PARAM, struct drm_lima_get_param) 158 + #define DRM_IOCTL_LIMA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_CREATE, struct drm_lima_gem_create) 159 + #define DRM_IOCTL_LIMA_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_INFO, struct drm_lima_gem_info) 160 + #define DRM_IOCTL_LIMA_GEM_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_SUBMIT, struct drm_lima_gem_submit) 161 + #define DRM_IOCTL_LIMA_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_WAIT, struct drm_lima_gem_wait) 162 + #define DRM_IOCTL_LIMA_CTX_CREATE DRM_IOR(DRM_COMMAND_BASE + DRM_LIMA_CTX_CREATE, struct drm_lima_ctx_create) 163 + #define DRM_IOCTL_LIMA_CTX_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_CTX_FREE, struct drm_lima_ctx_free) 164 + 165 + #if defined(__cplusplus) 166 + } 167 + #endif 168 + 169 + #endif /* __LIMA_DRM_H__ */