Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2021-07-22' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v5.15-rc1:

UAPI Changes:
- Remove sysfs stats for dma-buf attachments, as they cause a performance regression.
  The previous merge is not in an rc kernel yet, so no userspace regression is possible.

Cross-subsystem Changes:
- Sanitize user input in kyro's viewport ioctl.
- Use refcount_t in fb_info->count
- Assorted fixes to dma-buf.
- Extend x86 efifb handling to all archs.
- Fix neofb divide by 0.
- Document corpro,gm7123 bridge dt bindings.

Core Changes:
- Slightly rework drm master handling.
- Cleanup vgaarb handling.
- Assorted fixes.

Driver Changes:
- Add support for ws2401 panel.
- Assorted fixes to stm, ast, bochs.
- Demidlayer ingenic irq.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/2d0d2fe8-01fc-e216-c3fd-38db9e69944e@linux.intel.com

+1570 -640
-28
Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers
··· 22 22 Contact: Hridya Valsaraju <hridya@google.com> 23 23 Description: This file is read-only and specifies the size of the DMA-BUF in 24 24 bytes. 25 - 26 - What: /sys/kernel/dmabuf/buffers/<inode_number>/attachments 27 - Date: May 2021 28 - KernelVersion: v5.13 29 - Contact: Hridya Valsaraju <hridya@google.com> 30 - Description: This directory will contain subdirectories representing every 31 - attachment of the DMA-BUF. 32 - 33 - What: /sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attachment_uid> 34 - Date: May 2021 35 - KernelVersion: v5.13 36 - Contact: Hridya Valsaraju <hridya@google.com> 37 - Description: This directory will contain information on the attached device 38 - and the number of current distinct device mappings. 39 - 40 - What: /sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attachment_uid>/device 41 - Date: May 2021 42 - KernelVersion: v5.13 43 - Contact: Hridya Valsaraju <hridya@google.com> 44 - Description: This file is read-only and is a symlink to the attached device's 45 - sysfs entry. 46 - 47 - What: /sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attachment_uid>/map_counter 48 - Date: May 2021 49 - KernelVersion: v5.13 50 - Contact: Hridya Valsaraju <hridya@google.com> 51 - Description: This file is read-only and contains a map_counter indicating the 52 - number of distinct device mappings of the attachment.
+3
Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
··· 22 22 - ti,ths8134a 23 23 - ti,ths8134b 24 24 - const: ti,ths8134 25 + - items: 26 + - const: corpro,gm7123 27 + - const: adi,adv7123 25 28 - enum: 26 29 - adi,adv7123 27 30 - dumb-vga-dac
+99
Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/samsung,lms380kf01.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Samsung LMS380KF01 display panel 8 + 9 + description: The LMS380KF01 is a 480x800 DPI display panel from Samsung Mobile 10 + Displays (SMD) utilizing the WideChips WS2401 display controller. It can be 11 + used with internal or external backlight control. 12 + The panel must obey the rules for a SPI slave device as specified in 13 + spi/spi-controller.yaml 14 + 15 + maintainers: 16 + - Linus Walleij <linus.walleij@linaro.org> 17 + 18 + allOf: 19 + - $ref: panel-common.yaml# 20 + 21 + properties: 22 + compatible: 23 + const: samsung,lms380kf01 24 + 25 + reg: true 26 + 27 + interrupts: 28 + description: provides an optional ESD (electrostatic discharge) 29 + interrupt that signals abnormalities in the display hardware. 30 + This can also be raised for other reasons like erroneous 31 + configuration. 
32 + maxItems: 1 33 + 34 + reset-gpios: true 35 + 36 + vci-supply: 37 + description: regulator that supplies the VCI analog voltage 38 + usually around 3.0 V 39 + 40 + vccio-supply: 41 + description: regulator that supplies the VCCIO voltage usually 42 + around 1.8 V 43 + 44 + backlight: true 45 + 46 + spi-cpha: true 47 + 48 + spi-cpol: true 49 + 50 + spi-max-frequency: 51 + maximum: 1200000 52 + 53 + port: true 54 + 55 + required: 56 + - compatible 57 + - reg 58 + - spi-cpha 59 + - spi-cpol 60 + - port 61 + 62 + additionalProperties: false 63 + 64 + examples: 65 + - | 66 + #include <dt-bindings/gpio/gpio.h> 67 + #include <dt-bindings/interrupt-controller/irq.h> 68 + 69 + spi { 70 + compatible = "spi-gpio"; 71 + sck-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>; 72 + miso-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>; 73 + mosi-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>; 74 + cs-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>; 75 + num-chipselects = <1>; 76 + #address-cells = <1>; 77 + #size-cells = <0>; 78 + 79 + panel@0 { 80 + compatible = "samsung,lms380kf01"; 81 + spi-max-frequency = <1200000>; 82 + spi-cpha; 83 + spi-cpol; 84 + reg = <0>; 85 + vci-supply = <&lcd_3v0_reg>; 86 + vccio-supply = <&lcd_1v8_reg>; 87 + reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>; 88 + interrupt-parent = <&gpio>; 89 + interrupts = <5 IRQ_TYPE_EDGE_RISING>; 90 + 91 + port { 92 + panel_in: endpoint { 93 + remote-endpoint = <&display_out>; 94 + }; 95 + }; 96 + }; 97 + }; 98 + 99 + ...
+7
MAINTAINERS
··· 6067 6067 F: drivers/gpu/drm/vmwgfx/ 6068 6068 F: include/uapi/drm/vmwgfx_drm.h 6069 6069 6070 + DRM DRIVER FOR WIDECHIPS WS2401 PANELS 6071 + M: Linus Walleij <linus.walleij@linaro.org> 6072 + S: Maintained 6073 + T: git git://anongit.freedesktop.org/drm/drm-misc 6074 + F: Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml 6075 + F: drivers/gpu/drm/panel/panel-widechips-ws2401.c 6076 + 6070 6077 DRM DRIVERS 6071 6078 M: David Airlie <airlied@linux.ie> 6072 6079 M: Daniel Vetter <daniel@ffwll.ch>
+1 -4
arch/arm/include/asm/efi.h
··· 17 17 18 18 #ifdef CONFIG_EFI 19 19 void efi_init(void); 20 + extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 20 21 21 22 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); 22 23 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); ··· 52 51 53 52 struct screen_info *alloc_screen_info(void); 54 53 void free_screen_info(struct screen_info *si); 55 - 56 - static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) 57 - { 58 - } 59 54 60 55 /* 61 56 * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
+1 -4
arch/arm64/include/asm/efi.h
··· 14 14 15 15 #ifdef CONFIG_EFI 16 16 extern void efi_init(void); 17 + extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 17 18 #else 18 19 #define efi_init() 19 20 #endif ··· 83 82 #define alloc_screen_info(x...) &screen_info 84 83 85 84 static inline void free_screen_info(struct screen_info *si) 86 - { 87 - } 88 - 89 - static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) 90 85 { 91 86 } 92 87
+1 -4
arch/riscv/include/asm/efi.h
··· 13 13 14 14 #ifdef CONFIG_EFI 15 15 extern void efi_init(void); 16 + extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 16 17 #else 17 18 #define efi_init() 18 19 #endif ··· 37 36 #define alloc_screen_info(x...) (&screen_info) 38 37 39 38 static inline void free_screen_info(struct screen_info *si) 40 - { 41 - } 42 - 43 - static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) 44 39 { 45 40 } 46 41
-26
arch/x86/Kconfig
··· 2767 2767 def_bool y 2768 2768 depends on CPU_SUP_AMD && PCI 2769 2769 2770 - config X86_SYSFB 2771 - bool "Mark VGA/VBE/EFI FB as generic system framebuffer" 2772 - help 2773 - Firmwares often provide initial graphics framebuffers so the BIOS, 2774 - bootloader or kernel can show basic video-output during boot for 2775 - user-guidance and debugging. Historically, x86 used the VESA BIOS 2776 - Extensions and EFI-framebuffers for this, which are mostly limited 2777 - to x86. 2778 - This option, if enabled, marks VGA/VBE/EFI framebuffers as generic 2779 - framebuffers so the new generic system-framebuffer drivers can be 2780 - used on x86. If the framebuffer is not compatible with the generic 2781 - modes, it is advertised as fallback platform framebuffer so legacy 2782 - drivers like efifb, vesafb and uvesafb can pick it up. 2783 - If this option is not selected, all system framebuffers are always 2784 - marked as fallback platform framebuffers as usual. 2785 - 2786 - Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will 2787 - not be able to pick up generic system framebuffers if this option 2788 - is selected. You are highly encouraged to enable simplefb as 2789 - replacement if you select this option. simplefb can correctly deal 2790 - with generic system framebuffers. But you should still keep vesafb 2791 - and others enabled as fallback if a system framebuffer is 2792 - incompatible with simplefb. 2793 - 2794 - If unsure, say Y. 2795 - 2796 2770 endmenu 2797 2771 2798 2772
+16 -16
arch/x86/include/asm/sysfb.h include/linux/sysfb.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 - #ifndef _ARCH_X86_KERNEL_SYSFB_H 3 - #define _ARCH_X86_KERNEL_SYSFB_H 2 + #ifndef _LINUX_SYSFB_H 3 + #define _LINUX_SYSFB_H 4 4 5 5 /* 6 6 * Generic System Framebuffers on x86 ··· 58 58 #ifdef CONFIG_EFI 59 59 60 60 extern struct efifb_dmi_info efifb_dmi_list[]; 61 - void sysfb_apply_efi_quirks(void); 61 + void sysfb_apply_efi_quirks(struct platform_device *pd); 62 62 63 63 #else /* CONFIG_EFI */ 64 64 65 - static inline void sysfb_apply_efi_quirks(void) 65 + static inline void sysfb_apply_efi_quirks(struct platform_device *pd) 66 66 { 67 67 } 68 68 69 69 #endif /* CONFIG_EFI */ 70 70 71 - #ifdef CONFIG_X86_SYSFB 71 + #ifdef CONFIG_SYSFB_SIMPLEFB 72 72 73 - bool parse_mode(const struct screen_info *si, 74 - struct simplefb_platform_data *mode); 75 - int create_simplefb(const struct screen_info *si, 76 - const struct simplefb_platform_data *mode); 73 + bool sysfb_parse_mode(const struct screen_info *si, 74 + struct simplefb_platform_data *mode); 75 + int sysfb_create_simplefb(const struct screen_info *si, 76 + const struct simplefb_platform_data *mode); 77 77 78 - #else /* CONFIG_X86_SYSFB */ 78 + #else /* CONFIG_SYSFB_SIMPLE */ 79 79 80 - static inline bool parse_mode(const struct screen_info *si, 81 - struct simplefb_platform_data *mode) 80 + static inline bool sysfb_parse_mode(const struct screen_info *si, 81 + struct simplefb_platform_data *mode) 82 82 { 83 83 return false; 84 84 } 85 85 86 - static inline int create_simplefb(const struct screen_info *si, 87 - const struct simplefb_platform_data *mode) 86 + static inline int sysfb_create_simplefb(const struct screen_info *si, 87 + const struct simplefb_platform_data *mode) 88 88 { 89 89 return -EINVAL; 90 90 } 91 91 92 - #endif /* CONFIG_X86_SYSFB */ 92 + #endif /* CONFIG_SYSFB_SIMPLE */ 93 93 94 - #endif /* _ARCH_X86_KERNEL_SYSFB_H */ 94 + #endif /* _LINUX_SYSFB_H */
-3
arch/x86/kernel/Makefile
··· 136 136 obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o 137 137 obj-$(CONFIG_OF) += devicetree.o 138 138 obj-$(CONFIG_UPROBES) += uprobes.o 139 - obj-y += sysfb.o 140 - obj-$(CONFIG_X86_SYSFB) += sysfb_simplefb.o 141 - obj-$(CONFIG_EFI) += sysfb_efi.o 142 139 143 140 obj-$(CONFIG_PERF_EVENTS) += perf_regs.o 144 141 obj-$(CONFIG_TRACING) += tracepoint.o
+25 -12
arch/x86/kernel/sysfb.c drivers/firmware/sysfb.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 /* 3 - * Generic System Framebuffers on x86 3 + * Generic System Framebuffers 4 4 * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> 5 5 */ 6 6 7 7 /* 8 - * Simple-Framebuffer support for x86 systems 8 + * Simple-Framebuffer support 9 9 * Create a platform-device for any available boot framebuffer. The 10 10 * simple-framebuffer platform device is already available on DT systems, so 11 11 * this module parses the global "screen_info" object and creates a suitable ··· 16 16 * to pick these devices up without messing with simple-framebuffer drivers. 17 17 * The global "screen_info" is still valid at all times. 18 18 * 19 - * If CONFIG_X86_SYSFB is not selected, we never register "simple-framebuffer" 19 + * If CONFIG_SYSFB_SIMPLEFB is not selected, never register "simple-framebuffer" 20 20 * platform devices, but only use legacy framebuffer devices for 21 21 * backwards compatibility. 22 22 * 23 23 * TODO: We set the dev_id field of all platform-devices to 0. This allows 24 - * other x86 OF/DT parsers to create such devices, too. However, they must 24 + * other OF/DT parsers to create such devices, too. However, they must 25 25 * start at offset 1 for this to work. 
26 26 */ 27 27 ··· 32 32 #include <linux/platform_data/simplefb.h> 33 33 #include <linux/platform_device.h> 34 34 #include <linux/screen_info.h> 35 - #include <asm/sysfb.h> 35 + #include <linux/sysfb.h> 36 36 37 37 static __init int sysfb_init(void) 38 38 { ··· 43 43 bool compatible; 44 44 int ret; 45 45 46 - sysfb_apply_efi_quirks(); 47 - 48 46 /* try to create a simple-framebuffer device */ 49 - compatible = parse_mode(si, &mode); 47 + compatible = sysfb_parse_mode(si, &mode); 50 48 if (compatible) { 51 - ret = create_simplefb(si, &mode); 49 + ret = sysfb_create_simplefb(si, &mode); 52 50 if (!ret) 53 51 return 0; 54 52 } ··· 59 61 else 60 62 name = "platform-framebuffer"; 61 63 62 - pd = platform_device_register_resndata(NULL, name, 0, 63 - NULL, 0, si, sizeof(*si)); 64 - return PTR_ERR_OR_ZERO(pd); 64 + pd = platform_device_alloc(name, 0); 65 + if (!pd) 66 + return -ENOMEM; 67 + 68 + sysfb_apply_efi_quirks(pd); 69 + 70 + ret = platform_device_add_data(pd, si, sizeof(*si)); 71 + if (ret) 72 + goto err; 73 + 74 + ret = platform_device_add(pd); 75 + if (ret) 76 + goto err; 77 + 78 + return 0; 79 + err: 80 + platform_device_put(pd); 81 + return ret; 65 82 } 66 83 67 84 /* must execute after PCI subsystem for EFI quirks */
+75 -3
arch/x86/kernel/sysfb_efi.c drivers/firmware/efi/sysfb_efi.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 /* 3 - * Generic System Framebuffers on x86 3 + * Generic System Framebuffers 4 4 * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> 5 5 * 6 6 * EFI Quirks Copyright (c) 2006 Edgar Hucek <gimli@dark-green.com> ··· 19 19 #include <linux/init.h> 20 20 #include <linux/kernel.h> 21 21 #include <linux/mm.h> 22 + #include <linux/of_address.h> 22 23 #include <linux/pci.h> 24 + #include <linux/platform_device.h> 23 25 #include <linux/screen_info.h> 26 + #include <linux/sysfb.h> 24 27 #include <video/vga.h> 25 28 26 29 #include <asm/efi.h> 27 - #include <asm/sysfb.h> 28 30 29 31 enum { 30 32 OVERRIDE_NONE = 0x0, ··· 269 267 {}, 270 268 }; 271 269 272 - __init void sysfb_apply_efi_quirks(void) 270 + static bool efifb_overlaps_pci_range(const struct of_pci_range *range) 271 + { 272 + u64 fb_base = screen_info.lfb_base; 273 + 274 + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) 275 + fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; 276 + 277 + return fb_base >= range->cpu_addr && 278 + fb_base < (range->cpu_addr + range->size); 279 + } 280 + 281 + static struct device_node *find_pci_overlap_node(void) 282 + { 283 + struct device_node *np; 284 + 285 + for_each_node_by_type(np, "pci") { 286 + struct of_pci_range_parser parser; 287 + struct of_pci_range range; 288 + int err; 289 + 290 + err = of_pci_range_parser_init(&parser, np); 291 + if (err) { 292 + pr_warn("of_pci_range_parser_init() failed: %d\n", err); 293 + continue; 294 + } 295 + 296 + for_each_of_pci_range(&parser, &range) 297 + if (efifb_overlaps_pci_range(&range)) 298 + return np; 299 + } 300 + return NULL; 301 + } 302 + 303 + /* 304 + * If the efifb framebuffer is backed by a PCI graphics controller, we have 305 + * to ensure that this relation is expressed using a device link when 306 + * running in DT mode, or the probe order may be reversed, resulting in a 307 + * resource reservation conflict on the memory window 
that the efifb 308 + * framebuffer steals from the PCIe host bridge. 309 + */ 310 + static int efifb_add_links(struct fwnode_handle *fwnode) 311 + { 312 + struct device_node *sup_np; 313 + 314 + sup_np = find_pci_overlap_node(); 315 + 316 + /* 317 + * If there's no PCI graphics controller backing the efifb, we are 318 + * done here. 319 + */ 320 + if (!sup_np) 321 + return 0; 322 + 323 + fwnode_link_add(fwnode, of_fwnode_handle(sup_np)); 324 + of_node_put(sup_np); 325 + 326 + return 0; 327 + } 328 + 329 + static const struct fwnode_operations efifb_fwnode_ops = { 330 + .add_links = efifb_add_links, 331 + }; 332 + 333 + static struct fwnode_handle efifb_fwnode; 334 + 335 + __init void sysfb_apply_efi_quirks(struct platform_device *pd) 273 336 { 274 337 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || 275 338 !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS)) ··· 347 280 screen_info.lfb_width = screen_info.lfb_height; 348 281 screen_info.lfb_height = temp; 349 282 screen_info.lfb_linelength = 4 * screen_info.lfb_width; 283 + } 284 + 285 + if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) { 286 + fwnode_init(&efifb_fwnode, &efifb_fwnode_ops); 287 + pd->dev.fwnode = &efifb_fwnode; 350 288 } 351 289 }
+23 -10
arch/x86/kernel/sysfb_simplefb.c drivers/firmware/sysfb_simplefb.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 /* 3 - * Generic System Framebuffers on x86 3 + * Generic System Framebuffers 4 4 * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> 5 5 */ 6 6 ··· 18 18 #include <linux/platform_data/simplefb.h> 19 19 #include <linux/platform_device.h> 20 20 #include <linux/screen_info.h> 21 - #include <asm/sysfb.h> 21 + #include <linux/sysfb.h> 22 22 23 23 static const char simplefb_resname[] = "BOOTFB"; 24 24 static const struct simplefb_format formats[] = SIMPLEFB_FORMATS; 25 25 26 - /* try parsing x86 screen_info into a simple-framebuffer mode struct */ 27 - __init bool parse_mode(const struct screen_info *si, 28 - struct simplefb_platform_data *mode) 26 + /* try parsing screen_info into a simple-framebuffer mode struct */ 27 + __init bool sysfb_parse_mode(const struct screen_info *si, 28 + struct simplefb_platform_data *mode) 29 29 { 30 30 const struct simplefb_format *f; 31 31 __u8 type; ··· 57 57 return false; 58 58 } 59 59 60 - __init int create_simplefb(const struct screen_info *si, 61 - const struct simplefb_platform_data *mode) 60 + __init int sysfb_create_simplefb(const struct screen_info *si, 61 + const struct simplefb_platform_data *mode) 62 62 { 63 63 struct platform_device *pd; 64 64 struct resource res; 65 65 u64 base, size; 66 66 u32 length; 67 + int ret; 67 68 68 69 /* 69 70 * If the 64BIT_BASE capability is set, ext_lfb_base will contain the ··· 106 105 if (res.end <= res.start) 107 106 return -EINVAL; 108 107 109 - pd = platform_device_register_resndata(NULL, "simple-framebuffer", 0, 110 - &res, 1, mode, sizeof(*mode)); 111 - return PTR_ERR_OR_ZERO(pd); 108 + pd = platform_device_alloc("simple-framebuffer", 0); 109 + if (!pd) 110 + return -ENOMEM; 111 + 112 + sysfb_apply_efi_quirks(pd); 113 + 114 + ret = platform_device_add_resources(pd, &res, 1); 115 + if (ret) 116 + return ret; 117 + 118 + ret = platform_device_add_data(pd, mode, sizeof(*mode)); 119 + if (ret) 120 + return ret; 121 + 122 
+ return platform_device_add(pd); 112 123 }
+4 -136
drivers/dma-buf/dma-buf-sysfs-stats.c
··· 40 40 * 41 41 * * ``/sys/kernel/dmabuf/buffers/<inode_number>/exporter_name`` 42 42 * * ``/sys/kernel/dmabuf/buffers/<inode_number>/size`` 43 - * * ``/sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attach_uid>/device`` 44 - * * ``/sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attach_uid>/map_counter`` 45 43 * 46 - * The information in the interface can also be used to derive per-exporter and 47 - * per-device usage statistics. The data from the interface can be gathered 48 - * on error conditions or other important events to provide a snapshot of 49 - * DMA-BUF usage. It can also be collected periodically by telemetry to monitor 50 - * various metrics. 44 + * The information in the interface can also be used to derive per-exporter 45 + * statistics. The data from the interface can be gathered on error conditions 46 + * or other important events to provide a snapshot of DMA-BUF usage. 47 + * It can also be collected periodically by telemetry to monitor various metrics. 51 48 * 52 49 * Detailed documentation about the interface is present in 53 50 * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers. 
··· 118 121 .default_groups = dma_buf_stats_default_groups, 119 122 }; 120 123 121 - #define to_dma_buf_attach_entry_from_kobj(x) container_of(x, struct dma_buf_attach_sysfs_entry, kobj) 122 - 123 - struct dma_buf_attach_stats_attribute { 124 - struct attribute attr; 125 - ssize_t (*show)(struct dma_buf_attach_sysfs_entry *sysfs_entry, 126 - struct dma_buf_attach_stats_attribute *attr, char *buf); 127 - }; 128 - #define to_dma_buf_attach_stats_attr(x) container_of(x, struct dma_buf_attach_stats_attribute, attr) 129 - 130 - static ssize_t dma_buf_attach_stats_attribute_show(struct kobject *kobj, 131 - struct attribute *attr, 132 - char *buf) 133 - { 134 - struct dma_buf_attach_stats_attribute *attribute; 135 - struct dma_buf_attach_sysfs_entry *sysfs_entry; 136 - 137 - attribute = to_dma_buf_attach_stats_attr(attr); 138 - sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj); 139 - 140 - if (!attribute->show) 141 - return -EIO; 142 - 143 - return attribute->show(sysfs_entry, attribute, buf); 144 - } 145 - 146 - static const struct sysfs_ops dma_buf_attach_stats_sysfs_ops = { 147 - .show = dma_buf_attach_stats_attribute_show, 148 - }; 149 - 150 - static ssize_t map_counter_show(struct dma_buf_attach_sysfs_entry *sysfs_entry, 151 - struct dma_buf_attach_stats_attribute *attr, 152 - char *buf) 153 - { 154 - return sysfs_emit(buf, "%u\n", sysfs_entry->map_counter); 155 - } 156 - 157 - static struct dma_buf_attach_stats_attribute map_counter_attribute = 158 - __ATTR_RO(map_counter); 159 - 160 - static struct attribute *dma_buf_attach_stats_default_attrs[] = { 161 - &map_counter_attribute.attr, 162 - NULL, 163 - }; 164 - ATTRIBUTE_GROUPS(dma_buf_attach_stats_default); 165 - 166 - static void dma_buf_attach_sysfs_release(struct kobject *kobj) 167 - { 168 - struct dma_buf_attach_sysfs_entry *sysfs_entry; 169 - 170 - sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj); 171 - kfree(sysfs_entry); 172 - } 173 - 174 - static struct kobj_type dma_buf_attach_ktype = { 175 - 
.sysfs_ops = &dma_buf_attach_stats_sysfs_ops, 176 - .release = dma_buf_attach_sysfs_release, 177 - .default_groups = dma_buf_attach_stats_default_groups, 178 - }; 179 - 180 - void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) 181 - { 182 - struct dma_buf_attach_sysfs_entry *sysfs_entry; 183 - 184 - sysfs_entry = attach->sysfs_entry; 185 - if (!sysfs_entry) 186 - return; 187 - 188 - sysfs_delete_link(&sysfs_entry->kobj, &attach->dev->kobj, "device"); 189 - 190 - kobject_del(&sysfs_entry->kobj); 191 - kobject_put(&sysfs_entry->kobj); 192 - } 193 - 194 - int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, 195 - unsigned int uid) 196 - { 197 - struct dma_buf_attach_sysfs_entry *sysfs_entry; 198 - int ret; 199 - struct dma_buf *dmabuf; 200 - 201 - if (!attach) 202 - return -EINVAL; 203 - 204 - dmabuf = attach->dmabuf; 205 - 206 - sysfs_entry = kzalloc(sizeof(struct dma_buf_attach_sysfs_entry), 207 - GFP_KERNEL); 208 - if (!sysfs_entry) 209 - return -ENOMEM; 210 - 211 - sysfs_entry->kobj.kset = dmabuf->sysfs_entry->attach_stats_kset; 212 - 213 - attach->sysfs_entry = sysfs_entry; 214 - 215 - ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_attach_ktype, 216 - NULL, "%u", uid); 217 - if (ret) 218 - goto kobj_err; 219 - 220 - ret = sysfs_create_link(&sysfs_entry->kobj, &attach->dev->kobj, 221 - "device"); 222 - if (ret) 223 - goto link_err; 224 - 225 - return 0; 226 - 227 - link_err: 228 - kobject_del(&sysfs_entry->kobj); 229 - kobj_err: 230 - kobject_put(&sysfs_entry->kobj); 231 - attach->sysfs_entry = NULL; 232 - 233 - return ret; 234 - } 235 124 void dma_buf_stats_teardown(struct dma_buf *dmabuf) 236 125 { 237 126 struct dma_buf_sysfs_entry *sysfs_entry; ··· 126 243 if (!sysfs_entry) 127 244 return; 128 245 129 - kset_unregister(sysfs_entry->attach_stats_kset); 130 246 kobject_del(&sysfs_entry->kobj); 131 247 kobject_put(&sysfs_entry->kobj); 132 248 } ··· 172 290 { 173 291 struct dma_buf_sysfs_entry *sysfs_entry; 174 292 int ret; 
175 - struct kset *attach_stats_kset; 176 293 177 294 if (!dmabuf || !dmabuf->file) 178 295 return -EINVAL; ··· 196 315 if (ret) 197 316 goto err_sysfs_dmabuf; 198 317 199 - /* create the directory for attachment stats */ 200 - attach_stats_kset = kset_create_and_add("attachments", 201 - &dmabuf_sysfs_no_uevent_ops, 202 - &sysfs_entry->kobj); 203 - if (!attach_stats_kset) { 204 - ret = -ENOMEM; 205 - goto err_sysfs_attach; 206 - } 207 - 208 - sysfs_entry->attach_stats_kset = attach_stats_kset; 209 - 210 318 return 0; 211 319 212 - err_sysfs_attach: 213 - kobject_del(&sysfs_entry->kobj); 214 320 err_sysfs_dmabuf: 215 321 kobject_put(&sysfs_entry->kobj); 216 322 dmabuf->sysfs_entry = NULL;
-27
drivers/dma-buf/dma-buf-sysfs-stats.h
··· 14 14 void dma_buf_uninit_sysfs_statistics(void); 15 15 16 16 int dma_buf_stats_setup(struct dma_buf *dmabuf); 17 - int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, 18 - unsigned int uid); 19 - static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach, 20 - int delta) 21 - { 22 - struct dma_buf_attach_sysfs_entry *entry = attach->sysfs_entry; 23 17 24 - entry->map_counter += delta; 25 - } 26 18 void dma_buf_stats_teardown(struct dma_buf *dmabuf); 27 - void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach); 28 - static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf) 29 - { 30 - struct dma_buf_sysfs_entry *entry = dmabuf->sysfs_entry; 31 - 32 - return entry->attachment_uid++; 33 - } 34 19 #else 35 20 36 21 static inline int dma_buf_init_sysfs_statistics(void) ··· 29 44 { 30 45 return 0; 31 46 } 32 - static inline int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, 33 - unsigned int uid) 34 - { 35 - return 0; 36 - } 37 47 38 48 static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {} 39 - static inline void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) {} 40 - static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach, 41 - int delta) {} 42 - static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf) 43 - { 44 - return 0; 45 - } 46 49 #endif 47 50 #endif // _DMA_BUF_SYSFS_STATS_H
+1 -17
drivers/dma-buf/dma-buf.c
··· 76 76 */ 77 77 BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); 78 78 79 + dma_buf_stats_teardown(dmabuf); 79 80 dmabuf->ops->release(dmabuf); 80 81 81 82 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) 82 83 dma_resv_fini(dmabuf->resv); 83 84 84 - dma_buf_stats_teardown(dmabuf); 85 85 module_put(dmabuf->owner); 86 86 kfree(dmabuf->name); 87 87 kfree(dmabuf); ··· 738 738 { 739 739 struct dma_buf_attachment *attach; 740 740 int ret; 741 - unsigned int attach_uid; 742 741 743 742 if (WARN_ON(!dmabuf || !dev)) 744 743 return ERR_PTR(-EINVAL); ··· 763 764 } 764 765 dma_resv_lock(dmabuf->resv, NULL); 765 766 list_add(&attach->node, &dmabuf->attachments); 766 - attach_uid = dma_buf_update_attach_uid(dmabuf); 767 767 dma_resv_unlock(dmabuf->resv); 768 - 769 - ret = dma_buf_attach_stats_setup(attach, attach_uid); 770 - if (ret) 771 - goto err_sysfs; 772 768 773 769 /* When either the importer or the exporter can't handle dynamic 774 770 * mappings we cache the mapping here to avoid issues with the ··· 791 797 dma_resv_unlock(attach->dmabuf->resv); 792 798 attach->sgt = sgt; 793 799 attach->dir = DMA_BIDIRECTIONAL; 794 - dma_buf_update_attachment_map_count(attach, 1 /* delta */); 795 800 } 796 801 797 802 return attach; ··· 807 814 if (dma_buf_is_dynamic(attach->dmabuf)) 808 815 dma_resv_unlock(attach->dmabuf->resv); 809 816 810 - err_sysfs: 811 817 dma_buf_detach(dmabuf, attach); 812 818 return ERR_PTR(ret); 813 819 } ··· 856 864 dma_resv_lock(attach->dmabuf->resv, NULL); 857 865 858 866 __unmap_dma_buf(attach, attach->sgt, attach->dir); 859 - dma_buf_update_attachment_map_count(attach, -1 /* delta */); 860 867 861 868 if (dma_buf_is_dynamic(attach->dmabuf)) { 862 869 dmabuf->ops->unpin(attach); ··· 869 878 if (dmabuf->ops->detach) 870 879 dmabuf->ops->detach(dmabuf, attach); 871 880 872 - dma_buf_attach_stats_teardown(attach); 873 881 kfree(attach); 874 882 } 875 883 EXPORT_SYMBOL_GPL(dma_buf_detach); ··· 1010 1020 } 1011 1021 } 1012 1022 #endif /* 
CONFIG_DMA_API_DEBUG */ 1013 - 1014 - if (!IS_ERR(sg_table)) 1015 - dma_buf_update_attachment_map_count(attach, 1 /* delta */); 1016 - 1017 1023 return sg_table; 1018 1024 } 1019 1025 EXPORT_SYMBOL_GPL(dma_buf_map_attachment); ··· 1047 1061 if (dma_buf_is_dynamic(attach->dmabuf) && 1048 1062 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) 1049 1063 dma_buf_unpin(attach); 1050 - 1051 - dma_buf_update_attachment_map_count(attach, -1 /* delta */); 1052 1064 } 1053 1065 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); 1054 1066
+32
drivers/firmware/Kconfig
··· 251 251 252 252 Say Y here to enable "download mode" by default. 253 253 254 + config SYSFB 255 + bool 256 + default y 257 + depends on X86 || ARM || ARM64 || RISCV || COMPILE_TEST 258 + 259 + config SYSFB_SIMPLEFB 260 + bool "Mark VGA/VBE/EFI FB as generic system framebuffer" 261 + depends on SYSFB 262 + help 263 + Firmwares often provide initial graphics framebuffers so the BIOS, 264 + bootloader or kernel can show basic video-output during boot for 265 + user-guidance and debugging. Historically, x86 used the VESA BIOS 266 + Extensions and EFI-framebuffers for this, which are mostly limited 267 + to x86 BIOS or EFI systems. 268 + This option, if enabled, marks VGA/VBE/EFI framebuffers as generic 269 + framebuffers so the new generic system-framebuffer drivers can be 270 + used instead. If the framebuffer is not compatible with the generic 271 + modes, it is advertised as fallback platform framebuffer so legacy 272 + drivers like efifb, vesafb and uvesafb can pick it up. 273 + If this option is not selected, all system framebuffers are always 274 + marked as fallback platform framebuffers as usual. 275 + 276 + Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will 277 + not be able to pick up generic system framebuffers if this option 278 + is selected. You are highly encouraged to enable simplefb as 279 + replacement if you select this option. simplefb can correctly deal 280 + with generic system framebuffers. But you should still keep vesafb 281 + and others enabled as fallback if a system framebuffer is 282 + incompatible with simplefb. 283 + 284 + If unsure, say Y. 285 + 254 286 config TI_SCI_PROTOCOL 255 287 tristate "TI System Control Interface (TISCI) Message Protocol" 256 288 depends on TI_MESSAGE_MANAGER
+2
drivers/firmware/Makefile
··· 18 18 obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o 19 19 obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o 20 20 obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o 21 + obj-$(CONFIG_SYSFB) += sysfb.o 22 + obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o 21 23 obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o 22 24 obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o 23 25 obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
+2
drivers/firmware/efi/Makefile
··· 36 36 fake_map-y += fake_mem.o 37 37 fake_map-$(CONFIG_X86) += x86_fake_mem.o 38 38 39 + obj-$(CONFIG_SYSFB) += sysfb_efi.o 40 + 39 41 arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o 40 42 obj-$(CONFIG_ARM) += $(arm-obj-y) 41 43 obj-$(CONFIG_ARM64) += $(arm-obj-y)
-90
drivers/firmware/efi/efi-init.c
··· 275 275 } 276 276 #endif 277 277 } 278 - 279 - static bool efifb_overlaps_pci_range(const struct of_pci_range *range) 280 - { 281 - u64 fb_base = screen_info.lfb_base; 282 - 283 - if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) 284 - fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; 285 - 286 - return fb_base >= range->cpu_addr && 287 - fb_base < (range->cpu_addr + range->size); 288 - } 289 - 290 - static struct device_node *find_pci_overlap_node(void) 291 - { 292 - struct device_node *np; 293 - 294 - for_each_node_by_type(np, "pci") { 295 - struct of_pci_range_parser parser; 296 - struct of_pci_range range; 297 - int err; 298 - 299 - err = of_pci_range_parser_init(&parser, np); 300 - if (err) { 301 - pr_warn("of_pci_range_parser_init() failed: %d\n", err); 302 - continue; 303 - } 304 - 305 - for_each_of_pci_range(&parser, &range) 306 - if (efifb_overlaps_pci_range(&range)) 307 - return np; 308 - } 309 - return NULL; 310 - } 311 - 312 - /* 313 - * If the efifb framebuffer is backed by a PCI graphics controller, we have 314 - * to ensure that this relation is expressed using a device link when 315 - * running in DT mode, or the probe order may be reversed, resulting in a 316 - * resource reservation conflict on the memory window that the efifb 317 - * framebuffer steals from the PCIe host bridge. 318 - */ 319 - static int efifb_add_links(struct fwnode_handle *fwnode) 320 - { 321 - struct device_node *sup_np; 322 - 323 - sup_np = find_pci_overlap_node(); 324 - 325 - /* 326 - * If there's no PCI graphics controller backing the efifb, we are 327 - * done here. 
328 - */ 329 - if (!sup_np) 330 - return 0; 331 - 332 - fwnode_link_add(fwnode, of_fwnode_handle(sup_np)); 333 - of_node_put(sup_np); 334 - 335 - return 0; 336 - } 337 - 338 - static const struct fwnode_operations efifb_fwnode_ops = { 339 - .add_links = efifb_add_links, 340 - }; 341 - 342 - static struct fwnode_handle efifb_fwnode; 343 - 344 - static int __init register_gop_device(void) 345 - { 346 - struct platform_device *pd; 347 - int err; 348 - 349 - if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) 350 - return 0; 351 - 352 - pd = platform_device_alloc("efi-framebuffer", 0); 353 - if (!pd) 354 - return -ENOMEM; 355 - 356 - if (IS_ENABLED(CONFIG_PCI)) { 357 - fwnode_init(&efifb_fwnode, &efifb_fwnode_ops); 358 - pd->dev.fwnode = &efifb_fwnode; 359 - } 360 - 361 - err = platform_device_add_data(pd, &screen_info, sizeof(screen_info)); 362 - if (err) 363 - return err; 364 - 365 - return platform_device_add(pd); 366 - } 367 - subsys_initcall(register_gop_device);
+6 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1266 1266 /** 1267 1267 * amdgpu_device_vga_set_decode - enable/disable vga decode 1268 1268 * 1269 - * @cookie: amdgpu_device pointer 1269 + * @pdev: PCI device pointer 1270 1270 * @state: enable/disable vga decode 1271 1271 * 1272 1272 * Enable/disable vga decode (all asics). 1273 1273 * Returns VGA resource flags. 1274 1274 */ 1275 - static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state) 1275 + static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev, 1276 + bool state) 1276 1277 { 1277 - struct amdgpu_device *adev = cookie; 1278 + struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev)); 1278 1279 amdgpu_asic_set_vga_state(adev, state); 1279 1280 if (state) 1280 1281 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | ··· 3716 3715 /* this will fail for cards that aren't VGA class devices, just 3717 3716 * ignore it */ 3718 3717 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 3719 - vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); 3718 + vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); 3720 3719 3721 3720 if (amdgpu_device_supports_px(ddev)) { 3722 3721 px = true; ··· 3839 3838 vga_switcheroo_fini_domain_pm_ops(adev->dev); 3840 3839 } 3841 3840 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 3842 - vga_client_register(adev->pdev, NULL, NULL, NULL); 3841 + vga_client_unregister(adev->pdev); 3843 3842 3844 3843 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 3845 3844 amdgpu_pmu_fini(adev);
+6
drivers/gpu/drm/ast/ast_drv.h
··· 337 337 #define AST_DP501_LINKRATE 0xf014 338 338 #define AST_DP501_EDID_DATA 0xf020 339 339 340 + /* Define for Soc scratched reg */ 341 + #define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6) 342 + //#define AST_VRAM_INIT_BY_BMC BIT(7) 343 + //#define AST_VRAM_INIT_READY BIT(6) 344 + 340 345 int ast_mm_init(struct ast_private *ast); 341 346 342 347 /* ast post */ ··· 351 346 void ast_post_gpu(struct drm_device *dev); 352 347 u32 ast_mindwm(struct ast_private *ast, u32 r); 353 348 void ast_moutdwm(struct ast_private *ast, u32 r, u32 v); 349 + void ast_patch_ahb_2500(struct ast_private *ast); 354 350 /* ast dp501 */ 355 351 void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); 356 352 bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
+5
drivers/gpu/drm/ast/ast_main.c
··· 97 97 jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); 98 98 jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); 99 99 if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { 100 + /* Patch AST2500 */ 101 + if (((pdev->revision & 0xF0) == 0x40) 102 + && ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0)) 103 + ast_patch_ahb_2500(ast); 104 + 100 105 /* Double check it's actually working */ 101 106 data = ast_read32(ast, 0xf004); 102 107 if ((data != 0xFFFFFFFF) && (data != 0x00)) {
+1 -1
drivers/gpu/drm/ast/ast_mode.c
··· 1298 1298 int r; 1299 1299 1300 1300 r = ast_get_modes(connector); 1301 - if (r < 0) 1301 + if (r <= 0) 1302 1302 return connector_status_disconnected; 1303 1303 1304 1304 return connector_status_connected;
+64 -25
drivers/gpu/drm/ast/ast_post.c
··· 2028 2028 return true; 2029 2029 } 2030 2030 2031 + void ast_patch_ahb_2500(struct ast_private *ast) 2032 + { 2033 + u32 data; 2034 + 2035 + /* Clear bus lock condition */ 2036 + ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); 2037 + ast_moutdwm(ast, 0x1e600084, 0x00010000); 2038 + ast_moutdwm(ast, 0x1e600088, 0x00000000); 2039 + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); 2040 + data = ast_mindwm(ast, 0x1e6e2070); 2041 + if (data & 0x08000000) { /* check fast reset */ 2042 + /* 2043 + * If "Fast reset" is enabled for ARM-ICE debugger, 2044 + * then the WDT needs to be enabled, where 2045 + * WDT04 is WDT#1 Reload reg. 2046 + * WDT08 is WDT#1 counter restart reg to avoid system deadlock 2047 + * WDT0C is WDT#1 control reg 2048 + * [6:5]:= 01:Full chip 2049 + * [4]:= 1:1MHz clock source 2050 + * [1]:= 1:WDT will be cleared and disabled after timeout occurs 2051 + * [0]:= 1:WDT enable 2052 + */ 2053 + ast_moutdwm(ast, 0x1E785004, 0x00000010); 2054 + ast_moutdwm(ast, 0x1E785008, 0x00004755); 2055 + ast_moutdwm(ast, 0x1E78500c, 0x00000033); 2056 + udelay(1000); 2057 + } 2058 + do { 2059 + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); 2060 + data = ast_mindwm(ast, 0x1e6e2000); 2061 + } while (data != 1); 2062 + ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */ 2063 + } 2064 + 2031 2065 void ast_post_chip_2500(struct drm_device *dev) 2032 2066 { 2033 2067 struct ast_private *ast = to_ast_private(dev); ··· 2069 2035 u8 reg; 2070 2036 2071 2037 reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); 2072 - if ((reg & 0x80) == 0) {/* vga only */ 2038 + if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ 2073 2039 /* Clear bus lock condition */ 2074 - ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); 2075 - ast_moutdwm(ast, 0x1e600084, 0x00010000); 2076 - ast_moutdwm(ast, 0x1e600088, 0x00000000); 2077 - ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); 2078 - ast_write32(ast, 0xf004, 0x1e6e0000); 2079 - ast_write32(ast, 0xf000, 0x1); 2080 - ast_write32(ast, 0x12000, 
0x1688a8a8); 2081 - while (ast_read32(ast, 0x12000) != 0x1) 2082 - ; 2040 + ast_patch_ahb_2500(ast); 2083 2041 2084 - ast_write32(ast, 0x10000, 0xfc600309); 2085 - while (ast_read32(ast, 0x10000) != 0x1) 2086 - ; 2042 + /* Disable watchdog */ 2043 + ast_moutdwm(ast, 0x1E78502C, 0x00000000); 2044 + ast_moutdwm(ast, 0x1E78504C, 0x00000000); 2045 + 2046 + /* 2047 + * Reset USB port to patch USB unknown device issue 2048 + * SCU90 is Multi-function Pin Control #5 2049 + * [29]:= 1:Enable USB2.0 Host port#1 (that the mutually shared USB2.0 Hub 2050 + * port). 2051 + * SCU94 is Multi-function Pin Control #6 2052 + * [14:13]:= 1x:USB2.0 Host2 controller 2053 + * SCU70 is Hardware Strap reg 2054 + * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by 2055 + * [18]: 0(24)/1(48) MHz) 2056 + * SCU7C is Write clear reg to SCU70 2057 + * [23]:= write 1 and then SCU70[23] will be clear as 0b. 2058 + */ 2059 + ast_moutdwm(ast, 0x1E6E2090, 0x20000000); 2060 + ast_moutdwm(ast, 0x1E6E2094, 0x00004000); 2061 + if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) { 2062 + ast_moutdwm(ast, 0x1E6E207C, 0x00800000); 2063 + mdelay(100); 2064 + ast_moutdwm(ast, 0x1E6E2070, 0x00800000); 2065 + } 2066 + /* Modify eSPI reset pin */ 2067 + temp = ast_mindwm(ast, 0x1E6E2070); 2068 + if (temp & 0x02000000) 2069 + ast_moutdwm(ast, 0x1E6E207C, 0x00004000); 2087 2070 2088 2071 /* Slow down CPU/AHB CLK in VGA only mode */ 2089 2072 temp = ast_read32(ast, 0x12008); 2090 2073 temp |= 0x73; 2091 2074 ast_write32(ast, 0x12008, temp); 2092 - 2093 - /* Reset USB port to patch USB unknown device issue */ 2094 - ast_moutdwm(ast, 0x1e6e2090, 0x20000000); 2095 - temp = ast_mindwm(ast, 0x1e6e2094); 2096 - temp |= 0x00004000; 2097 - ast_moutdwm(ast, 0x1e6e2094, temp); 2098 - temp = ast_mindwm(ast, 0x1e6e2070); 2099 - if (temp & 0x00800000) { 2100 - ast_moutdwm(ast, 0x1e6e207c, 0x00800000); 2101 - mdelay(100); 2102 - ast_moutdwm(ast, 0x1e6e2070, 0x00800000); 2103 - } 2104 2075 2105 2076 if 
(!ast_dram_init_2500(ast)) 2106 2077 drm_err(dev, "DRAM init failed !\n");
+68 -25
drivers/gpu/drm/drm_auth.c
··· 61 61 * trusted clients. 62 62 */ 63 63 64 + static bool drm_is_current_master_locked(struct drm_file *fpriv) 65 + { 66 + lockdep_assert_held_once(&fpriv->minor->dev->master_mutex); 67 + 68 + return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master; 69 + } 70 + 71 + /** 72 + * drm_is_current_master - checks whether @priv is the current master 73 + * @fpriv: DRM file private 74 + * 75 + * Checks whether @fpriv is current master on its device. This decides whether a 76 + * client is allowed to run DRM_MASTER IOCTLs. 77 + * 78 + * Most of the modern IOCTL which require DRM_MASTER are for kernel modesetting 79 + * - the current master is assumed to own the non-shareable display hardware. 80 + */ 81 + bool drm_is_current_master(struct drm_file *fpriv) 82 + { 83 + bool ret; 84 + 85 + mutex_lock(&fpriv->minor->dev->master_mutex); 86 + ret = drm_is_current_master_locked(fpriv); 87 + mutex_unlock(&fpriv->minor->dev->master_mutex); 88 + 89 + return ret; 90 + } 91 + EXPORT_SYMBOL(drm_is_current_master); 92 + 64 93 int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) 65 94 { 66 95 struct drm_auth *auth = data; ··· 164 135 static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) 165 136 { 166 137 struct drm_master *old_master; 138 + struct drm_master *new_master; 167 139 168 140 lockdep_assert_held_once(&dev->master_mutex); 169 141 170 142 WARN_ON(fpriv->is_master); 171 143 old_master = fpriv->master; 172 - fpriv->master = drm_master_create(dev); 173 - if (!fpriv->master) { 174 - fpriv->master = old_master; 144 + new_master = drm_master_create(dev); 145 + if (!new_master) 175 146 return -ENOMEM; 176 - } 147 + spin_lock(&fpriv->master_lookup_lock); 148 + fpriv->master = new_master; 149 + spin_unlock(&fpriv->master_lookup_lock); 177 150 178 151 fpriv->is_master = 1; 179 152 fpriv->authenticated = 1; ··· 254 223 if (ret) 255 224 goto out_unlock; 256 225 257 - if (drm_is_current_master(file_priv)) 
226 + if (drm_is_current_master_locked(file_priv)) 258 227 goto out_unlock; 259 228 260 229 if (dev->master) { ··· 303 272 if (ret) 304 273 goto out_unlock; 305 274 306 - if (!drm_is_current_master(file_priv)) { 275 + if (!drm_is_current_master_locked(file_priv)) { 307 276 ret = -EINVAL; 308 277 goto out_unlock; 309 278 } ··· 334 303 * any master object for render clients 335 304 */ 336 305 mutex_lock(&dev->master_mutex); 337 - if (!dev->master) 306 + if (!dev->master) { 338 307 ret = drm_new_set_master(dev, file_priv); 339 - else 308 + } else { 309 + spin_lock(&file_priv->master_lookup_lock); 340 310 file_priv->master = drm_master_get(dev->master); 311 + spin_unlock(&file_priv->master_lookup_lock); 312 + } 341 313 mutex_unlock(&dev->master_mutex); 342 314 343 315 return ret; ··· 356 322 if (file_priv->magic) 357 323 idr_remove(&file_priv->master->magic_map, file_priv->magic); 358 324 359 - if (!drm_is_current_master(file_priv)) 325 + if (!drm_is_current_master_locked(file_priv)) 360 326 goto out; 361 327 362 328 drm_legacy_lock_master_cleanup(dev, master); ··· 378 344 } 379 345 380 346 /** 381 - * drm_is_current_master - checks whether @priv is the current master 382 - * @fpriv: DRM file private 383 - * 384 - * Checks whether @fpriv is current master on its device. This decides whether a 385 - * client is allowed to run DRM_MASTER IOCTLs. 386 - * 387 - * Most of the modern IOCTL which require DRM_MASTER are for kernel modesetting 388 - * - the current master is assumed to own the non-shareable display hardware. 
389 - */ 390 - bool drm_is_current_master(struct drm_file *fpriv) 391 - { 392 - return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master; 393 - } 394 - EXPORT_SYMBOL(drm_is_current_master); 395 - 396 - /** 397 347 * drm_master_get - reference a master pointer 398 348 * @master: &struct drm_master 399 349 * ··· 389 371 return master; 390 372 } 391 373 EXPORT_SYMBOL(drm_master_get); 374 + 375 + /** 376 + * drm_file_get_master - reference &drm_file.master of @file_priv 377 + * @file_priv: DRM file private 378 + * 379 + * Increments the reference count of @file_priv's &drm_file.master and returns 380 + * the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL. 381 + * 382 + * Master pointers returned from this function should be unreferenced using 383 + * drm_master_put(). 384 + */ 385 + struct drm_master *drm_file_get_master(struct drm_file *file_priv) 386 + { 387 + struct drm_master *master = NULL; 388 + 389 + spin_lock(&file_priv->master_lookup_lock); 390 + if (!file_priv->master) 391 + goto unlock; 392 + master = drm_master_get(file_priv->master); 393 + 394 + unlock: 395 + spin_unlock(&file_priv->master_lookup_lock); 396 + return master; 397 + } 398 + EXPORT_SYMBOL(drm_file_get_master); 392 399 393 400 static void drm_master_destroy(struct kref *kref) 394 401 {
+4 -1
drivers/gpu/drm/drm_connector.c
··· 2414 2414 struct drm_mode_modeinfo u_mode; 2415 2415 struct drm_mode_modeinfo __user *mode_ptr; 2416 2416 uint32_t __user *encoder_ptr; 2417 + bool is_current_master; 2417 2418 2418 2419 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2419 2420 return -EOPNOTSUPP; ··· 2445 2444 out_resp->connector_type = connector->connector_type; 2446 2445 out_resp->connector_type_id = connector->connector_type_id; 2447 2446 2447 + is_current_master = drm_is_current_master(file_priv); 2448 + 2448 2449 mutex_lock(&dev->mode_config.mutex); 2449 2450 if (out_resp->count_modes == 0) { 2450 - if (drm_is_current_master(file_priv)) 2451 + if (is_current_master) 2451 2452 connector->funcs->fill_modes(connector, 2452 2453 dev->mode_config.max_width, 2453 2454 dev->mode_config.max_height);
+2 -1
drivers/gpu/drm/drm_debugfs.c
··· 91 91 mutex_lock(&dev->filelist_mutex); 92 92 list_for_each_entry_reverse(priv, &dev->filelist, lhead) { 93 93 struct task_struct *task; 94 + bool is_current_master = drm_is_current_master(priv); 94 95 95 96 rcu_read_lock(); /* locks pid_task()->comm */ 96 97 task = pid_task(priv->pid, PIDTYPE_PID); ··· 100 99 task ? task->comm : "<unknown>", 101 100 pid_vnr(priv->pid), 102 101 priv->minor->index, 103 - drm_is_current_master(priv) ? 'y' : 'n', 102 + is_current_master ? 'y' : 'n', 104 103 priv->authenticated ? 'y' : 'n', 105 104 from_kuid_munged(seq_user_ns(m), uid), 106 105 priv->magic);
+1
drivers/gpu/drm/drm_file.c
··· 176 176 init_waitqueue_head(&file->event_wait); 177 177 file->event_space = 4096; /* set aside 4k for event buffer */ 178 178 179 + spin_lock_init(&file->master_lookup_lock); 179 180 mutex_init(&file->event_read_lock); 180 181 181 182 if (drm_core_check_feature(dev, DRIVER_GEM))
+2 -2
drivers/gpu/drm/drm_irq.c
··· 136 136 if (ret < 0) { 137 137 dev->irq_enabled = false; 138 138 if (drm_core_check_feature(dev, DRIVER_LEGACY)) 139 - vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL); 139 + vga_client_unregister(to_pci_dev(dev->dev)); 140 140 free_irq(irq, dev); 141 141 } else { 142 142 dev->irq = irq; ··· 198 198 DRM_DEBUG("irq=%d\n", dev->irq); 199 199 200 200 if (drm_core_check_feature(dev, DRIVER_LEGACY)) 201 - vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL); 201 + vga_client_unregister(to_pci_dev(dev->dev)); 202 202 203 203 if (dev->driver->irq_uninstall) 204 204 dev->driver->irq_uninstall(dev);
+61 -20
drivers/gpu/drm/drm_lease.c
··· 106 106 */ 107 107 bool _drm_lease_held(struct drm_file *file_priv, int id) 108 108 { 109 - if (!file_priv || !file_priv->master) 109 + bool ret; 110 + struct drm_master *master; 111 + 112 + if (!file_priv) 110 113 return true; 111 114 112 - return _drm_lease_held_master(file_priv->master, id); 115 + master = drm_file_get_master(file_priv); 116 + if (!master) 117 + return true; 118 + ret = _drm_lease_held_master(master, id); 119 + drm_master_put(&master); 120 + 121 + return ret; 113 122 } 114 123 115 124 /** ··· 137 128 struct drm_master *master; 138 129 bool ret; 139 130 140 - if (!file_priv || !file_priv->master || !file_priv->master->lessor) 131 + if (!file_priv) 141 132 return true; 142 133 143 - master = file_priv->master; 134 + master = drm_file_get_master(file_priv); 135 + if (!master) 136 + return true; 137 + if (!master->lessor) { 138 + ret = true; 139 + goto out; 140 + } 144 141 mutex_lock(&master->dev->mode_config.idr_mutex); 145 142 ret = _drm_lease_held_master(master, id); 146 143 mutex_unlock(&master->dev->mode_config.idr_mutex); 144 + 145 + out: 146 + drm_master_put(&master); 147 147 return ret; 148 148 } 149 149 ··· 172 154 int count_in, count_out; 173 155 uint32_t crtcs_out = 0; 174 156 175 - if (!file_priv || !file_priv->master || !file_priv->master->lessor) 157 + if (!file_priv) 176 158 return crtcs_in; 177 159 178 - master = file_priv->master; 160 + master = drm_file_get_master(file_priv); 161 + if (!master) 162 + return crtcs_in; 163 + if (!master->lessor) { 164 + crtcs_out = crtcs_in; 165 + goto out; 166 + } 179 167 dev = master->dev; 180 168 181 169 count_in = count_out = 0; ··· 200 176 count_in++; 201 177 } 202 178 mutex_unlock(&master->dev->mode_config.idr_mutex); 179 + 180 + out: 181 + drm_master_put(&master); 203 182 return crtcs_out; 204 183 } 205 184 ··· 516 489 size_t object_count; 517 490 int ret = 0; 518 491 struct idr leases; 519 - struct drm_master *lessor = lessor_priv->master; 492 + struct drm_master *lessor; 520 493 struct 
drm_master *lessee = NULL; 521 494 struct file *lessee_file = NULL; 522 495 struct file *lessor_file = lessor_priv->filp; ··· 527 500 /* Can't lease without MODESET */ 528 501 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 529 502 return -EOPNOTSUPP; 530 - 531 - /* Do not allow sub-leases */ 532 - if (lessor->lessor) { 533 - DRM_DEBUG_LEASE("recursive leasing not allowed\n"); 534 - return -EINVAL; 535 - } 536 503 537 504 /* need some objects */ 538 505 if (cl->object_count == 0) { ··· 539 518 return -EINVAL; 540 519 } 541 520 521 + lessor = drm_file_get_master(lessor_priv); 522 + /* Do not allow sub-leases */ 523 + if (lessor->lessor) { 524 + DRM_DEBUG_LEASE("recursive leasing not allowed\n"); 525 + ret = -EINVAL; 526 + goto out_lessor; 527 + } 528 + 542 529 object_count = cl->object_count; 543 530 544 531 object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), 545 532 array_size(object_count, sizeof(__u32))); 546 - if (IS_ERR(object_ids)) 547 - return PTR_ERR(object_ids); 533 + if (IS_ERR(object_ids)) { 534 + ret = PTR_ERR(object_ids); 535 + goto out_lessor; 536 + } 548 537 549 538 idr_init(&leases); 550 539 ··· 565 534 if (ret) { 566 535 DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret); 567 536 idr_destroy(&leases); 568 - return ret; 537 + goto out_lessor; 569 538 } 570 539 571 540 /* Allocate a file descriptor for the lease */ 572 541 fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK)); 573 542 if (fd < 0) { 574 543 idr_destroy(&leases); 575 - return fd; 544 + ret = fd; 545 + goto out_lessor; 576 546 } 577 547 578 548 DRM_DEBUG_LEASE("Creating lease\n"); ··· 609 577 /* Hook up the fd */ 610 578 fd_install(fd, lessee_file); 611 579 580 + drm_master_put(&lessor); 612 581 DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n"); 613 582 return 0; 614 583 ··· 619 586 out_leases: 620 587 put_unused_fd(fd); 621 588 589 + out_lessor: 590 + drm_master_put(&lessor); 622 591 DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret); 623 
592 return ret; 624 593 } ··· 643 608 struct drm_mode_list_lessees *arg = data; 644 609 __u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr); 645 610 __u32 count_lessees = arg->count_lessees; 646 - struct drm_master *lessor = lessor_priv->master, *lessee; 611 + struct drm_master *lessor, *lessee; 647 612 int count; 648 613 int ret = 0; 649 614 ··· 654 619 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 655 620 return -EOPNOTSUPP; 656 621 622 + lessor = drm_file_get_master(lessor_priv); 657 623 DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id); 658 624 659 625 mutex_lock(&dev->mode_config.idr_mutex); ··· 678 642 arg->count_lessees = count; 679 643 680 644 mutex_unlock(&dev->mode_config.idr_mutex); 645 + drm_master_put(&lessor); 681 646 682 647 return ret; 683 648 } ··· 698 661 struct drm_mode_get_lease *arg = data; 699 662 __u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr); 700 663 __u32 count_objects = arg->count_objects; 701 - struct drm_master *lessee = lessee_priv->master; 664 + struct drm_master *lessee; 702 665 struct idr *object_idr; 703 666 int count; 704 667 void *entry; ··· 712 675 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 713 676 return -EOPNOTSUPP; 714 677 678 + lessee = drm_file_get_master(lessee_priv); 715 679 DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id); 716 680 717 681 mutex_lock(&dev->mode_config.idr_mutex); ··· 740 702 arg->count_objects = count; 741 703 742 704 mutex_unlock(&dev->mode_config.idr_mutex); 705 + drm_master_put(&lessee); 743 706 744 707 return ret; 745 708 } ··· 759 720 void *data, struct drm_file *lessor_priv) 760 721 { 761 722 struct drm_mode_revoke_lease *arg = data; 762 - struct drm_master *lessor = lessor_priv->master; 723 + struct drm_master *lessor; 763 724 struct drm_master *lessee; 764 725 int ret = 0; 765 726 ··· 769 730 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 770 731 return -EOPNOTSUPP; 771 732 733 + lessor = 
drm_file_get_master(lessor_priv); 772 734 mutex_lock(&dev->mode_config.idr_mutex); 773 735 774 736 lessee = _drm_find_lessee(lessor, arg->lessee_id); ··· 790 750 791 751 fail: 792 752 mutex_unlock(&dev->mode_config.idr_mutex); 753 + drm_master_put(&lessor); 793 754 794 755 return ret; 795 756 }
+5 -4
drivers/gpu/drm/i915/display/intel_vga.c
··· 124 124 } 125 125 126 126 static unsigned int 127 - intel_vga_set_decode(void *cookie, bool enable_decode) 127 + intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode) 128 128 { 129 - struct drm_i915_private *i915 = cookie; 129 + struct drm_i915_private *i915 = pdev_to_i915(pdev); 130 130 131 131 intel_vga_set_state(i915, enable_decode); 132 132 ··· 139 139 140 140 int intel_vga_register(struct drm_i915_private *i915) 141 141 { 142 + 142 143 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 143 144 int ret; 144 145 ··· 151 150 * then we do not take part in VGA arbitration and the 152 151 * vga_client_register() fails with -ENODEV. 153 152 */ 154 - ret = vga_client_register(pdev, i915, NULL, intel_vga_set_decode); 153 + ret = vga_client_register(pdev, intel_vga_set_decode); 155 154 if (ret && ret != -ENODEV) 156 155 return ret; 157 156 ··· 162 161 { 163 162 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 164 163 165 - vga_client_register(pdev, NULL, NULL, NULL); 164 + vga_client_unregister(pdev); 166 165 }
+1 -4
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
··· 33 33 #include <drm/drm_fourcc.h> 34 34 #include <drm/drm_gem_atomic_helper.h> 35 35 #include <drm/drm_gem_framebuffer_helper.h> 36 - #include <drm/drm_irq.h> 37 36 #include <drm/drm_managed.h> 38 37 #include <drm/drm_of.h> 39 38 #include <drm/drm_panel.h> ··· 798 799 .fops = &ingenic_drm_fops, 799 800 .gem_create_object = ingenic_drm_gem_create_object, 800 801 DRM_GEM_CMA_DRIVER_OPS, 801 - 802 - .irq_handler = ingenic_drm_irq_handler, 803 802 }; 804 803 805 804 static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = { ··· 1095 1098 encoder->possible_clones = clone_mask; 1096 1099 } 1097 1100 1098 - ret = drm_irq_install(drm, irq); 1101 + ret = devm_request_irq(dev, irq, ingenic_drm_irq_handler, 0, drm->driver->name, drm); 1099 1102 if (ret) { 1100 1103 dev_err(dev, "Unable to install IRQ handler\n"); 1101 1104 return ret;
+4 -4
drivers/gpu/drm/nouveau/nouveau_vga.c
··· 11 11 #include "nouveau_vga.h" 12 12 13 13 static unsigned int 14 - nouveau_vga_set_decode(void *priv, bool state) 14 + nouveau_vga_set_decode(struct pci_dev *pdev, bool state) 15 15 { 16 - struct nouveau_drm *drm = nouveau_drm(priv); 16 + struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev)); 17 17 struct nvif_object *device = &drm->client.device.object; 18 18 19 19 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE && ··· 94 94 return; 95 95 pdev = to_pci_dev(dev->dev); 96 96 97 - vga_client_register(pdev, dev, NULL, nouveau_vga_set_decode); 97 + vga_client_register(pdev, nouveau_vga_set_decode); 98 98 99 99 /* don't register Thunderbolt eGPU with vga_switcheroo */ 100 100 if (pci_is_thunderbolt_attached(pdev)) ··· 118 118 return; 119 119 pdev = to_pci_dev(dev->dev); 120 120 121 - vga_client_register(pdev, NULL, NULL, NULL); 121 + vga_client_unregister(pdev); 122 122 123 123 if (pci_is_thunderbolt_attached(pdev)) 124 124 return;
+10
drivers/gpu/drm/panel/Kconfig
··· 574 574 Say Y here if you want to enable support for Visionox 575 575 RM69299 DSI Video Mode panel. 576 576 577 + config DRM_PANEL_WIDECHIPS_WS2401 578 + tristate "Widechips WS2401 DPI panel driver" 579 + depends on SPI && GPIOLIB 580 + depends on BACKLIGHT_CLASS_DEVICE 581 + select DRM_MIPI_DBI 582 + help 583 + Say Y here if you want to enable support for the Widechips WS2401 DPI 584 + 480x800 display controller used in panels such as Samsung LMS380KF01. 585 + This display is used in the Samsung Galaxy Ace 2 GT-I8160 (Codina). 586 + 577 587 config DRM_PANEL_XINPENG_XPP055C272 578 588 tristate "Xinpeng XPP055C272 panel driver" 579 589 depends on OF
+1
drivers/gpu/drm/panel/Makefile
··· 60 60 obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o 61 61 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o 62 62 obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o 63 + obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o 63 64 obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
+441
drivers/gpu/drm/panel/panel-widechips-ws2401.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Panel driver for the WideChips WS2401 480x800 DPI RGB panel, used in 4 + * the Samsung Mobile Display (SMD) LMS380KF01. 5 + * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone. 6 + * Linus Walleij <linus.walleij@linaro.org> 7 + * Inspired by code and know-how in the vendor driver by Gareth Phillips. 8 + */ 9 + #include <drm/drm_mipi_dbi.h> 10 + #include <drm/drm_modes.h> 11 + #include <drm/drm_panel.h> 12 + 13 + #include <linux/backlight.h> 14 + #include <linux/delay.h> 15 + #include <linux/gpio/consumer.h> 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/media-bus-format.h> 19 + #include <linux/module.h> 20 + #include <linux/regulator/consumer.h> 21 + #include <linux/spi/spi.h> 22 + 23 + #include <video/mipi_display.h> 24 + 25 + #define WS2401_RESCTL 0xb8 /* Resolution select control */ 26 + #define WS2401_PSMPS 0xbd /* SMPS positive control */ 27 + #define WS2401_NSMPS 0xbe /* SMPS negative control */ 28 + #define WS2401_SMPS 0xbf 29 + #define WS2401_BCMODE 0xc1 /* Backlight control mode */ 30 + #define WS2401_WRBLCTL 0xc3 /* Backlight control */ 31 + #define WS2401_WRDISBV 0xc4 /* Write manual brightness */ 32 + #define WS2401_WRCTRLD 0xc6 /* Write BL control */ 33 + #define WS2401_WRMIE 0xc7 /* Write MIE mode */ 34 + #define WS2401_READ_ID1 0xda /* Read panel ID 1 */ 35 + #define WS2401_READ_ID2 0xdb /* Read panel ID 2 */ 36 + #define WS2401_READ_ID3 0xdc /* Read panel ID 3 */ 37 + #define WS2401_GAMMA_R1 0xe7 /* Gamma red 1 */ 38 + #define WS2401_GAMMA_G1 0xe8 /* Gamma green 1 */ 39 + #define WS2401_GAMMA_B1 0xe9 /* Gamma blue 1 */ 40 + #define WS2401_GAMMA_R2 0xea /* Gamma red 2 */ 41 + #define WS2401_GAMMA_G2 0xeb /* Gamma green 2 */ 42 + #define WS2401_GAMMA_B2 0xec /* Gamma blue 2 */ 43 + #define WS2401_PASSWD1 0xf0 /* Password command for level 2 */ 44 + #define WS2401_DISCTL 0xf2 /* Display control */ 45 + #define WS2401_PWRCTL 0xf3 /* Power control */ 46 + 
#define WS2401_VCOMCTL 0xf4 /* VCOM control */ 47 + #define WS2401_SRCCTL 0xf5 /* Source control */ 48 + #define WS2401_PANELCTL 0xf6 /* Panel control */ 49 + 50 + static const u8 ws2401_dbi_read_commands[] = { 51 + WS2401_READ_ID1, 52 + WS2401_READ_ID2, 53 + WS2401_READ_ID3, 54 + 0, /* sentinel */ 55 + }; 56 + 57 + /** 58 + * struct ws2401 - state container for a panel controlled by the WS2401 59 + * controller 60 + */ 61 + struct ws2401 { 62 + /** @dev: the container device */ 63 + struct device *dev; 64 + /** @dbi: the DBI bus abstraction handle */ 65 + struct mipi_dbi dbi; 66 + /** @panel: the DRM panel instance for this device */ 67 + struct drm_panel panel; 68 + /** @width: the width of this panel in mm */ 69 + u32 width; 70 + /** @height: the height of this panel in mm */ 71 + u32 height; 72 + /** @reset: reset GPIO line */ 73 + struct gpio_desc *reset; 74 + /** @regulators: VCCIO and VIO supply regulators */ 75 + struct regulator_bulk_data regulators[2]; 76 + /** @internal_bl: If using internal backlight */ 77 + bool internal_bl; 78 + }; 79 + 80 + static const struct drm_display_mode lms380kf01_480_800_mode = { 81 + /* 82 + * The vendor driver states that the "SMD panel" has a clock 83 + * frequency of 49920000 Hz / 2 = 24960000 Hz. 
84 + */ 85 + .clock = 24960, 86 + .hdisplay = 480, 87 + .hsync_start = 480 + 8, 88 + .hsync_end = 480 + 8 + 10, 89 + .htotal = 480 + 8 + 10 + 8, 90 + .vdisplay = 800, 91 + .vsync_start = 800 + 8, 92 + .vsync_end = 800 + 8 + 2, 93 + .vtotal = 800 + 8 + 2 + 18, 94 + .width_mm = 50, 95 + .height_mm = 84, 96 + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, 97 + }; 98 + 99 + static inline struct ws2401 *to_ws2401(struct drm_panel *panel) 100 + { 101 + return container_of(panel, struct ws2401, panel); 102 + } 103 + 104 + static void ws2401_read_mtp_id(struct ws2401 *ws) 105 + { 106 + struct mipi_dbi *dbi = &ws->dbi; 107 + u8 id1, id2, id3; 108 + int ret; 109 + 110 + ret = mipi_dbi_command_read(dbi, WS2401_READ_ID1, &id1); 111 + if (ret) { 112 + dev_err(ws->dev, "unable to read MTP ID 1\n"); 113 + return; 114 + } 115 + ret = mipi_dbi_command_read(dbi, WS2401_READ_ID2, &id2); 116 + if (ret) { 117 + dev_err(ws->dev, "unable to read MTP ID 2\n"); 118 + return; 119 + } 120 + ret = mipi_dbi_command_read(dbi, WS2401_READ_ID3, &id3); 121 + if (ret) { 122 + dev_err(ws->dev, "unable to read MTP ID 3\n"); 123 + return; 124 + } 125 + dev_info(ws->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); 126 + } 127 + 128 + static int ws2401_power_on(struct ws2401 *ws) 129 + { 130 + struct mipi_dbi *dbi = &ws->dbi; 131 + int ret; 132 + 133 + /* Power up */ 134 + ret = regulator_bulk_enable(ARRAY_SIZE(ws->regulators), 135 + ws->regulators); 136 + if (ret) { 137 + dev_err(ws->dev, "failed to enable regulators: %d\n", ret); 138 + return ret; 139 + } 140 + msleep(10); 141 + 142 + /* Assert reset >=1 ms */ 143 + gpiod_set_value_cansleep(ws->reset, 1); 144 + usleep_range(1000, 5000); 145 + /* De-assert reset */ 146 + gpiod_set_value_cansleep(ws->reset, 0); 147 + /* Wait >= 10 ms */ 148 + msleep(10); 149 + dev_dbg(ws->dev, "de-asserted RESET\n"); 150 + 151 + /* 152 + * Exit sleep mode and initialize display - some hammering is 153 + * necessary. 
154 + */ 155 + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); 156 + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); 157 + msleep(50); 158 + 159 + /* Magic to unlock level 2 control of the display */ 160 + mipi_dbi_command(dbi, WS2401_PASSWD1, 0x5a, 0x5a); 161 + /* Configure resolution to 480RGBx800 */ 162 + mipi_dbi_command(dbi, WS2401_RESCTL, 0x12); 163 + /* Set addressing mode Flip V(d0), Flip H(d1) RGB/BGR(d3) */ 164 + mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, 0x01); 165 + /* Set pixel format: 24 bpp */ 166 + mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x70); 167 + mipi_dbi_command(dbi, WS2401_SMPS, 0x00, 0x0f); 168 + mipi_dbi_command(dbi, WS2401_PSMPS, 0x06, 0x03, /* DDVDH: 4.6v */ 169 + 0x7e, 0x03, 0x12, 0x37); 170 + mipi_dbi_command(dbi, WS2401_NSMPS, 0x06, 0x03, /* DDVDH: -4.6v */ 171 + 0x7e, 0x02, 0x15, 0x37); 172 + mipi_dbi_command(dbi, WS2401_SMPS, 0x02, 0x0f); 173 + mipi_dbi_command(dbi, WS2401_PWRCTL, 0x10, 0xA9, 0x00, 0x01, 0x44, 174 + 0xb4, /* VGH:16.1v, VGL:-13.8v */ 175 + 0x50, /* GREFP:4.2v (default) */ 176 + 0x50, /* GREFN:-4.2v (default) */ 177 + 0x00, 178 + 0x44); /* VOUTL:-10v (default) */ 179 + mipi_dbi_command(dbi, WS2401_DISCTL, 0x01, 0x00, 0x00, 0x00, 0x14, 180 + 0x16); 181 + mipi_dbi_command(dbi, WS2401_VCOMCTL, 0x30, 0x53, 0x53); 182 + mipi_dbi_command(dbi, WS2401_SRCCTL, 0x03, 0x0C, 0x00, 0x00, 0x00, 183 + 0x01, /* 2 dot inversion */ 184 + 0x01, 0x06, 0x03); 185 + mipi_dbi_command(dbi, WS2401_PANELCTL, 0x14, 0x00, 0x80, 0x00); 186 + mipi_dbi_command(dbi, WS2401_WRMIE, 0x01); 187 + 188 + /* Set up gamma, probably these are P-gamma and N-gamma for each color */ 189 + mipi_dbi_command(dbi, WS2401_GAMMA_R1, 0x00, 190 + 0x5b, 0x42, 0x41, 0x3f, 0x42, 0x3d, 0x38, 0x2e, 191 + 0x2b, 0x2a, 0x27, 0x22, 0x27, 0x0f, 0x00, 0x00); 192 + mipi_dbi_command(dbi, WS2401_GAMMA_R2, 0x00, 193 + 0x5b, 0x42, 0x41, 0x3f, 0x42, 0x3d, 0x38, 0x2e, 194 + 0x2b, 0x2a, 0x27, 0x22, 0x27, 0x0f, 0x00, 0x00); 195 + mipi_dbi_command(dbi, WS2401_GAMMA_G1, 0x00, 
196 + 0x59, 0x40, 0x3f, 0x3e, 0x41, 0x3d, 0x39, 0x2f, 197 + 0x2c, 0x2b, 0x29, 0x25, 0x29, 0x19, 0x08, 0x00); 198 + mipi_dbi_command(dbi, WS2401_GAMMA_G2, 0x00, 199 + 0x59, 0x40, 0x3f, 0x3e, 0x41, 0x3d, 0x39, 0x2f, 200 + 0x2c, 0x2b, 0x29, 0x25, 0x29, 0x19, 0x08, 0x00); 201 + mipi_dbi_command(dbi, WS2401_GAMMA_B1, 0x00, 202 + 0x57, 0x3b, 0x3a, 0x3b, 0x3f, 0x3b, 0x38, 0x27, 203 + 0x38, 0x2a, 0x26, 0x22, 0x34, 0x0c, 0x09, 0x00); 204 + mipi_dbi_command(dbi, WS2401_GAMMA_B2, 0x00, 205 + 0x57, 0x3b, 0x3a, 0x3b, 0x3f, 0x3b, 0x38, 0x27, 206 + 0x38, 0x2a, 0x26, 0x22, 0x34, 0x0c, 0x09, 0x00); 207 + 208 + if (ws->internal_bl) { 209 + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x2c); 210 + } else { 211 + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); 212 + /* 213 + * When not using internal backlight we do not need any further 214 + * L2 accesses to the panel so we close the door on our way out. 215 + * Otherwise we need to leave the L2 door open. 216 + */ 217 + mipi_dbi_command(dbi, WS2401_PASSWD1, 0xa5, 0xa5); 218 + } 219 + 220 + return 0; 221 + } 222 + 223 + static int ws2401_power_off(struct ws2401 *ws) 224 + { 225 + /* Go into RESET and disable regulators */ 226 + gpiod_set_value_cansleep(ws->reset, 1); 227 + return regulator_bulk_disable(ARRAY_SIZE(ws->regulators), 228 + ws->regulators); 229 + } 230 + 231 + static int ws2401_unprepare(struct drm_panel *panel) 232 + { 233 + struct ws2401 *ws = to_ws2401(panel); 234 + struct mipi_dbi *dbi = &ws->dbi; 235 + 236 + /* Make sure we disable backlight, if any */ 237 + if (ws->internal_bl) 238 + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); 239 + mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE); 240 + msleep(120); 241 + return ws2401_power_off(to_ws2401(panel)); 242 + } 243 + 244 + static int ws2401_disable(struct drm_panel *panel) 245 + { 246 + struct ws2401 *ws = to_ws2401(panel); 247 + struct mipi_dbi *dbi = &ws->dbi; 248 + 249 + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); 250 + msleep(25); 251 + 252 + return 0; 253 + } 254 + 255 
+ static int ws2401_prepare(struct drm_panel *panel) 256 + { 257 + return ws2401_power_on(to_ws2401(panel)); 258 + } 259 + 260 + static int ws2401_enable(struct drm_panel *panel) 261 + { 262 + struct ws2401 *ws = to_ws2401(panel); 263 + struct mipi_dbi *dbi = &ws->dbi; 264 + 265 + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); 266 + 267 + return 0; 268 + } 269 + 270 + /** 271 + * ws2401_get_modes() - return the mode 272 + * @panel: the panel to get the mode for 273 + * @connector: reference to the central DRM connector control structure 274 + */ 275 + static int ws2401_get_modes(struct drm_panel *panel, 276 + struct drm_connector *connector) 277 + { 278 + struct ws2401 *ws = to_ws2401(panel); 279 + struct drm_display_mode *mode; 280 + static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; 281 + 282 + /* 283 + * We just support the LMS380KF01 so far, if we implement more panels 284 + * this mode, the following connector display_info settings and 285 + * probably the custom DCS sequences needs to selected based on what 286 + * the target panel needs. 
287 + */ 288 + mode = drm_mode_duplicate(connector->dev, &lms380kf01_480_800_mode); 289 + if (!mode) { 290 + dev_err(ws->dev, "failed to add mode\n"); 291 + return -ENOMEM; 292 + } 293 + 294 + connector->display_info.bpc = 8; 295 + connector->display_info.width_mm = mode->width_mm; 296 + connector->display_info.height_mm = mode->height_mm; 297 + connector->display_info.bus_flags = 298 + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; 299 + drm_display_info_set_bus_formats(&connector->display_info, 300 + &bus_format, 1); 301 + 302 + drm_mode_set_name(mode); 303 + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 304 + 305 + drm_mode_probed_add(connector, mode); 306 + 307 + return 1; 308 + } 309 + 310 + static const struct drm_panel_funcs ws2401_drm_funcs = { 311 + .disable = ws2401_disable, 312 + .unprepare = ws2401_unprepare, 313 + .prepare = ws2401_prepare, 314 + .enable = ws2401_enable, 315 + .get_modes = ws2401_get_modes, 316 + }; 317 + 318 + static int ws2401_set_brightness(struct backlight_device *bl) 319 + { 320 + struct ws2401 *ws = bl_get_data(bl); 321 + struct mipi_dbi *dbi = &ws->dbi; 322 + u8 brightness = backlight_get_brightness(bl); 323 + 324 + if (backlight_is_blank(bl)) { 325 + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); 326 + } else { 327 + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x2c); 328 + mipi_dbi_command(dbi, WS2401_WRDISBV, brightness); 329 + } 330 + 331 + return 0; 332 + } 333 + 334 + static const struct backlight_ops ws2401_bl_ops = { 335 + .update_status = ws2401_set_brightness, 336 + }; 337 + 338 + static const struct backlight_properties ws2401_bl_props = { 339 + .type = BACKLIGHT_PLATFORM, 340 + .brightness = 120, 341 + .max_brightness = U8_MAX, 342 + }; 343 + 344 + static int ws2401_probe(struct spi_device *spi) 345 + { 346 + struct device *dev = &spi->dev; 347 + struct ws2401 *ws; 348 + int ret; 349 + 350 + ws = devm_kzalloc(dev, sizeof(*ws), GFP_KERNEL); 351 + if (!ws) 352 + return -ENOMEM; 353 + ws->dev = dev; 354 + 355 + /* 356 + * 
VCI is the analog voltage supply 357 + * VCCIO is the digital I/O voltage supply 358 + */ 359 + ws->regulators[0].supply = "vci"; 360 + ws->regulators[1].supply = "vccio"; 361 + ret = devm_regulator_bulk_get(dev, 362 + ARRAY_SIZE(ws->regulators), 363 + ws->regulators); 364 + if (ret) 365 + return dev_err_probe(dev, ret, "failed to get regulators\n"); 366 + 367 + ws->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 368 + if (IS_ERR(ws->reset)) { 369 + ret = PTR_ERR(ws->reset); 370 + return dev_err_probe(dev, ret, "no RESET GPIO\n"); 371 + } 372 + 373 + ret = mipi_dbi_spi_init(spi, &ws->dbi, NULL); 374 + if (ret) 375 + return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); 376 + ws->dbi.read_commands = ws2401_dbi_read_commands; 377 + 378 + ws2401_power_on(ws); 379 + ws2401_read_mtp_id(ws); 380 + ws2401_power_off(ws); 381 + 382 + drm_panel_init(&ws->panel, dev, &ws2401_drm_funcs, 383 + DRM_MODE_CONNECTOR_DPI); 384 + 385 + ret = drm_panel_of_backlight(&ws->panel); 386 + if (ret) 387 + return dev_err_probe(dev, ret, 388 + "failed to get external backlight device\n"); 389 + 390 + if (!ws->panel.backlight) { 391 + dev_dbg(dev, "no external backlight, using internal backlight\n"); 392 + ws->panel.backlight = 393 + devm_backlight_device_register(dev, "ws2401", dev, ws, 394 + &ws2401_bl_ops, &ws2401_bl_props); 395 + if (IS_ERR(ws->panel.backlight)) 396 + return dev_err_probe(dev, PTR_ERR(ws->panel.backlight), 397 + "failed to register backlight device\n"); 398 + } else { 399 + dev_dbg(dev, "using external backlight\n"); 400 + } 401 + 402 + spi_set_drvdata(spi, ws); 403 + 404 + drm_panel_add(&ws->panel); 405 + dev_dbg(dev, "added panel\n"); 406 + 407 + return 0; 408 + } 409 + 410 + static int ws2401_remove(struct spi_device *spi) 411 + { 412 + struct ws2401 *ws = spi_get_drvdata(spi); 413 + 414 + drm_panel_remove(&ws->panel); 415 + return 0; 416 + } 417 + 418 + /* 419 + * Samsung LMS380KF01 is the one instance of this display controller that we 420 + * know about, but 
if more are found, the controller can be parameterized 421 + * here and used for other configurations. 422 + */ 423 + static const struct of_device_id ws2401_match[] = { 424 + { .compatible = "samsung,lms380kf01", }, 425 + {}, 426 + }; 427 + MODULE_DEVICE_TABLE(of, ws2401_match); 428 + 429 + static struct spi_driver ws2401_driver = { 430 + .probe = ws2401_probe, 431 + .remove = ws2401_remove, 432 + .driver = { 433 + .name = "ws2401-panel", 434 + .of_match_table = ws2401_match, 435 + }, 436 + }; 437 + module_spi_driver(ws2401_driver); 438 + 439 + MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); 440 + MODULE_DESCRIPTION("Samsung WS2401 panel driver"); 441 + MODULE_LICENSE("GPL v2");
+6 -5
drivers/gpu/drm/radeon/radeon_device.c
··· 1067 1067 /** 1068 1068 * radeon_vga_set_decode - enable/disable vga decode 1069 1069 * 1070 - * @cookie: radeon_device pointer 1070 + * @pdev: PCI device 1071 1071 * @state: enable/disable vga decode 1072 1072 * 1073 1073 * Enable/disable vga decode (all asics). 1074 1074 * Returns VGA resource flags. 1075 1075 */ 1076 - static unsigned int radeon_vga_set_decode(void *cookie, bool state) 1076 + static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state) 1077 1077 { 1078 - struct radeon_device *rdev = cookie; 1078 + struct drm_device *dev = pci_get_drvdata(pdev); 1079 + struct radeon_device *rdev = dev->dev_private; 1079 1080 radeon_vga_set_state(rdev, state); 1080 1081 if (state) 1081 1082 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | ··· 1435 1434 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 1436 1435 /* this will fail for cards that aren't VGA class devices, just 1437 1436 * ignore it */ 1438 - vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1437 + vga_client_register(rdev->pdev, radeon_vga_set_decode); 1439 1438 1440 1439 if (rdev->flags & RADEON_IS_PX) 1441 1440 runtime = true; ··· 1531 1530 vga_switcheroo_unregister_client(rdev->pdev); 1532 1531 if (rdev->flags & RADEON_IS_PX) 1533 1532 vga_switcheroo_fini_domain_pm_ops(rdev->dev); 1534 - vga_client_register(rdev->pdev, NULL, NULL, NULL); 1533 + vga_client_unregister(rdev->pdev); 1535 1534 if (rdev->rio_mem) 1536 1535 pci_iounmap(rdev->pdev, rdev->rio_mem); 1537 1536 rdev->rio_mem = NULL;
+13 -4
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
··· 309 309 return 0; 310 310 } 311 311 312 + #define DSI_PHY_DELAY(fp, vp, mbps) DIV_ROUND_UP((fp) * (mbps) + 1000 * (vp), 8000) 313 + 312 314 static int 313 315 dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps, 314 316 struct dw_mipi_dsi_dphy_timing *timing) 315 317 { 316 - timing->clk_hs2lp = 0x40; 317 - timing->clk_lp2hs = 0x40; 318 - timing->data_hs2lp = 0x40; 319 - timing->data_lp2hs = 0x40; 318 + /* 319 + * From STM32MP157 datasheet, valid for STM32F469, STM32F7x9, STM32H747 320 + * phy_clkhs2lp_time = (272+136*UI)/(8*UI) 321 + * phy_clklp2hs_time = (512+40*UI)/(8*UI) 322 + * phy_hs2lp_time = (192+64*UI)/(8*UI) 323 + * phy_lp2hs_time = (256+32*UI)/(8*UI) 324 + */ 325 + timing->clk_hs2lp = DSI_PHY_DELAY(272, 136, lane_mbps); 326 + timing->clk_lp2hs = DSI_PHY_DELAY(512, 40, lane_mbps); 327 + timing->data_hs2lp = DSI_PHY_DELAY(192, 64, lane_mbps); 328 + timing->data_lp2hs = DSI_PHY_DELAY(256, 32, lane_mbps); 320 329 321 330 return 0; 322 331 }
+5 -3
drivers/gpu/drm/stm/ltdc.c
··· 1121 1121 1122 1122 ret = drm_bridge_attach(encoder, bridge, NULL, 0); 1123 1123 if (ret) { 1124 - drm_encoder_cleanup(encoder); 1125 - return -EINVAL; 1124 + if (ret != -EPROBE_DEFER) 1125 + drm_encoder_cleanup(encoder); 1126 + return ret; 1126 1127 } 1127 1128 1128 1129 DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id); ··· 1266 1265 if (bridge) { 1267 1266 ret = ltdc_encoder_init(ddev, bridge); 1268 1267 if (ret) { 1269 - DRM_ERROR("init encoder endpoint %d\n", i); 1268 + if (ret != -EPROBE_DEFER) 1269 + DRM_ERROR("init encoder endpoint %d\n", i); 1270 1270 goto err; 1271 1271 } 1272 1272 }
+2 -2
drivers/gpu/drm/tiny/Kconfig
··· 64 64 buffer, size, and display format must be provided via device tree, 65 65 UEFI, VESA, etc. 66 66 67 - On x86 and compatible, you should also select CONFIG_X86_SYSFB to 68 - use UEFI and VESA framebuffers. 67 + On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB 68 + to use UEFI and VESA framebuffers. 69 69 70 70 config TINYDRM_HX8357D 71 71 tristate "DRM support for HX8357D display panels"
+1 -1
drivers/gpu/drm/tiny/bochs.c
··· 648 648 if (IS_ERR(dev)) 649 649 return PTR_ERR(dev); 650 650 651 - ret = pci_enable_device(pdev); 651 + ret = pcim_enable_device(pdev); 652 652 if (ret) 653 653 goto err_free_dev; 654 654
+1
drivers/gpu/drm/v3d/Makefile
··· 9 9 v3d_gem.o \ 10 10 v3d_irq.o \ 11 11 v3d_mmu.o \ 12 + v3d_perfmon.o \ 12 13 v3d_trace_points.o \ 13 14 v3d_sched.o 14 15
+8
drivers/gpu/drm/v3d/v3d_drv.c
··· 94 94 case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH: 95 95 args->value = 1; 96 96 return 0; 97 + case DRM_V3D_PARAM_SUPPORTS_PERFMON: 98 + args->value = (v3d->ver >= 40); 99 + return 0; 97 100 default: 98 101 DRM_DEBUG("Unknown parameter %d\n", args->param); 99 102 return -EINVAL; ··· 124 121 1, NULL); 125 122 } 126 123 124 + v3d_perfmon_open_file(v3d_priv); 127 125 file->driver_priv = v3d_priv; 128 126 129 127 return 0; ··· 140 136 drm_sched_entity_destroy(&v3d_priv->sched_entity[q]); 141 137 } 142 138 139 + v3d_perfmon_close_file(v3d_priv); 143 140 kfree(v3d_priv); 144 141 } 145 142 ··· 161 156 DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), 162 157 DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), 163 158 DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), 159 + DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW), 160 + DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW), 161 + DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), 164 162 }; 165 163 166 164 static const struct drm_driver v3d_drm_driver = {
+63
drivers/gpu/drm/v3d/v3d_drv.h
··· 37 37 u64 emit_seqno; 38 38 }; 39 39 40 + /* Performance monitor object. The perform lifetime is controlled by userspace 41 + * using perfmon related ioctls. A perfmon can be attached to a submit_cl 42 + * request, and when this is the case, HW perf counters will be activated just 43 + * before the submit_cl is submitted to the GPU and disabled when the job is 44 + * done. This way, only events related to a specific job will be counted. 45 + */ 46 + struct v3d_perfmon { 47 + /* Tracks the number of users of the perfmon, when this counter reaches 48 + * zero the perfmon is destroyed. 49 + */ 50 + refcount_t refcnt; 51 + 52 + /* Protects perfmon stop, as it can be invoked from multiple places. */ 53 + struct mutex lock; 54 + 55 + /* Number of counters activated in this perfmon instance 56 + * (should be less than DRM_V3D_MAX_PERF_COUNTERS). 57 + */ 58 + u8 ncounters; 59 + 60 + /* Events counted by the HW perf counters. */ 61 + u8 counters[DRM_V3D_MAX_PERF_COUNTERS]; 62 + 63 + /* Storage for counter values. Counters are incremented by the 64 + * HW perf counter values every time the perfmon is attached 65 + * to a GPU job. This way, perfmon users don't have to 66 + * retrieve the results after each job if they want to track 67 + * events covering several submissions. Note that counter 68 + * values can't be reset, but you can fake a reset by 69 + * destroying the perfmon and creating a new one. 70 + */ 71 + u64 values[]; 72 + }; 73 + 40 74 struct v3d_dev { 41 75 struct drm_device drm; 42 76 ··· 123 89 */ 124 90 spinlock_t job_lock; 125 91 92 + /* Used to track the active perfmon if any. */ 93 + struct v3d_perfmon *active_perfmon; 94 + 126 95 /* Protects bo_stats */ 127 96 struct mutex bo_lock; 128 97 ··· 169 132 /* The per-fd struct, which tracks the MMU mappings. 
*/ 170 133 struct v3d_file_priv { 171 134 struct v3d_dev *v3d; 135 + 136 + struct { 137 + struct idr idr; 138 + struct mutex lock; 139 + } perfmon; 172 140 173 141 struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; 174 142 }; ··· 246 204 * the BO reservations can be released. 247 205 */ 248 206 struct dma_fence *done_fence; 207 + 208 + /* Pointer to a performance monitor object if the user requested it, 209 + * NULL otherwise. 210 + */ 211 + struct v3d_perfmon *perfmon; 249 212 250 213 /* Callback for the freeing of the job on refcount going to 0. */ 251 214 void (*free)(struct kref *ref); ··· 400 353 /* v3d_sched.c */ 401 354 int v3d_sched_init(struct v3d_dev *v3d); 402 355 void v3d_sched_fini(struct v3d_dev *v3d); 356 + 357 + /* v3d_perfmon.c */ 358 + void v3d_perfmon_get(struct v3d_perfmon *perfmon); 359 + void v3d_perfmon_put(struct v3d_perfmon *perfmon); 360 + void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon); 361 + void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon, 362 + bool capture); 363 + struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id); 364 + void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv); 365 + void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv); 366 + int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, 367 + struct drm_file *file_priv); 368 + int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, 369 + struct drm_file *file_priv); 370 + int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, 371 + struct drm_file *file_priv);
+31
drivers/gpu/drm/v3d/v3d_gem.c
··· 126 126 v3d_mmu_set_page_table(v3d); 127 127 v3d_irq_reset(v3d); 128 128 129 + v3d_perfmon_stop(v3d, v3d->active_perfmon, false); 130 + 129 131 trace_v3d_reset_end(dev); 130 132 } 131 133 ··· 377 375 pm_runtime_mark_last_busy(job->v3d->drm.dev); 378 376 pm_runtime_put_autosuspend(job->v3d->drm.dev); 379 377 378 + if (job->perfmon) 379 + v3d_perfmon_put(job->perfmon); 380 + 380 381 kfree(job); 381 382 } 382 383 ··· 544 539 545 540 trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); 546 541 542 + if (args->pad != 0) 543 + return -EINVAL; 544 + 547 545 if (args->flags != 0 && 548 546 args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { 549 547 DRM_INFO("invalid flags: %d\n", args->flags); ··· 619 611 if (ret) 620 612 goto fail; 621 613 614 + if (args->perfmon_id) { 615 + render->base.perfmon = v3d_perfmon_find(v3d_priv, 616 + args->perfmon_id); 617 + 618 + if (!render->base.perfmon) { 619 + ret = -ENOENT; 620 + goto fail; 621 + } 622 + } 623 + 622 624 mutex_lock(&v3d->sched_lock); 623 625 if (bin) { 626 + bin->base.perfmon = render->base.perfmon; 627 + v3d_perfmon_get(bin->base.perfmon); 624 628 ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN); 625 629 if (ret) 626 630 goto fail_unreserve; ··· 653 633 ret = drm_gem_fence_array_add(&clean_job->deps, render_fence); 654 634 if (ret) 655 635 goto fail_unreserve; 636 + clean_job->perfmon = render->base.perfmon; 637 + v3d_perfmon_get(clean_job->perfmon); 656 638 ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN); 657 639 if (ret) 658 640 goto fail_unreserve; ··· 848 826 ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx); 849 827 if (ret) 850 828 goto fail; 829 + 830 + if (args->perfmon_id) { 831 + job->base.perfmon = v3d_perfmon_find(v3d_priv, 832 + args->perfmon_id); 833 + if (!job->base.perfmon) { 834 + ret = -ENOENT; 835 + goto fail; 836 + } 837 + } 851 838 852 839 mutex_lock(&v3d->sched_lock); 853 840 ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
+213
drivers/gpu/drm/v3d/v3d_perfmon.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2021 Raspberry Pi 4 + */ 5 + 6 + #include "v3d_drv.h" 7 + #include "v3d_regs.h" 8 + 9 + #define V3D_PERFMONID_MIN 1 10 + #define V3D_PERFMONID_MAX U32_MAX 11 + 12 + void v3d_perfmon_get(struct v3d_perfmon *perfmon) 13 + { 14 + if (perfmon) 15 + refcount_inc(&perfmon->refcnt); 16 + } 17 + 18 + void v3d_perfmon_put(struct v3d_perfmon *perfmon) 19 + { 20 + if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) 21 + kfree(perfmon); 22 + } 23 + 24 + void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon) 25 + { 26 + unsigned int i; 27 + u32 mask; 28 + u8 ncounters = perfmon->ncounters; 29 + 30 + if (WARN_ON_ONCE(!perfmon || v3d->active_perfmon)) 31 + return; 32 + 33 + mask = GENMASK(ncounters - 1, 0); 34 + 35 + for (i = 0; i < ncounters; i++) { 36 + u32 source = i / 4; 37 + u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0); 38 + 39 + i++; 40 + channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, 41 + V3D_PCTR_S1); 42 + i++; 43 + channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, 44 + V3D_PCTR_S2); 45 + i++; 46 + channel |= V3D_SET_FIELD(i < ncounters ? 
perfmon->counters[i] : 0, 47 + V3D_PCTR_S3); 48 + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel); 49 + } 50 + 51 + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask); 52 + V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask); 53 + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); 54 + 55 + v3d->active_perfmon = perfmon; 56 + } 57 + 58 + void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon, 59 + bool capture) 60 + { 61 + unsigned int i; 62 + 63 + if (!perfmon || !v3d->active_perfmon) 64 + return; 65 + 66 + mutex_lock(&perfmon->lock); 67 + if (perfmon != v3d->active_perfmon) { 68 + mutex_unlock(&perfmon->lock); 69 + return; 70 + } 71 + 72 + if (capture) 73 + for (i = 0; i < perfmon->ncounters; i++) 74 + perfmon->values[i] += V3D_CORE_READ(0, V3D_PCTR_0_PCTRX(i)); 75 + 76 + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, 0); 77 + 78 + v3d->active_perfmon = NULL; 79 + mutex_unlock(&perfmon->lock); 80 + } 81 + 82 + struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id) 83 + { 84 + struct v3d_perfmon *perfmon; 85 + 86 + mutex_lock(&v3d_priv->perfmon.lock); 87 + perfmon = idr_find(&v3d_priv->perfmon.idr, id); 88 + v3d_perfmon_get(perfmon); 89 + mutex_unlock(&v3d_priv->perfmon.lock); 90 + 91 + return perfmon; 92 + } 93 + 94 + void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) 95 + { 96 + mutex_init(&v3d_priv->perfmon.lock); 97 + idr_init(&v3d_priv->perfmon.idr); 98 + } 99 + 100 + static int v3d_perfmon_idr_del(int id, void *elem, void *data) 101 + { 102 + struct v3d_perfmon *perfmon = elem; 103 + 104 + v3d_perfmon_put(perfmon); 105 + 106 + return 0; 107 + } 108 + 109 + void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) 110 + { 111 + mutex_lock(&v3d_priv->perfmon.lock); 112 + idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); 113 + idr_destroy(&v3d_priv->perfmon.idr); 114 + mutex_unlock(&v3d_priv->perfmon.lock); 115 + } 116 + 117 + int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, 118 + struct drm_file *file_priv) 
119 + { 120 + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; 121 + struct drm_v3d_perfmon_create *req = data; 122 + struct v3d_perfmon *perfmon; 123 + unsigned int i; 124 + int ret; 125 + 126 + /* Number of monitored counters cannot exceed HW limits. */ 127 + if (req->ncounters > DRM_V3D_MAX_PERF_COUNTERS || 128 + !req->ncounters) 129 + return -EINVAL; 130 + 131 + /* Make sure all counters are valid. */ 132 + for (i = 0; i < req->ncounters; i++) { 133 + if (req->counters[i] >= V3D_PERFCNT_NUM) 134 + return -EINVAL; 135 + } 136 + 137 + perfmon = kzalloc(struct_size(perfmon, values, req->ncounters), 138 + GFP_KERNEL); 139 + if (!perfmon) 140 + return -ENOMEM; 141 + 142 + for (i = 0; i < req->ncounters; i++) 143 + perfmon->counters[i] = req->counters[i]; 144 + 145 + perfmon->ncounters = req->ncounters; 146 + 147 + refcount_set(&perfmon->refcnt, 1); 148 + mutex_init(&perfmon->lock); 149 + 150 + mutex_lock(&v3d_priv->perfmon.lock); 151 + ret = idr_alloc(&v3d_priv->perfmon.idr, perfmon, V3D_PERFMONID_MIN, 152 + V3D_PERFMONID_MAX, GFP_KERNEL); 153 + mutex_unlock(&v3d_priv->perfmon.lock); 154 + 155 + if (ret < 0) { 156 + kfree(perfmon); 157 + return ret; 158 + } 159 + 160 + req->id = ret; 161 + 162 + return 0; 163 + } 164 + 165 + int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, 166 + struct drm_file *file_priv) 167 + { 168 + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; 169 + struct drm_v3d_perfmon_destroy *req = data; 170 + struct v3d_perfmon *perfmon; 171 + 172 + mutex_lock(&v3d_priv->perfmon.lock); 173 + perfmon = idr_remove(&v3d_priv->perfmon.idr, req->id); 174 + mutex_unlock(&v3d_priv->perfmon.lock); 175 + 176 + if (!perfmon) 177 + return -EINVAL; 178 + 179 + v3d_perfmon_put(perfmon); 180 + 181 + return 0; 182 + } 183 + 184 + int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, 185 + struct drm_file *file_priv) 186 + { 187 + struct v3d_dev *v3d = to_v3d_dev(dev); 188 + struct v3d_file_priv *v3d_priv = 
file_priv->driver_priv; 189 + struct drm_v3d_perfmon_get_values *req = data; 190 + struct v3d_perfmon *perfmon; 191 + int ret = 0; 192 + 193 + if (req->pad != 0) 194 + return -EINVAL; 195 + 196 + mutex_lock(&v3d_priv->perfmon.lock); 197 + perfmon = idr_find(&v3d_priv->perfmon.idr, req->id); 198 + v3d_perfmon_get(perfmon); 199 + mutex_unlock(&v3d_priv->perfmon.lock); 200 + 201 + if (!perfmon) 202 + return -EINVAL; 203 + 204 + v3d_perfmon_stop(v3d, perfmon, true); 205 + 206 + if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->values, 207 + perfmon->ncounters * sizeof(u64))) 208 + ret = -EFAULT; 209 + 210 + v3d_perfmon_put(perfmon); 211 + 212 + return ret; 213 + }
+2
drivers/gpu/drm/v3d/v3d_regs.h
··· 347 347 /* Each src reg muxes four counters each. */ 348 348 #define V3D_V4_PCTR_0_SRC_0_3 0x00660 349 349 #define V3D_V4_PCTR_0_SRC_28_31 0x0067c 350 + #define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \ 351 + 4 * (x)) 350 352 # define V3D_PCTR_S0_MASK V3D_MASK(6, 0) 351 353 # define V3D_PCTR_S0_SHIFT 0 352 354 # define V3D_PCTR_S1_MASK V3D_MASK(14, 8)
+16
drivers/gpu/drm/v3d/v3d_sched.c
··· 63 63 v3d_job_put(job); 64 64 } 65 65 66 + static void 67 + v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) 68 + { 69 + if (job->perfmon != v3d->active_perfmon) 70 + v3d_perfmon_stop(v3d, v3d->active_perfmon, true); 71 + 72 + if (job->perfmon && v3d->active_perfmon != job->perfmon) 73 + v3d_perfmon_start(v3d, job->perfmon); 74 + } 75 + 66 76 /* 67 77 * Returns the fences that the job depends on, one by one. 68 78 * ··· 130 120 trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno, 131 121 job->start, job->end); 132 122 123 + v3d_switch_perfmon(v3d, &job->base); 124 + 133 125 /* Set the current and end address of the control list. 134 126 * Writing the end register is what starts the job. 135 127 */ ··· 180 168 181 169 trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno, 182 170 job->start, job->end); 171 + 172 + v3d_switch_perfmon(v3d, &job->base); 183 173 184 174 /* XXX: Set the QCFG */ 185 175 ··· 253 239 job->base.irq_fence = dma_fence_get(fence); 254 240 255 241 trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno); 242 + 243 + v3d_switch_perfmon(v3d, &job->base); 256 244 257 245 for (i = 1; i <= 6; i++) 258 246 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
+23 -44
drivers/gpu/vga/vgaarb.c
··· 72 72 unsigned int io_norm_cnt; /* normal IO count */ 73 73 unsigned int mem_norm_cnt; /* normal MEM count */ 74 74 bool bridge_has_one_vga; 75 - /* allow IRQ enable/disable hook */ 76 - void *cookie; 77 - void (*irq_set_state)(void *cookie, bool enable); 78 - unsigned int (*set_vga_decode)(void *cookie, bool decode); 75 + unsigned int (*set_decode)(struct pci_dev *pdev, bool decode); 79 76 }; 80 77 81 78 static LIST_HEAD(vga_list); ··· 215 218 #endif 216 219 EXPORT_SYMBOL(vga_remove_vgacon); 217 220 218 - static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) 219 - { 220 - if (vgadev->irq_set_state) 221 - vgadev->irq_set_state(vgadev->cookie, state); 222 - } 223 - 224 - 225 221 /* If we don't ever use VGA arb we should avoid 226 222 turning off anything anywhere due to old X servers getting 227 223 confused about the boot device not being VGA */ ··· 274 284 if (vgadev == conflict) 275 285 continue; 276 286 277 - /* Check if the architecture allows a conflict between those 278 - * 2 devices or if they are on separate domains 279 - */ 280 - if (!vga_conflicts(vgadev->pdev, conflict->pdev)) 281 - continue; 282 - 283 287 /* We have a possible conflict. before we go further, we must 284 288 * check if we sit on the same bus as the conflicting device. 
285 289 * if we don't, then we must tie both IO and MEM resources ··· 315 331 if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO) 316 332 pci_bits |= PCI_COMMAND_IO; 317 333 318 - if (pci_bits) { 319 - vga_irq_set_state(conflict, false); 334 + if (pci_bits) 320 335 flags |= PCI_VGA_STATE_CHANGE_DECODES; 321 - } 322 336 } 323 337 324 338 if (change_bridge) ··· 352 370 flags |= PCI_VGA_STATE_CHANGE_BRIDGE; 353 371 354 372 pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); 355 - 356 - if (!vgadev->bridge_has_one_vga) 357 - vga_irq_set_state(vgadev, true); 358 373 359 374 vgadev->owns |= wants; 360 375 lock_them: ··· 805 826 goto bail; 806 827 807 828 /* don't let userspace futz with kernel driver decodes */ 808 - if (userspace && vgadev->set_vga_decode) 829 + if (userspace && vgadev->set_decode) 809 830 goto bail; 810 831 811 832 /* update the device decodes + counter */ ··· 819 840 spin_unlock_irqrestore(&vga_lock, flags); 820 841 } 821 842 843 + /** 844 + * vga_set_legacy_decoding 845 + * @pdev: pci device of the VGA card 846 + * @decodes: bit mask of what legacy regions the card decodes 847 + * 848 + * Indicates to the arbiter if the card decodes legacy VGA IOs, legacy VGA 849 + * Memory, both, or none. All cards default to both, the card driver (fbdev for 850 + * example) should tell the arbiter if it has disabled legacy decoding, so the 851 + * card can be left out of the arbitration process (and can be safe to take 852 + * interrupts at any time). 
853 + */ 822 854 void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes) 823 855 { 824 856 __vga_set_legacy_decoding(pdev, decodes, false); ··· 839 849 /** 840 850 * vga_client_register - register or unregister a VGA arbitration client 841 851 * @pdev: pci device of the VGA client 842 - * @cookie: client cookie to be used in callbacks 843 - * @irq_set_state: irq state change callback 844 - * @set_vga_decode: vga decode change callback 852 + * @set_decode: vga decode change callback 845 853 * 846 854 * Clients have two callback mechanisms they can use. 847 855 * 848 - * @irq_set_state callback: If a client can't disable its GPUs VGA 849 - * resources, then we need to be able to ask it to turn off its irqs when we 850 - * turn off its mem and io decoding. 851 - * 852 - * @set_vga_decode callback: If a client can disable its GPU VGA resource, it 856 + * @set_decode callback: If a client can disable its GPU VGA resource, it 853 857 * will get a callback from this to set the encode/decode state. 854 858 * 855 859 * Rationale: we cannot disable VGA decode resources unconditionally some single ··· 856 872 * This function does not check whether a client for @pdev has been registered 857 873 * already. 858 874 * 859 - * To unregister just call this function with @irq_set_state and @set_vga_decode 860 - * both set to NULL for the same @pdev as originally used to register them. 875 + * To unregister just call vga_client_unregister(). 
861 876 * 862 877 * Returns: 0 on success, -1 on failure 863 878 */ 864 - int vga_client_register(struct pci_dev *pdev, void *cookie, 865 - void (*irq_set_state)(void *cookie, bool state), 866 - unsigned int (*set_vga_decode)(void *cookie, 867 - bool decode)) 879 + int vga_client_register(struct pci_dev *pdev, 880 + unsigned int (*set_decode)(struct pci_dev *pdev, bool decode)) 868 881 { 869 882 int ret = -ENODEV; 870 883 struct vga_device *vgadev; ··· 872 891 if (!vgadev) 873 892 goto bail; 874 893 875 - vgadev->irq_set_state = irq_set_state; 876 - vgadev->set_vga_decode = set_vga_decode; 877 - vgadev->cookie = cookie; 894 + vgadev->set_decode = set_decode; 878 895 ret = 0; 879 896 880 897 bail: ··· 1382 1403 new_state = false; 1383 1404 else 1384 1405 new_state = true; 1385 - if (vgadev->set_vga_decode) { 1386 - new_decodes = vgadev->set_vga_decode(vgadev->cookie, 1387 - new_state); 1406 + if (vgadev->set_decode) { 1407 + new_decodes = vgadev->set_decode(vgadev->pdev, 1408 + new_state); 1388 1409 vga_update_device_decodes(vgadev, new_decodes); 1389 1410 } 1390 1411 }
+5 -6
drivers/vfio/pci/vfio_pci.c
··· 119 119 * has no way to get to it and routing can be disabled externally at the 120 120 * bridge. 121 121 */ 122 - static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga) 122 + static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga) 123 123 { 124 - struct vfio_pci_device *vdev = opaque; 125 - struct pci_dev *tmp = NULL, *pdev = vdev->pdev; 124 + struct pci_dev *tmp = NULL; 126 125 unsigned char max_busnr; 127 126 unsigned int decodes; 128 127 ··· 1953 1954 if (!vfio_pci_is_vga(pdev)) 1954 1955 return 0; 1955 1956 1956 - ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode); 1957 + ret = vga_client_register(pdev, vfio_pci_set_decode); 1957 1958 if (ret) 1958 1959 return ret; 1959 - vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false)); 1960 + vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false)); 1960 1961 return 0; 1961 1962 } 1962 1963 ··· 1966 1967 1967 1968 if (!vfio_pci_is_vga(pdev)) 1968 1969 return; 1969 - vga_client_register(pdev, NULL, NULL, NULL); 1970 + vga_client_unregister(pdev); 1970 1971 vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM | 1971 1972 VGA_RSRC_LEGACY_IO | 1972 1973 VGA_RSRC_LEGACY_MEM);
+1 -1
drivers/video/fbdev/arcfb.c
··· 446 446 /* modded from epson 1355 */ 447 447 448 448 unsigned long p; 449 - int err=-EINVAL; 449 + int err; 450 450 unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount; 451 451 struct arcfb_par *par; 452 452 unsigned int xres;
+3 -3
drivers/video/fbdev/core/fbmem.c
··· 67 67 mutex_lock(&registration_lock); 68 68 fb_info = registered_fb[idx]; 69 69 if (fb_info) 70 - atomic_inc(&fb_info->count); 70 + refcount_inc(&fb_info->count); 71 71 mutex_unlock(&registration_lock); 72 72 73 73 return fb_info; ··· 75 75 76 76 static void put_fb_info(struct fb_info *fb_info) 77 77 { 78 - if (!atomic_dec_and_test(&fb_info->count)) 78 + if (!refcount_dec_and_test(&fb_info->count)) 79 79 return; 80 80 if (fb_info->fbops->fb_destroy) 81 81 fb_info->fbops->fb_destroy(fb_info); ··· 1592 1592 if (!registered_fb[i]) 1593 1593 break; 1594 1594 fb_info->node = i; 1595 - atomic_set(&fb_info->count, 1); 1595 + refcount_set(&fb_info->count, 1); 1596 1596 mutex_init(&fb_info->lock); 1597 1597 mutex_init(&fb_info->mm_lock); 1598 1598
+5
drivers/video/fbdev/kyro/fbdev.c
··· 372 372 /* probably haven't called CreateOverlay yet */ 373 373 return -EINVAL; 374 374 375 + if (ulWidth == 0 || ulWidth == 0xffffffff || 376 + ulHeight == 0 || ulHeight == 0xffffffff || 377 + (x < 2 && ulWidth + 2 == 0)) 378 + return -EINVAL; 379 + 375 380 /* Stop Ramdac Output */ 376 381 DisableRamdacOutput(deviceInfo.pSTGReg); 377 382
+1 -1
drivers/video/fbdev/neofb.c
··· 585 585 586 586 DBG("neofb_check_var"); 587 587 588 - if (PICOS2KHZ(var->pixclock) > par->maxClock) 588 + if (var->pixclock && PICOS2KHZ(var->pixclock) > par->maxClock) 589 589 return -EINVAL; 590 590 591 591 /* Is the mode larger than the LCD panel? */
+1
include/drm/drm_auth.h
··· 107 107 }; 108 108 109 109 struct drm_master *drm_master_get(struct drm_master *master); 110 + struct drm_master *drm_file_get_master(struct drm_file *file_priv); 110 111 void drm_master_put(struct drm_master **master); 111 112 bool drm_is_current_master(struct drm_file *fpriv); 112 113
+15 -3
include/drm/drm_file.h
··· 226 226 /** 227 227 * @master: 228 228 * 229 - * Master this node is currently associated with. Only relevant if 230 - * drm_is_primary_client() returns true. Note that this only 231 - * matches &drm_device.master if the master is the currently active one. 229 + * Master this node is currently associated with. Protected by struct 230 + * &drm_device.master_mutex, and serialized by @master_lookup_lock. 231 + * 232 + * Only relevant if drm_is_primary_client() returns true. Note that 233 + * this only matches &drm_device.master if the master is the currently 234 + * active one. 235 + * 236 + * When dereferencing this pointer, either hold struct 237 + * &drm_device.master_mutex for the duration of the pointer's use, or 238 + * use drm_file_get_master() if struct &drm_device.master_mutex is not 239 + * currently held and there is no other need to hold it. This prevents 240 + * @master from being freed during use. 232 241 * 233 242 * See also @authentication and @is_master and the :ref:`section on 234 243 * primary nodes and authentication <drm_primary_node>`. 235 244 */ 236 245 struct drm_master *master; 246 + 247 + /** @master_lookup_lock: Serializes @master. */ 248 + spinlock_t master_lookup_lock; 237 249 238 250 /** @pid: Process that opened this file. */ 239 251 struct pid *pid;
+1 -1
include/drm/drm_print.h
··· 327 327 /* 328 328 * struct device based logging 329 329 * 330 - * Prefer drm_device based logging over device or prink based logging. 330 + * Prefer drm_device based logging over device or printk based logging. 331 331 */ 332 332 333 333 __printf(3, 4)
-17
include/linux/dma-buf.h
··· 444 444 struct dma_buf_sysfs_entry { 445 445 struct kobject kobj; 446 446 struct dma_buf *dmabuf; 447 - 448 - /** 449 - * @sysfs_entry.attachment_uid: 450 - * 451 - * This is protected by the dma_resv_lock() on @resv and is 452 - * incremented on each attach. 453 - */ 454 - unsigned int attachment_uid; 455 - struct kset *attach_stats_kset; 456 447 } *sysfs_entry; 457 448 #endif 458 449 }; ··· 495 504 * @importer_ops: importer operations for this attachment, if provided 496 505 * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held. 497 506 * @importer_priv: importer specific attachment data. 498 - * @sysfs_entry: For exposing information about this attachment in sysfs. 499 507 * 500 508 * This structure holds the attachment information between the dma_buf buffer 501 509 * and its user device(s). The list contains one attachment struct per device ··· 515 525 const struct dma_buf_attach_ops *importer_ops; 516 526 void *importer_priv; 517 527 void *priv; 518 - #ifdef CONFIG_DMABUF_SYSFS_STATS 519 - /* for sysfs stats */ 520 - struct dma_buf_attach_sysfs_entry { 521 - struct kobject kobj; 522 - unsigned int map_counter; 523 - } *sysfs_entry; 524 - #endif 525 528 }; 526 529 527 530 /**
+2 -1
include/linux/fb.h
··· 2 2 #ifndef _LINUX_FB_H 3 3 #define _LINUX_FB_H 4 4 5 + #include <linux/refcount.h> 5 6 #include <linux/kgdb.h> 6 7 #include <uapi/linux/fb.h> 7 8 ··· 436 435 437 436 438 437 struct fb_info { 439 - atomic_t count; 438 + refcount_t count; 440 439 int node; 441 440 int flags; 442 441 /*
+42 -76
include/linux/vgaarb.h
··· 33 33 34 34 #include <video/vga.h> 35 35 36 + struct pci_dev; 37 + 36 38 /* Legacy VGA regions */ 37 39 #define VGA_RSRC_NONE 0x00 38 40 #define VGA_RSRC_LEGACY_IO 0x01 ··· 44 42 #define VGA_RSRC_NORMAL_IO 0x04 45 43 #define VGA_RSRC_NORMAL_MEM 0x08 46 44 47 - /* Passing that instead of a pci_dev to use the system "default" 48 - * device, that is the one used by vgacon. Archs will probably 49 - * have to provide their own vga_default_device(); 50 - */ 51 - #define VGA_DEFAULT_DEVICE (NULL) 52 - 53 - struct pci_dev; 54 - 55 - /* For use by clients */ 56 - 57 - /** 58 - * vga_set_legacy_decoding 59 - * 60 - * @pdev: pci device of the VGA card 61 - * @decodes: bit mask of what legacy regions the card decodes 62 - * 63 - * Indicates to the arbiter if the card decodes legacy VGA IOs, 64 - * legacy VGA Memory, both, or none. All cards default to both, 65 - * the card driver (fbdev for example) should tell the arbiter 66 - * if it has disabled legacy decoding, so the card can be left 67 - * out of the arbitration process (and can be safe to take 68 - * interrupts at any time. 
69 - */ 70 - #if defined(CONFIG_VGA_ARB) 71 - extern void vga_set_legacy_decoding(struct pci_dev *pdev, 72 - unsigned int decodes); 73 - #else 45 + #ifdef CONFIG_VGA_ARB 46 + void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes); 47 + int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); 48 + void vga_put(struct pci_dev *pdev, unsigned int rsrc); 49 + struct pci_dev *vga_default_device(void); 50 + void vga_set_default_device(struct pci_dev *pdev); 51 + int vga_remove_vgacon(struct pci_dev *pdev); 52 + int vga_client_register(struct pci_dev *pdev, 53 + unsigned int (*set_decode)(struct pci_dev *pdev, bool state)); 54 + #else /* CONFIG_VGA_ARB */ 74 55 static inline void vga_set_legacy_decoding(struct pci_dev *pdev, 75 - unsigned int decodes) { }; 76 - #endif 77 - 78 - #if defined(CONFIG_VGA_ARB) 79 - extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); 80 - #else 81 - static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; } 82 - #endif 56 + unsigned int decodes) 57 + { 58 + }; 59 + static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, 60 + int interruptible) 61 + { 62 + return 0; 63 + } 64 + static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc) 65 + { 66 + } 67 + static inline struct pci_dev *vga_default_device(void) 68 + { 69 + return NULL; 70 + } 71 + static inline void vga_set_default_device(struct pci_dev *pdev) 72 + { 73 + } 74 + static inline int vga_remove_vgacon(struct pci_dev *pdev) 75 + { 76 + return 0; 77 + } 78 + static inline int vga_client_register(struct pci_dev *pdev, 79 + unsigned int (*set_decode)(struct pci_dev *pdev, bool state)) 80 + { 81 + return 0; 82 + } 83 + #endif /* CONFIG_VGA_ARB */ 83 84 84 85 /** 85 86 * vga_get_interruptible ··· 114 109 return vga_get(pdev, rsrc, 0); 115 110 } 116 111 117 - #if defined(CONFIG_VGA_ARB) 118 - extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); 119 - #else 120 - 
static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc) 112 + static inline void vga_client_unregister(struct pci_dev *pdev) 121 113 { 114 + vga_client_register(pdev, NULL); 122 115 } 123 - #endif 124 - 125 - 126 - #ifdef CONFIG_VGA_ARB 127 - extern struct pci_dev *vga_default_device(void); 128 - extern void vga_set_default_device(struct pci_dev *pdev); 129 - extern int vga_remove_vgacon(struct pci_dev *pdev); 130 - #else 131 - static inline struct pci_dev *vga_default_device(void) { return NULL; } 132 - static inline void vga_set_default_device(struct pci_dev *pdev) { } 133 - static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; } 134 - #endif 135 - 136 - /* 137 - * Architectures should define this if they have several 138 - * independent PCI domains that can afford concurrent VGA 139 - * decoding 140 - */ 141 - #ifndef __ARCH_HAS_VGA_CONFLICT 142 - static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) 143 - { 144 - return 1; 145 - } 146 - #endif 147 - 148 - #if defined(CONFIG_VGA_ARB) 149 - int vga_client_register(struct pci_dev *pdev, void *cookie, 150 - void (*irq_set_state)(void *cookie, bool state), 151 - unsigned int (*set_vga_decode)(void *cookie, bool state)); 152 - #else 153 - static inline int vga_client_register(struct pci_dev *pdev, void *cookie, 154 - void (*irq_set_state)(void *cookie, bool state), 155 - unsigned int (*set_vga_decode)(void *cookie, bool state)) 156 - { 157 - return 0; 158 - } 159 - #endif 160 116 161 117 #endif /* LINUX_VGA_H */
+136
include/uapi/drm/v3d_drm.h
··· 38 38 #define DRM_V3D_GET_BO_OFFSET 0x05 39 39 #define DRM_V3D_SUBMIT_TFU 0x06 40 40 #define DRM_V3D_SUBMIT_CSD 0x07 41 + #define DRM_V3D_PERFMON_CREATE 0x08 42 + #define DRM_V3D_PERFMON_DESTROY 0x09 43 + #define DRM_V3D_PERFMON_GET_VALUES 0x0a 41 44 42 45 #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) 43 46 #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) ··· 50 47 #define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) 51 48 #define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu) 52 49 #define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd) 50 + #define DRM_IOCTL_V3D_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, \ 51 + struct drm_v3d_perfmon_create) 52 + #define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, \ 53 + struct drm_v3d_perfmon_destroy) 54 + #define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \ 55 + struct drm_v3d_perfmon_get_values) 53 56 54 57 #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 55 58 ··· 136 127 __u32 bo_handle_count; 137 128 138 129 __u32 flags; 130 + 131 + /* ID of the perfmon to attach to this job. 0 means no perfmon. */ 132 + __u32 perfmon_id; 133 + 134 + __u32 pad; 139 135 }; 140 136 141 137 /** ··· 209 195 DRM_V3D_PARAM_SUPPORTS_TFU, 210 196 DRM_V3D_PARAM_SUPPORTS_CSD, 211 197 DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, 198 + DRM_V3D_PARAM_SUPPORTS_PERFMON, 212 199 }; 213 200 214 201 struct drm_v3d_get_param { ··· 273 258 __u32 in_sync; 274 259 /* Sync object to signal when the CSD job is done. */ 275 260 __u32 out_sync; 261 + 262 + /* ID of the perfmon to attach to this job. 0 means no perfmon. 
*/ 263 + __u32 perfmon_id; 264 + }; 265 + 266 + enum { 267 + V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS, 268 + V3D_PERFCNT_FEP_VALID_PRIMS, 269 + V3D_PERFCNT_FEP_EZ_NFCLIP_QUADS, 270 + V3D_PERFCNT_FEP_VALID_QUADS, 271 + V3D_PERFCNT_TLB_QUADS_STENCIL_FAIL, 272 + V3D_PERFCNT_TLB_QUADS_STENCILZ_FAIL, 273 + V3D_PERFCNT_TLB_QUADS_STENCILZ_PASS, 274 + V3D_PERFCNT_TLB_QUADS_ZERO_COV, 275 + V3D_PERFCNT_TLB_QUADS_NONZERO_COV, 276 + V3D_PERFCNT_TLB_QUADS_WRITTEN, 277 + V3D_PERFCNT_PTB_PRIM_VIEWPOINT_DISCARD, 278 + V3D_PERFCNT_PTB_PRIM_CLIP, 279 + V3D_PERFCNT_PTB_PRIM_REV, 280 + V3D_PERFCNT_QPU_IDLE_CYCLES, 281 + V3D_PERFCNT_QPU_ACTIVE_CYCLES_VERTEX_COORD_USER, 282 + V3D_PERFCNT_QPU_ACTIVE_CYCLES_FRAG, 283 + V3D_PERFCNT_QPU_CYCLES_VALID_INSTR, 284 + V3D_PERFCNT_QPU_CYCLES_TMU_STALL, 285 + V3D_PERFCNT_QPU_CYCLES_SCOREBOARD_STALL, 286 + V3D_PERFCNT_QPU_CYCLES_VARYINGS_STALL, 287 + V3D_PERFCNT_QPU_IC_HIT, 288 + V3D_PERFCNT_QPU_IC_MISS, 289 + V3D_PERFCNT_QPU_UC_HIT, 290 + V3D_PERFCNT_QPU_UC_MISS, 291 + V3D_PERFCNT_TMU_TCACHE_ACCESS, 292 + V3D_PERFCNT_TMU_TCACHE_MISS, 293 + V3D_PERFCNT_VPM_VDW_STALL, 294 + V3D_PERFCNT_VPM_VCD_STALL, 295 + V3D_PERFCNT_BIN_ACTIVE, 296 + V3D_PERFCNT_RDR_ACTIVE, 297 + V3D_PERFCNT_L2T_HITS, 298 + V3D_PERFCNT_L2T_MISSES, 299 + V3D_PERFCNT_CYCLE_COUNT, 300 + V3D_PERFCNT_QPU_CYCLES_STALLED_VERTEX_COORD_USER, 301 + V3D_PERFCNT_QPU_CYCLES_STALLED_FRAGMENT, 302 + V3D_PERFCNT_PTB_PRIMS_BINNED, 303 + V3D_PERFCNT_AXI_WRITES_WATCH_0, 304 + V3D_PERFCNT_AXI_READS_WATCH_0, 305 + V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_0, 306 + V3D_PERFCNT_AXI_READ_STALLS_WATCH_0, 307 + V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_0, 308 + V3D_PERFCNT_AXI_READ_BYTES_WATCH_0, 309 + V3D_PERFCNT_AXI_WRITES_WATCH_1, 310 + V3D_PERFCNT_AXI_READS_WATCH_1, 311 + V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_1, 312 + V3D_PERFCNT_AXI_READ_STALLS_WATCH_1, 313 + V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_1, 314 + V3D_PERFCNT_AXI_READ_BYTES_WATCH_1, 315 + V3D_PERFCNT_TLB_PARTIAL_QUADS, 316 + V3D_PERFCNT_TMU_CONFIG_ACCESSES, 317 + 
V3D_PERFCNT_L2T_NO_ID_STALL, 318 + V3D_PERFCNT_L2T_COM_QUE_STALL, 319 + V3D_PERFCNT_L2T_TMU_WRITES, 320 + V3D_PERFCNT_TMU_ACTIVE_CYCLES, 321 + V3D_PERFCNT_TMU_STALLED_CYCLES, 322 + V3D_PERFCNT_CLE_ACTIVE, 323 + V3D_PERFCNT_L2T_TMU_READS, 324 + V3D_PERFCNT_L2T_CLE_READS, 325 + V3D_PERFCNT_L2T_VCD_READS, 326 + V3D_PERFCNT_L2T_TMUCFG_READS, 327 + V3D_PERFCNT_L2T_SLC0_READS, 328 + V3D_PERFCNT_L2T_SLC1_READS, 329 + V3D_PERFCNT_L2T_SLC2_READS, 330 + V3D_PERFCNT_L2T_TMU_W_MISSES, 331 + V3D_PERFCNT_L2T_TMU_R_MISSES, 332 + V3D_PERFCNT_L2T_CLE_MISSES, 333 + V3D_PERFCNT_L2T_VCD_MISSES, 334 + V3D_PERFCNT_L2T_TMUCFG_MISSES, 335 + V3D_PERFCNT_L2T_SLC0_MISSES, 336 + V3D_PERFCNT_L2T_SLC1_MISSES, 337 + V3D_PERFCNT_L2T_SLC2_MISSES, 338 + V3D_PERFCNT_CORE_MEM_WRITES, 339 + V3D_PERFCNT_L2T_MEM_WRITES, 340 + V3D_PERFCNT_PTB_MEM_WRITES, 341 + V3D_PERFCNT_TLB_MEM_WRITES, 342 + V3D_PERFCNT_CORE_MEM_READS, 343 + V3D_PERFCNT_L2T_MEM_READS, 344 + V3D_PERFCNT_PTB_MEM_READS, 345 + V3D_PERFCNT_PSE_MEM_READS, 346 + V3D_PERFCNT_TLB_MEM_READS, 347 + V3D_PERFCNT_GMP_MEM_READS, 348 + V3D_PERFCNT_PTB_W_MEM_WORDS, 349 + V3D_PERFCNT_TLB_W_MEM_WORDS, 350 + V3D_PERFCNT_PSE_R_MEM_WORDS, 351 + V3D_PERFCNT_TLB_R_MEM_WORDS, 352 + V3D_PERFCNT_TMU_MRU_HITS, 353 + V3D_PERFCNT_COMPUTE_ACTIVE, 354 + V3D_PERFCNT_NUM, 355 + }; 356 + 357 + #define DRM_V3D_MAX_PERF_COUNTERS 32 358 + 359 + struct drm_v3d_perfmon_create { 360 + __u32 id; 361 + __u32 ncounters; 362 + __u8 counters[DRM_V3D_MAX_PERF_COUNTERS]; 363 + }; 364 + 365 + struct drm_v3d_perfmon_destroy { 366 + __u32 id; 367 + }; 368 + 369 + /* 370 + * Returns the values of the performance counters tracked by this 371 + * perfmon (as an array of ncounters u64 values). 372 + * 373 + * No implicit synchronization is performed, so the user has to 374 + * guarantee that any jobs using this perfmon have already been 375 + * completed (probably by blocking on the seqno returned by the 376 + * last exec that used the perfmon). 
377 + */ 378 + struct drm_v3d_perfmon_get_values { 379 + __u32 id; 380 + __u32 pad; 381 + __u64 values_ptr; 276 382 }; 277 383 278 384 #if defined(__cplusplus)