Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'spi-5.15' into spi-5.16

+2928 -1604
+1 -1
Documentation/admin-guide/README.rst
··· 259 259 Compiling the kernel 260 260 -------------------- 261 261 262 - - Make sure you have at least gcc 4.9 available. 262 + - Make sure you have at least gcc 5.1 available. 263 263 For more information, refer to :ref:`Documentation/process/changes.rst <changes>`. 264 264 265 265 Please note that you can still run a.out user programs with this kernel.
+1 -1
Documentation/devicetree/bindings/arm/tegra.yaml
··· 54 54 - const: toradex,apalis_t30 55 55 - const: nvidia,tegra30 56 56 - items: 57 - - const: toradex,apalis_t30-eval-v1.1 57 + - const: toradex,apalis_t30-v1.1-eval 58 58 - const: toradex,apalis_t30-eval 59 59 - const: toradex,apalis_t30-v1.1 60 60 - const: toradex,apalis_t30
+1 -1
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
··· 9 9 10 10 All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node. 11 11 For a description of the MMSYS_CONFIG binding, see 12 - Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt. 12 + Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml. 13 13 14 14 DISP function blocks 15 15 ====================
+3 -1
Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
··· 19 19 - const: allwinner,sun8i-v3s-emac 20 20 - const: allwinner,sun50i-a64-emac 21 21 - items: 22 - - const: allwinner,sun50i-h6-emac 22 + - enum: 23 + - allwinner,sun20i-d1-emac 24 + - allwinner,sun50i-h6-emac 23 25 - const: allwinner,sun50i-a64-emac 24 26 25 27 reg:
+89
Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Samsung SoC series UFS host controller Device Tree Bindings 8 + 9 + maintainers: 10 + - Alim Akhtar <alim.akhtar@samsung.com> 11 + 12 + description: | 13 + Each Samsung UFS host controller instance should have its own node. 14 + This binding define Samsung specific binding other then what is used 15 + in the common ufshcd bindings 16 + [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt 17 + 18 + properties: 19 + 20 + compatible: 21 + enum: 22 + - samsung,exynos7-ufs 23 + 24 + reg: 25 + items: 26 + - description: HCI register 27 + - description: vendor specific register 28 + - description: unipro register 29 + - description: UFS protector register 30 + 31 + reg-names: 32 + items: 33 + - const: hci 34 + - const: vs_hci 35 + - const: unipro 36 + - const: ufsp 37 + 38 + clocks: 39 + items: 40 + - description: ufs link core clock 41 + - description: unipro main clock 42 + 43 + clock-names: 44 + items: 45 + - const: core_clk 46 + - const: sclk_unipro_main 47 + 48 + interrupts: 49 + maxItems: 1 50 + 51 + phys: 52 + maxItems: 1 53 + 54 + phy-names: 55 + const: ufs-phy 56 + 57 + required: 58 + - compatible 59 + - reg 60 + - interrupts 61 + - phys 62 + - phy-names 63 + - clocks 64 + - clock-names 65 + 66 + additionalProperties: false 67 + 68 + examples: 69 + - | 70 + #include <dt-bindings/interrupt-controller/arm-gic.h> 71 + #include <dt-bindings/clock/exynos7-clk.h> 72 + 73 + ufs: ufs@15570000 { 74 + compatible = "samsung,exynos7-ufs"; 75 + reg = <0x15570000 0x100>, 76 + <0x15570100 0x100>, 77 + <0x15571000 0x200>, 78 + <0x15572000 0x300>; 79 + reg-names = "hci", "vs_hci", "unipro", "ufsp"; 80 + interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>; 81 + clocks = <&clock_fsys1 ACLK_UFS20_LINK>, 82 + <&clock_fsys1 SCLK_UFSUNIPRO20_USER>; 83 + 
clock-names = "core_clk", "sclk_unipro_main"; 84 + pinctrl-names = "default"; 85 + pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>; 86 + phys = <&ufs_phy>; 87 + phy-names = "ufs-phy"; 88 + }; 89 + ...
+1 -1
Documentation/networking/dsa/sja1105.rst
··· 296 296 Device Tree bindings and board design 297 297 ===================================== 298 298 299 - This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt`` 299 + This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml`` 300 300 and aims to showcase some potential switch caveats. 301 301 302 302 RMII PHY role and out-of-band signaling
+1 -1
Documentation/process/changes.rst
··· 29 29 ====================== =============== ======================================== 30 30 Program Minimal version Command to check the version 31 31 ====================== =============== ======================================== 32 - GNU C 4.9 gcc --version 32 + GNU C 5.1 gcc --version 33 33 Clang/LLVM (optional) 10.0.1 clang --version 34 34 GNU make 3.81 make --version 35 35 binutils 2.23 ld -v
+1 -1
Documentation/translations/zh_CN/admin-guide/README.rst
··· 223 223 编译内核 224 224 --------- 225 225 226 - - 确保您至少有gcc 4.9可用。 226 + - 确保您至少有gcc 5.1可用。 227 227 有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>` 。 228 228 229 229 请注意,您仍然可以使用此内核运行a.out用户程序。
+1 -1
Documentation/translations/zh_TW/admin-guide/README.rst
··· 226 226 編譯內核 227 227 --------- 228 228 229 - - 確保您至少有gcc 4.9可用。 229 + - 確保您至少有gcc 5.1可用。 230 230 有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>` 。 231 231 232 232 請注意,您仍然可以使用此內核運行a.out用戶程序。
+2 -1
MAINTAINERS
··· 14342 14342 F: drivers/pci/controller/pci-ixp4xx.c 14343 14343 14344 14344 PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD) 14345 - M: Jonathan Derrick <jonathan.derrick@intel.com> 14345 + M: Nirmal Patel <nirmal.patel@linux.intel.com> 14346 + R: Jonathan Derrick <jonathan.derrick@linux.dev> 14346 14347 L: linux-pci@vger.kernel.org 14347 14348 S: Supported 14348 14349 F: drivers/pci/controller/vmd.c
+1 -7
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 15 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc1 5 + EXTRAVERSION = -rc2 6 6 NAME = Opossums on Parade 7 7 8 8 # *DOCUMENTATION* ··· 848 848 endif 849 849 850 850 DEBUG_CFLAGS := 851 - 852 - # Workaround for GCC versions < 5.0 853 - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801 854 - ifdef CONFIG_CC_IS_GCC 855 - DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments)) 856 - endif 857 851 858 852 ifdef CONFIG_DEBUG_INFO 859 853
+1 -2
arch/alpha/Kconfig
··· 20 20 select NEED_SG_DMA_LENGTH 21 21 select VIRT_TO_BUS 22 22 select GENERIC_IRQ_PROBE 23 - select GENERIC_PCI_IOMAP if PCI 23 + select GENERIC_PCI_IOMAP 24 24 select AUTO_IRQ_AFFINITY if SMP 25 25 select GENERIC_IRQ_SHOW 26 26 select ARCH_WANT_IPC_PARSE_VERSION ··· 199 199 200 200 config ALPHA_JENSEN 201 201 bool "Jensen" 202 - depends on BROKEN 203 202 select HAVE_EISA 204 203 help 205 204 DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
+1
arch/alpha/include/asm/asm-prototypes.h
··· 16 16 extern void __remlu(void); 17 17 extern void __divqu(void); 18 18 extern void __remqu(void); 19 + extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long);
+3 -3
arch/alpha/include/asm/io.h
··· 60 60 * Change virtual addresses to physical addresses and vv. 61 61 */ 62 62 #ifdef USE_48_BIT_KSEG 63 - static inline unsigned long virt_to_phys(void *address) 63 + static inline unsigned long virt_to_phys(volatile void *address) 64 64 { 65 65 return (unsigned long)address - IDENT_ADDR; 66 66 } ··· 70 70 return (void *) (address + IDENT_ADDR); 71 71 } 72 72 #else 73 - static inline unsigned long virt_to_phys(void *address) 73 + static inline unsigned long virt_to_phys(volatile void *address) 74 74 { 75 75 unsigned long phys = (unsigned long)address; 76 76 ··· 106 106 extern unsigned long __direct_map_base; 107 107 extern unsigned long __direct_map_size; 108 108 109 - static inline unsigned long __deprecated virt_to_bus(void *address) 109 + static inline unsigned long __deprecated virt_to_bus(volatile void *address) 110 110 { 111 111 unsigned long phys = virt_to_phys(address); 112 112 unsigned long bus = phys + __direct_map_base;
+4 -4
arch/alpha/include/asm/jensen.h
··· 111 111 * convinced that I need one of the newer machines. 112 112 */ 113 113 114 - static inline unsigned int jensen_local_inb(unsigned long addr) 114 + __EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr) 115 115 { 116 116 return 0xff & *(vuip)((addr << 9) + EISA_VL82C106); 117 117 } 118 118 119 - static inline void jensen_local_outb(u8 b, unsigned long addr) 119 + __EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr) 120 120 { 121 121 *(vuip)((addr << 9) + EISA_VL82C106) = b; 122 122 mb(); 123 123 } 124 124 125 - static inline unsigned int jensen_bus_inb(unsigned long addr) 125 + __EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr) 126 126 { 127 127 long result; 128 128 ··· 131 131 return __kernel_extbl(result, addr & 3); 132 132 } 133 133 134 - static inline void jensen_bus_outb(u8 b, unsigned long addr) 134 + __EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr) 135 135 { 136 136 jensen_set_hae(0); 137 137 *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
+43
arch/alpha/include/asm/setup.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + #ifndef __ALPHA_SETUP_H 3 + #define __ALPHA_SETUP_H 4 + 5 + #include <uapi/asm/setup.h> 6 + 7 + /* 8 + * We leave one page for the initial stack page, and one page for 9 + * the initial process structure. Also, the console eats 3 MB for 10 + * the initial bootloader (one of which we can reclaim later). 11 + */ 12 + #define BOOT_PCB 0x20000000 13 + #define BOOT_ADDR 0x20000000 14 + /* Remove when official MILO sources have ELF support: */ 15 + #define BOOT_SIZE (16*1024) 16 + 17 + #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS 18 + #define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */ 19 + #else 20 + #define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ 21 + #endif 22 + 23 + #define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) 24 + #define SWAPPER_PGD KERNEL_START 25 + #define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) 26 + #define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) 27 + #define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) 28 + #define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) 29 + 30 + #define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) 31 + 32 + /* 33 + * This is setup by the secondary bootstrap loader. Because 34 + * the zero page is zeroed out as soon as the vm system is 35 + * initialized, we need to copy things out into a more permanent 36 + * place. 37 + */ 38 + #define PARAM ZERO_PGE 39 + #define COMMAND_LINE ((char *)(absolute_pointer(PARAM + 0x0000))) 40 + #define INITRD_START (*(unsigned long *) (PARAM+0x100)) 41 + #define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) 42 + 43 + #endif
+3 -39
arch/alpha/include/uapi/asm/setup.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 - #ifndef __ALPHA_SETUP_H 3 - #define __ALPHA_SETUP_H 2 + #ifndef _UAPI__ALPHA_SETUP_H 3 + #define _UAPI__ALPHA_SETUP_H 4 4 5 5 #define COMMAND_LINE_SIZE 256 6 6 7 - /* 8 - * We leave one page for the initial stack page, and one page for 9 - * the initial process structure. Also, the console eats 3 MB for 10 - * the initial bootloader (one of which we can reclaim later). 11 - */ 12 - #define BOOT_PCB 0x20000000 13 - #define BOOT_ADDR 0x20000000 14 - /* Remove when official MILO sources have ELF support: */ 15 - #define BOOT_SIZE (16*1024) 16 - 17 - #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS 18 - #define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */ 19 - #else 20 - #define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ 21 - #endif 22 - 23 - #define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) 24 - #define SWAPPER_PGD KERNEL_START 25 - #define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) 26 - #define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) 27 - #define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) 28 - #define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) 29 - 30 - #define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) 31 - 32 - /* 33 - * This is setup by the secondary bootstrap loader. Because 34 - * the zero page is zeroed out as soon as the vm system is 35 - * initialized, we need to copy things out into a more permanent 36 - * place. 37 - */ 38 - #define PARAM ZERO_PGE 39 - #define COMMAND_LINE ((char*)(PARAM + 0x0000)) 40 - #define INITRD_START (*(unsigned long *) (PARAM+0x100)) 41 - #define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) 42 - 43 - #endif 7 + #endif /* _UAPI__ALPHA_SETUP_H */
+5 -5
arch/alpha/kernel/sys_jensen.c
··· 7 7 * 8 8 * Code supporting the Jensen. 9 9 */ 10 + #define __EXTERN_INLINE 11 + #include <asm/io.h> 12 + #include <asm/jensen.h> 13 + #undef __EXTERN_INLINE 14 + 10 15 #include <linux/interrupt.h> 11 16 #include <linux/kernel.h> 12 17 #include <linux/types.h> ··· 21 16 #include <linux/init.h> 22 17 23 18 #include <asm/ptrace.h> 24 - 25 - #define __EXTERN_INLINE inline 26 - #include <asm/io.h> 27 - #include <asm/jensen.h> 28 - #undef __EXTERN_INLINE 29 19 30 20 #include <asm/dma.h> 31 21 #include <asm/irq.h>
+1
arch/alpha/lib/Makefile
··· 14 14 ev67-$(CONFIG_ALPHA_EV67) := ev67- 15 15 16 16 lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ 17 + udiv-qrnnd.o \ 17 18 udelay.o \ 18 19 $(ev6-y)memset.o \ 19 20 $(ev6-y)memcpy.o \
+1 -1
arch/alpha/math-emu/Makefile
··· 7 7 8 8 obj-$(CONFIG_MATHEMU) += math-emu.o 9 9 10 - math-emu-objs := math.o qrnnd.o 10 + math-emu-objs := math.o
-2
arch/alpha/math-emu/math.c
··· 403 403 egress: 404 404 return si_code; 405 405 } 406 - 407 - EXPORT_SYMBOL(__udiv_qrnnd);
+2
arch/alpha/math-emu/qrnnd.S arch/alpha/lib/udiv-qrnnd.S
··· 25 25 # along with GCC; see the file COPYING. If not, write to the 26 26 # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, 27 27 # MA 02111-1307, USA. 28 + #include <asm/export.h> 28 29 29 30 .set noreorder 30 31 .set noat ··· 162 161 ret $31,($26),1 163 162 164 163 .end __udiv_qrnnd 164 + EXPORT_SYMBOL(__udiv_qrnnd)
+1 -1
arch/arm64/Kconfig
··· 86 86 select ARCH_SUPPORTS_LTO_CLANG_THIN 87 87 select ARCH_SUPPORTS_CFI_CLANG 88 88 select ARCH_SUPPORTS_ATOMIC_RMW 89 - select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG) 89 + select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 90 90 select ARCH_SUPPORTS_NUMA_BALANCING 91 91 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT 92 92 select ARCH_WANT_DEFAULT_BPF_JIT
+1 -1
arch/arm64/kernel/fpsimd.c
··· 513 513 void sve_alloc(struct task_struct *task) 514 514 { 515 515 if (task->thread.sve_state) { 516 - memset(task->thread.sve_state, 0, sve_state_size(current)); 516 + memset(task->thread.sve_state, 0, sve_state_size(task)); 517 517 return; 518 518 } 519 519
+1 -2
arch/arm64/kernel/process.c
··· 18 18 #include <linux/mman.h> 19 19 #include <linux/mm.h> 20 20 #include <linux/nospec.h> 21 - #include <linux/sched.h> 22 21 #include <linux/stddef.h> 23 22 #include <linux/sysctl.h> 24 23 #include <linux/unistd.h> ··· 57 58 58 59 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) 59 60 #include <linux/stackprotector.h> 60 - unsigned long __stack_chk_guard __read_mostly; 61 + unsigned long __stack_chk_guard __ro_after_init; 61 62 EXPORT_SYMBOL(__stack_chk_guard); 62 63 #endif 63 64
+10 -10
arch/m68k/include/asm/raw_io.h
··· 17 17 * two accesses to memory, which may be undesirable for some devices. 18 18 */ 19 19 #define in_8(addr) \ 20 - ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; }) 20 + ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; }) 21 21 #define in_be16(addr) \ 22 - ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; }) 22 + ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; }) 23 23 #define in_be32(addr) \ 24 - ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; }) 24 + ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; }) 25 25 #define in_le16(addr) \ 26 - ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; }) 26 + ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; }) 27 27 #define in_le32(addr) \ 28 - ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; }) 28 + ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; }) 29 29 30 - #define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b)) 31 - #define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w)) 32 - #define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l)) 33 - #define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w)) 34 - #define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l)) 30 + #define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b)) 31 + #define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w)) 32 + #define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l)) 33 + #define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w)) 34 + #define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l)) 35 35 36 36 #define raw_inb in_8 37 37 #define raw_inw in_be16
+3 -1
arch/m68k/mvme147/config.c
··· 171 171 172 172 int mvme147_hwclk(int op, struct rtc_time *t) 173 173 { 174 - #warning check me! 175 174 if (!op) { 176 175 m147_rtc->ctrl = RTC_READ; 177 176 t->tm_year = bcd2int (m147_rtc->bcd_year); ··· 182 183 m147_rtc->ctrl = 0; 183 184 if (t->tm_year < 70) 184 185 t->tm_year += 100; 186 + } else { 187 + /* FIXME Setting the time is not yet supported */ 188 + return -EOPNOTSUPP; 185 189 } 186 190 return 0; 187 191 }
+3 -1
arch/m68k/mvme16x/config.c
··· 436 436 437 437 int mvme16x_hwclk(int op, struct rtc_time *t) 438 438 { 439 - #warning check me! 440 439 if (!op) { 441 440 rtc->ctrl = RTC_READ; 442 441 t->tm_year = bcd2int (rtc->bcd_year); ··· 447 448 rtc->ctrl = 0; 448 449 if (t->tm_year < 70) 449 450 t->tm_year += 100; 451 + } else { 452 + /* FIXME Setting the time is not yet supported */ 453 + return -EOPNOTSUPP; 450 454 } 451 455 return 0; 452 456 }
+1 -1
arch/parisc/include/asm/page.h
··· 184 184 #include <asm-generic/getorder.h> 185 185 #include <asm/pdc.h> 186 186 187 - #define PAGE0 ((struct zeropage *)__PAGE_OFFSET) 187 + #define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET)) 188 188 189 189 /* DEFINITION OF THE ZERO-PAGE (PAG0) */ 190 190 /* based on work by Jason Eckhardt (jason@equator.com) */
+3 -1
arch/parisc/lib/iomap.c
··· 513 513 } 514 514 } 515 515 516 + #ifdef CONFIG_PCI 516 517 void pci_iounmap(struct pci_dev *dev, void __iomem * addr) 517 518 { 518 519 if (!INDIRECT_ADDR(addr)) { 519 520 iounmap(addr); 520 521 } 521 522 } 523 + EXPORT_SYMBOL(pci_iounmap); 524 + #endif 522 525 523 526 EXPORT_SYMBOL(ioread8); 524 527 EXPORT_SYMBOL(ioread16); ··· 547 544 EXPORT_SYMBOL(iowrite32_rep); 548 545 EXPORT_SYMBOL(ioport_map); 549 546 EXPORT_SYMBOL(ioport_unmap); 550 - EXPORT_SYMBOL(pci_iounmap);
+1 -1
arch/powerpc/boot/Makefile
··· 35 35 BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ 36 36 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ 37 37 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ 38 - -include $(srctree)/include/linux/compiler_attributes.h \ 39 38 $(LINUXINCLUDE) 40 39 41 40 ifdef CONFIG_PPC64_BOOT_WRAPPER ··· 69 70 BOOTCFLAGS += -fno-stack-protector 70 71 endif 71 72 73 + BOOTCFLAGS += -include $(srctree)/include/linux/compiler_attributes.h 72 74 BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj) 73 75 74 76 DTC_FLAGS ?= -p 1024
-10
arch/powerpc/include/asm/asm-const.h
··· 12 12 # define ASM_CONST(x) __ASM_CONST(x) 13 13 #endif 14 14 15 - /* 16 - * Inline assembly memory constraint 17 - * 18 - * GCC 4.9 doesn't properly handle pre update memory constraint "m<>" 19 - * 20 - */ 21 - #if defined(GCC_VERSION) && GCC_VERSION < 50000 22 - #define UPD_CONSTR "" 23 - #else 24 15 #define UPD_CONSTR "<>" 25 - #endif 26 16 27 17 #endif /* _ASM_POWERPC_ASM_CONST_H */
+43
arch/powerpc/kernel/interrupt.c
··· 18 18 #include <asm/switch_to.h> 19 19 #include <asm/syscall.h> 20 20 #include <asm/time.h> 21 + #include <asm/tm.h> 21 22 #include <asm/unistd.h> 22 23 23 24 #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32) ··· 136 135 * returns to user with IRQS_ENABLED, this store could be avoided! 137 136 */ 138 137 irq_soft_mask_regs_set_state(regs, IRQS_ENABLED); 138 + 139 + /* 140 + * If system call is called with TM active, set _TIF_RESTOREALL to 141 + * prevent RFSCV being used to return to userspace, because POWER9 142 + * TM implementation has problems with this instruction returning to 143 + * transactional state. Final register values are not relevant because 144 + * the transaction will be aborted upon return anyway. Or in the case 145 + * of unsupported_scv SIGILL fault, the return state does not much 146 + * matter because it's an edge case. 147 + */ 148 + if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && 149 + unlikely(MSR_TM_TRANSACTIONAL(regs->msr))) 150 + current_thread_info()->flags |= _TIF_RESTOREALL; 151 + 152 + /* 153 + * If the system call was made with a transaction active, doom it and 154 + * return without performing the system call. Unless it was an 155 + * unsupported scv vector, in which case it's treated like an illegal 156 + * instruction. 157 + */ 158 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 159 + if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) && 160 + !trap_is_unsupported_scv(regs)) { 161 + /* Enable TM in the kernel, and disable EE (for scv) */ 162 + hard_irq_disable(); 163 + mtmsr(mfmsr() | MSR_TM); 164 + 165 + /* tabort, this dooms the transaction, nothing else */ 166 + asm volatile(".long 0x7c00071d | ((%0) << 16)" 167 + :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)); 168 + 169 + /* 170 + * Userspace will never see the return value. Execution will 171 + * resume after the tbegin. of the aborted transaction with the 172 + * checkpointed register state. 
A context switch could occur 173 + * or signal delivered to the process before resuming the 174 + * doomed transaction context, but that should all be handled 175 + * as expected. 176 + */ 177 + return -ENOSYS; 178 + } 179 + #endif // CONFIG_PPC_TRANSACTIONAL_MEM 139 180 140 181 local_irq_enable(); 141 182
-41
arch/powerpc/kernel/interrupt_64.S
··· 12 12 #include <asm/mmu.h> 13 13 #include <asm/ppc_asm.h> 14 14 #include <asm/ptrace.h> 15 - #include <asm/tm.h> 16 15 17 16 .section ".toc","aw" 18 17 SYS_CALL_TABLE: ··· 54 55 .globl system_call_vectored_\name 55 56 system_call_vectored_\name: 56 57 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) 57 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 58 - BEGIN_FTR_SECTION 59 - extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ 60 - bne tabort_syscall 61 - END_FTR_SECTION_IFSET(CPU_FTR_TM) 62 - #endif 63 58 SCV_INTERRUPT_TO_KERNEL 64 59 mr r10,r1 65 60 ld r1,PACAKSAVE(r13) ··· 240 247 .globl system_call_common 241 248 system_call_common: 242 249 _ASM_NOKPROBE_SYMBOL(system_call_common) 243 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 244 - BEGIN_FTR_SECTION 245 - extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ 246 - bne tabort_syscall 247 - END_FTR_SECTION_IFSET(CPU_FTR_TM) 248 - #endif 249 250 mr r10,r1 250 251 ld r1,PACAKSAVE(r13) 251 252 std r10,0(r1) ··· 410 423 411 424 SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b) 412 425 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart) 413 - #endif 414 - 415 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 416 - tabort_syscall: 417 - _ASM_NOKPROBE_SYMBOL(tabort_syscall) 418 - /* Firstly we need to enable TM in the kernel */ 419 - mfmsr r10 420 - li r9, 1 421 - rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG 422 - mtmsrd r10, 0 423 - 424 - /* tabort, this dooms the transaction, nothing else */ 425 - li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT) 426 - TABORT(R9) 427 - 428 - /* 429 - * Return directly to userspace. We have corrupted user register state, 430 - * but userspace will never see that register state. Execution will 431 - * resume after the tbegin of the aborted transaction with the 432 - * checkpointed register state. 433 - */ 434 - li r9, MSR_RI 435 - andc r10, r10, r9 436 - mtmsrd r10, 1 437 - mtspr SPRN_SRR0, r11 438 - mtspr SPRN_SRR1, r12 439 - RFI_TO_USER 440 - b . 
/* prevent speculative execution */ 441 426 #endif 442 427 443 428 /*
+15 -2
arch/powerpc/kernel/mce.c
··· 249 249 { 250 250 int index; 251 251 struct machine_check_event evt; 252 + unsigned long msr; 252 253 253 254 if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 254 255 return; ··· 263 262 memcpy(&local_paca->mce_info->mce_event_queue[index], 264 263 &evt, sizeof(evt)); 265 264 266 - /* Queue irq work to process this event later. */ 267 - irq_work_queue(&mce_event_process_work); 265 + /* 266 + * Queue irq work to process this event later. Before 267 + * queuing the work enable translation for non radix LPAR, 268 + * as irq_work_queue may try to access memory outside RMO 269 + * region. 270 + */ 271 + if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) { 272 + msr = mfmsr(); 273 + mtmsr(msr | MSR_IR | MSR_DR); 274 + irq_work_queue(&mce_event_process_work); 275 + mtmsr(msr); 276 + } else { 277 + irq_work_queue(&mce_event_process_work); 278 + } 268 279 } 269 280 270 281 void mce_common_process_ue(struct pt_regs *regs,
+34 -2
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 2536 2536 /* The following code handles the fake_suspend = 1 case */ 2537 2537 mflr r0 2538 2538 std r0, PPC_LR_STKOFF(r1) 2539 - stdu r1, -PPC_MIN_STKFRM(r1) 2539 + stdu r1, -TM_FRAME_SIZE(r1) 2540 2540 2541 2541 /* Turn on TM. */ 2542 2542 mfmsr r8 ··· 2551 2551 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 2552 2552 nop 2553 2553 2554 + /* 2555 + * It's possible that treclaim. may modify registers, if we have lost 2556 + * track of fake-suspend state in the guest due to it using rfscv. 2557 + * Save and restore registers in case this occurs. 2558 + */ 2559 + mfspr r3, SPRN_DSCR 2560 + mfspr r4, SPRN_XER 2561 + mfspr r5, SPRN_AMR 2562 + /* SPRN_TAR would need to be saved here if the kernel ever used it */ 2563 + mfcr r12 2564 + SAVE_NVGPRS(r1) 2565 + SAVE_GPR(2, r1) 2566 + SAVE_GPR(3, r1) 2567 + SAVE_GPR(4, r1) 2568 + SAVE_GPR(5, r1) 2569 + stw r12, 8(r1) 2570 + std r1, HSTATE_HOST_R1(r13) 2571 + 2554 2572 /* We have to treclaim here because that's the only way to do S->N */ 2555 2573 li r3, TM_CAUSE_KVM_RESCHED 2556 2574 TRECLAIM(R3) 2575 + 2576 + GET_PACA(r13) 2577 + ld r1, HSTATE_HOST_R1(r13) 2578 + REST_GPR(2, r1) 2579 + REST_GPR(3, r1) 2580 + REST_GPR(4, r1) 2581 + REST_GPR(5, r1) 2582 + lwz r12, 8(r1) 2583 + REST_NVGPRS(r1) 2584 + mtspr SPRN_DSCR, r3 2585 + mtspr SPRN_XER, r4 2586 + mtspr SPRN_AMR, r5 2587 + mtcr r12 2588 + HMT_MEDIUM 2557 2589 2558 2590 /* 2559 2591 * We were in fake suspend, so we are not going to save the ··· 2614 2582 std r5, VCPU_TFHAR(r9) 2615 2583 std r6, VCPU_TFIAR(r9) 2616 2584 2617 - addi r1, r1, PPC_MIN_STKFRM 2585 + addi r1, r1, TM_FRAME_SIZE 2618 2586 ld r0, PPC_LR_STKOFF(r1) 2619 2587 mtlr r0 2620 2588 blr
+2 -2
arch/powerpc/sysdev/xics/xics-common.c
··· 348 348 if (xics_ics->check(xics_ics, hwirq)) 349 349 return -EINVAL; 350 350 351 - /* No chip data for the XICS domain */ 351 + /* Let the ICS be the chip data for the XICS domain. For ICS native */ 352 352 irq_domain_set_info(domain, virq, hwirq, xics_ics->chip, 353 - NULL, handle_fasteoi_irq, NULL, NULL); 353 + xics_ics, handle_fasteoi_irq, NULL, NULL); 354 354 355 355 return 0; 356 356 }
+1 -1
arch/riscv/Kconfig
··· 236 236 config ARCH_RV64I 237 237 bool "RV64I" 238 238 select 64BIT 239 - select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000 239 + select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 240 240 select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8) 241 241 select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE 242 242 select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
-10
arch/s390/Kconfig
··· 685 685 The minimum size for the stack guard should be 256 for 31 bit and 686 686 512 for 64 bit. 687 687 688 - config WARN_DYNAMIC_STACK 689 - def_bool n 690 - prompt "Emit compiler warnings for function with dynamic stack usage" 691 - help 692 - This option enables the compiler option -mwarn-dynamicstack. If the 693 - compiler supports this options generates warnings for functions 694 - that dynamically allocate stack space using alloca. 695 - 696 - Say N if you are unsure. 697 - 698 688 endmenu 699 689 700 690 menu "I/O subsystem"
-7
arch/s390/Makefile
··· 85 85 endif 86 86 endif 87 87 88 - ifdef CONFIG_WARN_DYNAMIC_STACK 89 - ifneq ($(call cc-option,-mwarn-dynamicstack),) 90 - KBUILD_CFLAGS += -mwarn-dynamicstack 91 - KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack 92 - endif 93 - endif 94 - 95 88 ifdef CONFIG_EXPOLINE 96 89 ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),) 97 90 CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
+5 -3
arch/s390/configs/debug_defconfig
··· 10 10 CONFIG_BPF_JIT_ALWAYS_ON=y 11 11 CONFIG_BPF_LSM=y 12 12 CONFIG_PREEMPT=y 13 + CONFIG_SCHED_CORE=y 13 14 CONFIG_BSD_PROCESS_ACCT=y 14 15 CONFIG_BSD_PROCESS_ACCT_V3=y 15 16 CONFIG_TASKSTATS=y ··· 504 503 # CONFIG_NET_VENDOR_HUAWEI is not set 505 504 # CONFIG_NET_VENDOR_INTEL is not set 506 505 # CONFIG_NET_VENDOR_MICROSOFT is not set 506 + # CONFIG_NET_VENDOR_LITEX is not set 507 507 # CONFIG_NET_VENDOR_MARVELL is not set 508 508 CONFIG_MLX4_EN=m 509 509 CONFIG_MLX5_CORE=m ··· 663 661 CONFIG_NFSD_V4=y 664 662 CONFIG_NFSD_V4_SECURITY_LABEL=y 665 663 CONFIG_CIFS=m 666 - CONFIG_CIFS_WEAK_PW_HASH=y 667 664 CONFIG_CIFS_UPCALL=y 668 665 CONFIG_CIFS_XATTR=y 669 666 CONFIG_CIFS_POSIX=y ··· 721 720 CONFIG_CRYPTO_VMAC=m 722 721 CONFIG_CRYPTO_CRC32=m 723 722 CONFIG_CRYPTO_BLAKE2S=m 723 + CONFIG_CRYPTO_MD4=m 724 + CONFIG_CRYPTO_MD5=y 724 725 CONFIG_CRYPTO_MICHAEL_MIC=m 725 726 CONFIG_CRYPTO_RMD160=m 726 727 CONFIG_CRYPTO_SHA3=m ··· 777 774 CONFIG_DMA_CMA=y 778 775 CONFIG_CMA_SIZE_MBYTES=0 779 776 CONFIG_DMA_API_DEBUG=y 780 - CONFIG_STRING_SELFTEST=y 781 777 CONFIG_PRINTK_TIME=y 782 778 CONFIG_DYNAMIC_DEBUG=y 783 779 CONFIG_DEBUG_INFO=y ··· 855 853 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y 856 854 CONFIG_LKDTM=m 857 855 CONFIG_TEST_MIN_HEAP=y 858 - CONFIG_TEST_SORT=y 859 856 CONFIG_KPROBES_SANITY_TEST=y 860 857 CONFIG_RBTREE_TEST=y 861 858 CONFIG_INTERVAL_TREE_TEST=m 862 859 CONFIG_PERCPU_TEST=m 863 860 CONFIG_ATOMIC64_SELFTEST=y 861 + CONFIG_STRING_SELFTEST=y 864 862 CONFIG_TEST_BITOPS=m 865 863 CONFIG_TEST_BPF=m 866 864 CONFIG_TEST_LIVEPATCH=m
+4 -1
arch/s390/configs/defconfig
··· 8 8 CONFIG_BPF_JIT=y 9 9 CONFIG_BPF_JIT_ALWAYS_ON=y 10 10 CONFIG_BPF_LSM=y 11 + CONFIG_SCHED_CORE=y 11 12 CONFIG_BSD_PROCESS_ACCT=y 12 13 CONFIG_BSD_PROCESS_ACCT_V3=y 13 14 CONFIG_TASKSTATS=y ··· 495 494 # CONFIG_NET_VENDOR_HUAWEI is not set 496 495 # CONFIG_NET_VENDOR_INTEL is not set 497 496 # CONFIG_NET_VENDOR_MICROSOFT is not set 497 + # CONFIG_NET_VENDOR_LITEX is not set 498 498 # CONFIG_NET_VENDOR_MARVELL is not set 499 499 CONFIG_MLX4_EN=m 500 500 CONFIG_MLX5_CORE=m ··· 650 648 CONFIG_NFSD_V4=y 651 649 CONFIG_NFSD_V4_SECURITY_LABEL=y 652 650 CONFIG_CIFS=m 653 - CONFIG_CIFS_WEAK_PW_HASH=y 654 651 CONFIG_CIFS_UPCALL=y 655 652 CONFIG_CIFS_XATTR=y 656 653 CONFIG_CIFS_POSIX=y ··· 709 708 CONFIG_CRYPTO_VMAC=m 710 709 CONFIG_CRYPTO_CRC32=m 711 710 CONFIG_CRYPTO_BLAKE2S=m 711 + CONFIG_CRYPTO_MD4=m 712 + CONFIG_CRYPTO_MD5=y 712 713 CONFIG_CRYPTO_MICHAEL_MIC=m 713 714 CONFIG_CRYPTO_RMD160=m 714 715 CONFIG_CRYPTO_SHA3=m
+2 -2
arch/s390/pci/pci_mmio.c
··· 159 159 160 160 mmap_read_lock(current->mm); 161 161 ret = -EINVAL; 162 - vma = find_vma(current->mm, mmio_addr); 162 + vma = vma_lookup(current->mm, mmio_addr); 163 163 if (!vma) 164 164 goto out_unlock_mmap; 165 165 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) ··· 298 298 299 299 mmap_read_lock(current->mm); 300 300 ret = -EINVAL; 301 - vma = find_vma(current->mm, mmio_addr); 301 + vma = vma_lookup(current->mm, mmio_addr); 302 302 if (!vma) 303 303 goto out_unlock_mmap; 304 304 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+8 -8
arch/sh/boot/Makefile
··· 80 80 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE 81 81 $(call if_changed,lzo) 82 82 83 - $(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 83 + $(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE 84 84 $(call if_changed,uimage,bzip2) 85 85 86 - $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz 86 + $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE 87 87 $(call if_changed,uimage,gzip) 88 88 89 - $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma 89 + $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE 90 90 $(call if_changed,uimage,lzma) 91 91 92 - $(obj)/uImage.xz: $(obj)/vmlinux.bin.xz 92 + $(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE 93 93 $(call if_changed,uimage,xz) 94 94 95 - $(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo 95 + $(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE 96 96 $(call if_changed,uimage,lzo) 97 97 98 - $(obj)/uImage.bin: $(obj)/vmlinux.bin 98 + $(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE 99 99 $(call if_changed,uimage,none) 100 100 101 101 OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec 102 - $(obj)/vmlinux.srec: $(obj)/compressed/vmlinux 102 + $(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE 103 103 $(call if_changed,objcopy) 104 104 105 105 OBJCOPYFLAGS_uImage.srec := -I binary -O srec 106 - $(obj)/uImage.srec: $(obj)/uImage 106 + $(obj)/uImage.srec: $(obj)/uImage FORCE 107 107 $(call if_changed,objcopy) 108 108 109 109 $(obj)/uImage: $(obj)/uImage.$(suffix-y)
+3 -1
arch/sparc/kernel/ioport.c
··· 356 356 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, 357 357 dma_addr_t dma_addr, unsigned long attrs) 358 358 { 359 - if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size))) 359 + size = PAGE_ALIGN(size); 360 + 361 + if (!sparc_dma_free_resource(cpu_addr, size)) 360 362 return; 361 363 362 364 dma_make_coherent(dma_addr, size);
+2 -1
arch/sparc/kernel/mdesc.c
··· 39 39 u32 node_sz; /* node block size */ 40 40 u32 name_sz; /* name block size */ 41 41 u32 data_sz; /* data block size */ 42 + char data[]; 42 43 } __attribute__((aligned(16))); 43 44 44 45 struct mdesc_elem { ··· 613 612 614 613 static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) 615 614 { 616 - return (struct mdesc_elem *) (mdesc + 1); 615 + return (struct mdesc_elem *) mdesc->data; 617 616 } 618 617 619 618 static void *name_block(struct mdesc_hdr *mdesc)
+5
arch/x86/Kconfig
··· 339 339 config ARCH_HIBERNATION_POSSIBLE 340 340 def_bool y 341 341 342 + config ARCH_NR_GPIO 343 + int 344 + default 1024 if X86_64 345 + default 512 346 + 342 347 config ARCH_SUSPEND_POSSIBLE 343 348 def_bool y 344 349
+9 -3
arch/x86/Makefile_32.cpu
··· 4 4 5 5 tune = $(call cc-option,-mtune=$(1),$(2)) 6 6 7 + ifdef CONFIG_CC_IS_CLANG 8 + align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0) 9 + else 10 + align := -falign-functions=0 -falign-jumps=0 -falign-loops=0 11 + endif 12 + 7 13 cflags-$(CONFIG_M486SX) += -march=i486 8 14 cflags-$(CONFIG_M486) += -march=i486 9 15 cflags-$(CONFIG_M586) += -march=i586 ··· 25 19 # They make zero difference whatsosever to performance at this time. 26 20 cflags-$(CONFIG_MK7) += -march=athlon 27 21 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon) 28 - cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0 29 - cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0 22 + cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align) 23 + cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align) 30 24 cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586) 31 25 cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586) 32 - cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0 26 + cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align) 33 27 cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686) 34 28 cflags-$(CONFIG_MVIAC7) += -march=i686 35 29 cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
+26 -17
arch/x86/hyperv/hv_apic.c
··· 99 99 /* 100 100 * IPI implementation on Hyper-V. 101 101 */ 102 - static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) 102 + static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector, 103 + bool exclude_self) 103 104 { 104 105 struct hv_send_ipi_ex **arg; 105 106 struct hv_send_ipi_ex *ipi_arg; ··· 124 123 125 124 if (!cpumask_equal(mask, cpu_present_mask)) { 126 125 ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; 127 - nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); 126 + if (exclude_self) 127 + nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask); 128 + else 129 + nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); 128 130 } 129 131 if (nr_bank < 0) 130 132 goto ipi_mask_ex_done; ··· 142 138 return hv_result_success(status); 143 139 } 144 140 145 - static bool __send_ipi_mask(const struct cpumask *mask, int vector) 141 + static bool __send_ipi_mask(const struct cpumask *mask, int vector, 142 + bool exclude_self) 146 143 { 147 - int cur_cpu, vcpu; 144 + int cur_cpu, vcpu, this_cpu = smp_processor_id(); 148 145 struct hv_send_ipi ipi_arg; 149 146 u64 status; 147 + unsigned int weight; 150 148 151 149 trace_hyperv_send_ipi_mask(mask, vector); 152 150 153 - if (cpumask_empty(mask)) 151 + weight = cpumask_weight(mask); 152 + 153 + /* 154 + * Do nothing if 155 + * 1. the mask is empty 156 + * 2. 
the mask only contains self when exclude_self is true 157 + */ 158 + if (weight == 0 || 159 + (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask))) 154 160 return true; 155 161 156 162 if (!hv_hypercall_pg) ··· 186 172 ipi_arg.cpu_mask = 0; 187 173 188 174 for_each_cpu(cur_cpu, mask) { 175 + if (exclude_self && cur_cpu == this_cpu) 176 + continue; 189 177 vcpu = hv_cpu_number_to_vp_number(cur_cpu); 190 178 if (vcpu == VP_INVAL) 191 179 return false; ··· 207 191 return hv_result_success(status); 208 192 209 193 do_ex_hypercall: 210 - return __send_ipi_mask_ex(mask, vector); 194 + return __send_ipi_mask_ex(mask, vector, exclude_self); 211 195 } 212 196 213 197 static bool __send_ipi_one(int cpu, int vector) ··· 224 208 return false; 225 209 226 210 if (vp >= 64) 227 - return __send_ipi_mask_ex(cpumask_of(cpu), vector); 211 + return __send_ipi_mask_ex(cpumask_of(cpu), vector, false); 228 212 229 213 status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp)); 230 214 return hv_result_success(status); ··· 238 222 239 223 static void hv_send_ipi_mask(const struct cpumask *mask, int vector) 240 224 { 241 - if (!__send_ipi_mask(mask, vector)) 225 + if (!__send_ipi_mask(mask, vector, false)) 242 226 orig_apic.send_IPI_mask(mask, vector); 243 227 } 244 228 245 229 static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) 246 230 { 247 - unsigned int this_cpu = smp_processor_id(); 248 - struct cpumask new_mask; 249 - const struct cpumask *local_mask; 250 - 251 - cpumask_copy(&new_mask, mask); 252 - cpumask_clear_cpu(this_cpu, &new_mask); 253 - local_mask = &new_mask; 254 - if (!__send_ipi_mask(local_mask, vector)) 231 + if (!__send_ipi_mask(mask, vector, true)) 255 232 orig_apic.send_IPI_mask_allbutself(mask, vector); 256 233 } 257 234 ··· 255 246 256 247 static void hv_send_ipi_all(int vector) 257 248 { 258 - if (!__send_ipi_mask(cpu_online_mask, vector)) 249 + if (!__send_ipi_mask(cpu_online_mask, vector, false)) 259 250 
orig_apic.send_IPI_all(vector); 260 251 } 261 252
+2 -2
arch/x86/include/asm/uaccess.h
··· 301 301 unsigned int __gu_low, __gu_high; \ 302 302 const unsigned int __user *__gu_ptr; \ 303 303 __gu_ptr = (const void __user *)(ptr); \ 304 - __get_user_asm(__gu_low, ptr, "l", "=r", label); \ 305 - __get_user_asm(__gu_high, ptr+1, "l", "=r", label); \ 304 + __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \ 305 + __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \ 306 306 (x) = ((unsigned long long)__gu_high << 32) | __gu_low; \ 307 307 } while (0) 308 308 #else
+32 -11
arch/x86/kernel/cpu/mce/core.c
··· 1253 1253 1254 1254 static void kill_me_now(struct callback_head *ch) 1255 1255 { 1256 + struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me); 1257 + 1258 + p->mce_count = 0; 1256 1259 force_sig(SIGBUS); 1257 1260 } 1258 1261 ··· 1265 1262 int flags = MF_ACTION_REQUIRED; 1266 1263 int ret; 1267 1264 1265 + p->mce_count = 0; 1268 1266 pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr); 1269 1267 1270 1268 if (!p->mce_ripv) ··· 1294 1290 } 1295 1291 } 1296 1292 1297 - static void queue_task_work(struct mce *m, int kill_current_task) 1293 + static void queue_task_work(struct mce *m, char *msg, int kill_current_task) 1298 1294 { 1299 - current->mce_addr = m->addr; 1300 - current->mce_kflags = m->kflags; 1301 - current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); 1302 - current->mce_whole_page = whole_page(m); 1295 + int count = ++current->mce_count; 1303 1296 1304 - if (kill_current_task) 1305 - current->mce_kill_me.func = kill_me_now; 1306 - else 1307 - current->mce_kill_me.func = kill_me_maybe; 1297 + /* First call, save all the details */ 1298 + if (count == 1) { 1299 + current->mce_addr = m->addr; 1300 + current->mce_kflags = m->kflags; 1301 + current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); 1302 + current->mce_whole_page = whole_page(m); 1303 + 1304 + if (kill_current_task) 1305 + current->mce_kill_me.func = kill_me_now; 1306 + else 1307 + current->mce_kill_me.func = kill_me_maybe; 1308 + } 1309 + 1310 + /* Ten is likely overkill. 
Don't expect more than two faults before task_work() */ 1311 + if (count > 10) 1312 + mce_panic("Too many consecutive machine checks while accessing user data", m, msg); 1313 + 1314 + /* Second or later call, make sure page address matches the one from first call */ 1315 + if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT)) 1316 + mce_panic("Consecutive machine checks to different user pages", m, msg); 1317 + 1318 + /* Do not call task_work_add() more than once */ 1319 + if (count > 1) 1320 + return; 1308 1321 1309 1322 task_work_add(current, &current->mce_kill_me, TWA_RESUME); 1310 1323 } ··· 1459 1438 /* If this triggers there is no way to recover. Die hard. */ 1460 1439 BUG_ON(!on_thread_stack() || !user_mode(regs)); 1461 1440 1462 - queue_task_work(&m, kill_current_task); 1441 + queue_task_work(&m, msg, kill_current_task); 1463 1442 1464 1443 } else { 1465 1444 /* ··· 1477 1456 } 1478 1457 1479 1458 if (m.kflags & MCE_IN_KERNEL_COPYIN) 1480 - queue_task_work(&m, kill_current_task); 1459 + queue_task_work(&m, msg, kill_current_task); 1481 1460 } 1482 1461 out: 1483 1462 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+1 -1
arch/x86/kernel/setup_percpu.c
··· 135 135 136 136 static void __init pcpu_fc_free(void *ptr, size_t size) 137 137 { 138 - memblock_free(__pa(ptr), size); 138 + memblock_free_ptr(ptr, size); 139 139 } 140 140 141 141 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+3 -3
arch/x86/mm/init_64.c
··· 1432 1432 return 0; 1433 1433 1434 1434 p4d = p4d_offset(pgd, addr); 1435 - if (p4d_none(*p4d)) 1435 + if (!p4d_present(*p4d)) 1436 1436 return 0; 1437 1437 1438 1438 pud = pud_offset(p4d, addr); 1439 - if (pud_none(*pud)) 1439 + if (!pud_present(*pud)) 1440 1440 return 0; 1441 1441 1442 1442 if (pud_large(*pud)) 1443 1443 return pfn_valid(pud_pfn(*pud)); 1444 1444 1445 1445 pmd = pmd_offset(pud, addr); 1446 - if (pmd_none(*pmd)) 1446 + if (!pmd_present(*pmd)) 1447 1447 return 0; 1448 1448 1449 1449 if (pmd_large(*pmd))
+2 -4
arch/x86/mm/kasan_init_64.c
··· 49 49 p = early_alloc(PMD_SIZE, nid, false); 50 50 if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) 51 51 return; 52 - else if (p) 53 - memblock_free(__pa(p), PMD_SIZE); 52 + memblock_free_ptr(p, PMD_SIZE); 54 53 } 55 54 56 55 p = early_alloc(PAGE_SIZE, nid, true); ··· 85 86 p = early_alloc(PUD_SIZE, nid, false); 86 87 if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) 87 88 return; 88 - else if (p) 89 - memblock_free(__pa(p), PUD_SIZE); 89 + memblock_free_ptr(p, PUD_SIZE); 90 90 } 91 91 92 92 p = early_alloc(PAGE_SIZE, nid, true);
+1 -1
arch/x86/mm/numa.c
··· 355 355 356 356 /* numa_distance could be 1LU marking allocation failure, test cnt */ 357 357 if (numa_distance_cnt) 358 - memblock_free(__pa(numa_distance), size); 358 + memblock_free_ptr(numa_distance, size); 359 359 numa_distance_cnt = 0; 360 360 numa_distance = NULL; /* enable table creation */ 361 361 }
+1 -2
arch/x86/mm/numa_emulation.c
··· 517 517 } 518 518 519 519 /* free the copied physical distance table */ 520 - if (phys_dist) 521 - memblock_free(__pa(phys_dist), phys_size); 520 + memblock_free_ptr(phys_dist, phys_size); 522 521 return; 523 522 524 523 no_emu:
+6 -1
arch/x86/mm/pat/memtype.c
··· 583 583 int err = 0; 584 584 585 585 start = sanitize_phys(start); 586 - end = sanitize_phys(end); 586 + 587 + /* 588 + * The end address passed into this function is exclusive, but 589 + * sanitize_phys() expects an inclusive address. 590 + */ 591 + end = sanitize_phys(end - 1) + 1; 587 592 if (start >= end) { 588 593 WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, 589 594 start, end - 1, cattr_name(req_type));
+7
arch/x86/xen/enlighten_pv.c
··· 1214 1214 x86_platform.legacy.rtc = 1; 1215 1215 } 1216 1216 1217 + static void __init xen_domu_set_legacy_features(void) 1218 + { 1219 + x86_platform.legacy.rtc = 0; 1220 + } 1221 + 1217 1222 /* First C function to be called on Xen boot */ 1218 1223 asmlinkage __visible void __init xen_start_kernel(void) 1219 1224 { ··· 1364 1359 add_preferred_console("xenboot", 0, NULL); 1365 1360 if (pci_xen) 1366 1361 x86_init.pci.arch_init = pci_xen_init; 1362 + x86_platform.set_legacy_features = 1363 + xen_domu_set_legacy_features; 1367 1364 } else { 1368 1365 const struct dom0_vga_console_info *info = 1369 1366 (void *)((char *)xen_start_info +
+5 -2
arch/x86/xen/mmu_pv.c
··· 1518 1518 if (pinned) { 1519 1519 struct page *page = pfn_to_page(pfn); 1520 1520 1521 - if (static_branch_likely(&xen_struct_pages_ready)) 1521 + pinned = false; 1522 + if (static_branch_likely(&xen_struct_pages_ready)) { 1523 + pinned = PagePinned(page); 1522 1524 SetPagePinned(page); 1525 + } 1523 1526 1524 1527 xen_mc_batch(); 1525 1528 1526 1529 __set_pfn_prot(pfn, PAGE_KERNEL_RO); 1527 1530 1528 - if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) 1531 + if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned) 1529 1532 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); 1530 1533 1531 1534 xen_mc_issue(PARAVIRT_LAZY_MMU);
+14 -4
block/blk-cgroup.c
··· 1182 1182 if (preloaded) 1183 1183 radix_tree_preload_end(); 1184 1184 1185 - ret = blk_iolatency_init(q); 1186 - if (ret) 1187 - goto err_destroy_all; 1188 - 1189 1185 ret = blk_ioprio_init(q); 1190 1186 if (ret) 1191 1187 goto err_destroy_all; ··· 1189 1193 ret = blk_throtl_init(q); 1190 1194 if (ret) 1191 1195 goto err_destroy_all; 1196 + 1197 + ret = blk_iolatency_init(q); 1198 + if (ret) { 1199 + blk_throtl_exit(q); 1200 + goto err_destroy_all; 1201 + } 1192 1202 1193 1203 return 0; 1194 1204 ··· 1366 1364 /* alloc failed, nothing's initialized yet, free everything */ 1367 1365 spin_lock_irq(&q->queue_lock); 1368 1366 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1367 + struct blkcg *blkcg = blkg->blkcg; 1368 + 1369 + spin_lock(&blkcg->lock); 1369 1370 if (blkg->pd[pol->plid]) { 1370 1371 pol->pd_free_fn(blkg->pd[pol->plid]); 1371 1372 blkg->pd[pol->plid] = NULL; 1372 1373 } 1374 + spin_unlock(&blkcg->lock); 1373 1375 } 1374 1376 spin_unlock_irq(&q->queue_lock); 1375 1377 ret = -ENOMEM; ··· 1405 1399 __clear_bit(pol->plid, q->blkcg_pols); 1406 1400 1407 1401 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1402 + struct blkcg *blkcg = blkg->blkcg; 1403 + 1404 + spin_lock(&blkcg->lock); 1408 1405 if (blkg->pd[pol->plid]) { 1409 1406 if (pol->pd_offline_fn) 1410 1407 pol->pd_offline_fn(blkg->pd[pol->plid]); 1411 1408 pol->pd_free_fn(blkg->pd[pol->plid]); 1412 1409 blkg->pd[pol->plid] = NULL; 1413 1410 } 1411 + spin_unlock(&blkcg->lock); 1414 1412 } 1415 1413 1416 1414 spin_unlock_irq(&q->queue_lock);
+8 -1
block/blk-integrity.c
··· 426 426 */ 427 427 void blk_integrity_unregister(struct gendisk *disk) 428 428 { 429 + struct blk_integrity *bi = &disk->queue->integrity; 430 + 431 + if (!bi->profile) 432 + return; 433 + 434 + /* ensure all bios are off the integrity workqueue */ 435 + blk_flush_integrity(); 429 436 blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue); 430 - memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity)); 437 + memset(bi, 0, sizeof(*bi)); 431 438 } 432 439 EXPORT_SYMBOL(blk_integrity_unregister); 433 440
+1 -1
block/blk-mq-tag.c
··· 208 208 209 209 spin_lock_irqsave(&tags->lock, flags); 210 210 rq = tags->rqs[bitnr]; 211 - if (!rq || !refcount_inc_not_zero(&rq->ref)) 211 + if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref)) 212 212 rq = NULL; 213 213 spin_unlock_irqrestore(&tags->lock, flags); 214 214 return rq;
+1 -1
drivers/base/arch_numa.c
··· 264 264 size = numa_distance_cnt * numa_distance_cnt * 265 265 sizeof(numa_distance[0]); 266 266 267 - memblock_free(__pa(numa_distance), size); 267 + memblock_free_ptr(numa_distance, size); 268 268 numa_distance_cnt = 0; 269 269 numa_distance = NULL; 270 270 }
+10
drivers/base/power/trace.c
··· 13 13 #include <linux/export.h> 14 14 #include <linux/rtc.h> 15 15 #include <linux/suspend.h> 16 + #include <linux/init.h> 16 17 17 18 #include <linux/mc146818rtc.h> 18 19 ··· 166 165 const char *file = *(const char **)(tracedata + 2); 167 166 unsigned int user_hash_value, file_hash_value; 168 167 168 + if (!x86_platform.legacy.rtc) 169 + return; 170 + 169 171 user_hash_value = user % USERHASH; 170 172 file_hash_value = hash_string(lineno, file, FILEHASH); 171 173 set_magic_time(user_hash_value, file_hash_value, dev_hash_value); ··· 271 267 272 268 static int __init early_resume_init(void) 273 269 { 270 + if (!x86_platform.legacy.rtc) 271 + return 0; 272 + 274 273 hash_value_early_read = read_magic_time(); 275 274 register_pm_notifier(&pm_trace_nb); 276 275 return 0; ··· 283 276 { 284 277 unsigned int val = hash_value_early_read; 285 278 unsigned int user, file, dev; 279 + 280 + if (!x86_platform.legacy.rtc) 281 + return 0; 286 282 287 283 user = val % USERHASH; 288 284 val = val / USERHASH;
+1 -1
drivers/cpufreq/cpufreq_governor_attr_set.c
··· 74 74 if (count) 75 75 return count; 76 76 77 - kobject_put(&attr_set->kobj); 78 77 mutex_destroy(&attr_set->update_lock); 78 + kobject_put(&attr_set->kobj); 79 79 return 0; 80 80 } 81 81 EXPORT_SYMBOL_GPL(gov_attr_set_put);
+14 -8
drivers/cpufreq/intel_pstate.c
··· 3205 3205 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 3206 3206 return -ENODEV; 3207 3207 3208 - if (no_load) 3209 - return -ENODEV; 3210 - 3211 3208 id = x86_match_cpu(hwp_support_ids); 3212 3209 if (id) { 3210 + bool hwp_forced = intel_pstate_hwp_is_enabled(); 3211 + 3212 + if (hwp_forced) 3213 + pr_info("HWP enabled by BIOS\n"); 3214 + else if (no_load) 3215 + return -ENODEV; 3216 + 3213 3217 copy_cpu_funcs(&core_funcs); 3214 3218 /* 3215 3219 * Avoid enabling HWP for processors without EPP support, ··· 3223 3219 * If HWP is enabled already, though, there is no choice but to 3224 3220 * deal with it. 3225 3221 */ 3226 - if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || 3227 - intel_pstate_hwp_is_enabled()) { 3222 + if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { 3228 3223 hwp_active++; 3229 3224 hwp_mode_bdw = id->driver_data; 3230 3225 intel_pstate.attr = hwp_cpufreq_attrs; ··· 3238 3235 3239 3236 goto hwp_cpu_matched; 3240 3237 } 3238 + pr_info("HWP not enabled\n"); 3241 3239 } else { 3240 + if (no_load) 3241 + return -ENODEV; 3242 + 3242 3243 id = x86_match_cpu(intel_pstate_cpu_ids); 3243 3244 if (!id) { 3244 3245 pr_info("CPU model not supported\n"); ··· 3321 3314 else if (!strcmp(str, "passive")) 3322 3315 default_driver = &intel_cpufreq; 3323 3316 3324 - if (!strcmp(str, "no_hwp")) { 3325 - pr_info("HWP disabled\n"); 3317 + if (!strcmp(str, "no_hwp")) 3326 3318 no_hwp = 1; 3327 - } 3319 + 3328 3320 if (!strcmp(str, "force")) 3329 3321 force_load = 1; 3330 3322 if (!strcmp(str, "hwp_only"))
-1
drivers/cpufreq/vexpress-spc-cpufreq.c
··· 451 451 static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) 452 452 { 453 453 struct device *cpu_dev; 454 - int cur_cluster = cpu_to_cluster(policy->cpu); 455 454 456 455 cpu_dev = get_cpu_device(policy->cpu); 457 456 if (!cpu_dev) {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 758 758 MAX_HWIP 759 759 }; 760 760 761 - #define HWIP_MAX_INSTANCE 8 761 + #define HWIP_MAX_INSTANCE 10 762 762 763 763 struct amd_powerplay { 764 764 void *pp_handle;
+10
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 192 192 kgd2kfd_suspend(adev->kfd.dev, run_pm); 193 193 } 194 194 195 + int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev) 196 + { 197 + int r = 0; 198 + 199 + if (adev->kfd.dev) 200 + r = kgd2kfd_resume_iommu(adev->kfd.dev); 201 + 202 + return r; 203 + } 204 + 195 205 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm) 196 206 { 197 207 int r = 0;
+7
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
··· 137 137 void amdgpu_amdkfd_fini(void); 138 138 139 139 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm); 140 + int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev); 140 141 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm); 141 142 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, 142 143 const void *ih_ring_entry); ··· 328 327 const struct kgd2kfd_shared_resources *gpu_resources); 329 328 void kgd2kfd_device_exit(struct kfd_dev *kfd); 330 329 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm); 330 + int kgd2kfd_resume_iommu(struct kfd_dev *kfd); 331 331 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm); 332 332 int kgd2kfd_pre_reset(struct kfd_dev *kfd); 333 333 int kgd2kfd_post_reset(struct kfd_dev *kfd); ··· 365 363 366 364 static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) 367 365 { 366 + } 367 + 368 + static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd) 369 + { 370 + return 0; 368 371 } 369 372 370 373 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+4 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
··· 1544 1544 struct dentry *ent; 1545 1545 int r, i; 1546 1546 1547 - 1548 - 1549 1547 ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, 1550 1548 &fops_ib_preempt); 1551 - if (!ent) { 1549 + if (IS_ERR(ent)) { 1552 1550 DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); 1553 - return -EIO; 1551 + return PTR_ERR(ent); 1554 1552 } 1555 1553 1556 1554 ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, 1557 1555 &fops_sclk_set); 1558 - if (!ent) { 1556 + if (IS_ERR(ent)) { 1559 1557 DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); 1560 - return -EIO; 1558 + return PTR_ERR(ent); 1561 1559 } 1562 1560 1563 1561 /* Register debugfs entries for amdgpu_ttm */
+12
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2394 2394 if (r) 2395 2395 goto init_failed; 2396 2396 2397 + r = amdgpu_amdkfd_resume_iommu(adev); 2398 + if (r) 2399 + goto init_failed; 2400 + 2397 2401 r = amdgpu_device_ip_hw_init_phase1(adev); 2398 2402 if (r) 2399 2403 goto init_failed; ··· 3151 3147 static int amdgpu_device_ip_resume(struct amdgpu_device *adev) 3152 3148 { 3153 3149 int r; 3150 + 3151 + r = amdgpu_amdkfd_resume_iommu(adev); 3152 + if (r) 3153 + return r; 3154 3154 3155 3155 r = amdgpu_device_ip_resume_phase1(adev); 3156 3156 if (r) ··· 4609 4601 dev_warn(tmp_adev->dev, "asic atom init failed!"); 4610 4602 } else { 4611 4603 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); 4604 + r = amdgpu_amdkfd_resume_iommu(tmp_adev); 4605 + if (r) 4606 + goto out; 4607 + 4612 4608 r = amdgpu_device_ip_resume_phase1(tmp_adev); 4613 4609 if (r) 4614 4610 goto out;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 598 598 break; 599 599 default: 600 600 adev->gmc.tmz_enabled = false; 601 - dev_warn(adev->dev, 601 + dev_info(adev->dev, 602 602 "Trusted Memory Zone (TMZ) feature not supported\n"); 603 603 break; 604 604 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
··· 757 757 return res; 758 758 } 759 759 760 - inline uint32_t amdgpu_ras_eeprom_max_record_count(void) 760 + uint32_t amdgpu_ras_eeprom_max_record_count(void) 761 761 { 762 762 return RAS_MAX_RECORD_COUNT; 763 763 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
··· 120 120 int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, 121 121 struct eeprom_table_record *records, const u32 num); 122 122 123 - inline uint32_t amdgpu_ras_eeprom_max_record_count(void); 123 + uint32_t amdgpu_ras_eeprom_max_record_count(void); 124 124 125 125 void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); 126 126
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 428 428 ent = debugfs_create_file(name, 429 429 S_IFREG | S_IRUGO, root, 430 430 ring, &amdgpu_debugfs_ring_fops); 431 - if (!ent) 432 - return -ENOMEM; 431 + if (IS_ERR(ent)) 432 + return PTR_ERR(ent); 433 433 434 434 i_size_write(ent->d_inode, ring->ring_size + 12); 435 435 ring->ent = ent;
+9 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 515 515 goto out; 516 516 } 517 517 518 + if (bo->type == ttm_bo_type_device && 519 + new_mem->mem_type == TTM_PL_VRAM && 520 + old_mem->mem_type != TTM_PL_VRAM) { 521 + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU 522 + * accesses the BO after it's moved. 523 + */ 524 + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 525 + } 526 + 518 527 if (adev->mman.buffer_funcs_enabled) { 519 528 if (((old_mem->mem_type == TTM_PL_SYSTEM && 520 529 new_mem->mem_type == TTM_PL_VRAM) || ··· 552 543 r = ttm_bo_move_memcpy(bo, ctx, new_mem); 553 544 if (r) 554 545 return r; 555 - } 556 - 557 - if (bo->type == ttm_bo_type_device && 558 - new_mem->mem_type == TTM_PL_VRAM && 559 - old_mem->mem_type != TTM_PL_VRAM) { 560 - /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU 561 - * accesses the BO after it's moved. 562 - */ 563 - abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 564 546 } 565 547 566 548 out:
+36 -20
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 468 468 .needs_iommu_device = false, 469 469 .supports_cwsr = true, 470 470 .needs_pci_atomics = true, 471 + .no_atomic_fw_version = 145, 471 472 .num_sdma_engines = 2, 472 473 .num_xgmi_sdma_engines = 0, 473 474 .num_sdma_queues_per_engine = 8, ··· 488 487 .needs_iommu_device = false, 489 488 .supports_cwsr = true, 490 489 .needs_pci_atomics = true, 490 + .no_atomic_fw_version = 145, 491 491 .num_sdma_engines = 2, 492 492 .num_xgmi_sdma_engines = 0, 493 493 .num_sdma_queues_per_engine = 8, ··· 508 506 .needs_iommu_device = false, 509 507 .supports_cwsr = true, 510 508 .needs_pci_atomics = true, 509 + .no_atomic_fw_version = 145, 511 510 .num_sdma_engines = 2, 512 511 .num_xgmi_sdma_engines = 0, 513 512 .num_sdma_queues_per_engine = 8, ··· 528 525 .needs_iommu_device = false, 529 526 .supports_cwsr = true, 530 527 .needs_pci_atomics = true, 528 + .no_atomic_fw_version = 92, 531 529 .num_sdma_engines = 4, 532 530 .num_xgmi_sdma_engines = 0, 533 531 .num_sdma_queues_per_engine = 8, ··· 548 544 .needs_iommu_device = false, 549 545 .supports_cwsr = true, 550 546 .needs_pci_atomics = true, 547 + .no_atomic_fw_version = 92, 551 548 .num_sdma_engines = 2, 552 549 .num_xgmi_sdma_engines = 0, 553 550 .num_sdma_queues_per_engine = 8, ··· 567 562 .mqd_size_aligned = MQD_SIZE_ALIGNED, 568 563 .needs_iommu_device = false, 569 564 .supports_cwsr = true, 570 - .needs_pci_atomics = false, 565 + .needs_pci_atomics = true, 566 + .no_atomic_fw_version = 92, 571 567 .num_sdma_engines = 1, 572 568 .num_xgmi_sdma_engines = 0, 573 569 .num_sdma_queues_per_engine = 2, ··· 588 582 .needs_iommu_device = false, 589 583 .supports_cwsr = true, 590 584 .needs_pci_atomics = true, 585 + .no_atomic_fw_version = 92, 591 586 .num_sdma_engines = 2, 592 587 .num_xgmi_sdma_engines = 0, 593 588 .num_sdma_queues_per_engine = 8, ··· 608 601 .needs_iommu_device = false, 609 602 .supports_cwsr = true, 610 603 .needs_pci_atomics = true, 604 + .no_atomic_fw_version = 92, 611 605 .num_sdma_engines = 1, 612 
606 .num_xgmi_sdma_engines = 0, 613 607 .num_sdma_queues_per_engine = 8, ··· 627 619 .mqd_size_aligned = MQD_SIZE_ALIGNED, 628 620 .needs_iommu_device = false, 629 621 .supports_cwsr = true, 630 - .needs_pci_atomics = false, 622 + .needs_pci_atomics = true, 623 + .no_atomic_fw_version = 92, 631 624 .num_sdma_engines = 1, 632 625 .num_xgmi_sdma_engines = 0, 633 626 .num_sdma_queues_per_engine = 2, ··· 716 707 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); 717 708 if (!kfd) 718 709 return NULL; 719 - 720 - /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 721 - * 32 and 64-bit requests are possible and must be 722 - * supported. 723 - */ 724 - kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd); 725 - if (device_info->needs_pci_atomics && 726 - !kfd->pci_atomic_requested) { 727 - dev_info(kfd_device, 728 - "skipped device %x:%x, PCI rejects atomics\n", 729 - pdev->vendor, pdev->device); 730 - kfree(kfd); 731 - return NULL; 732 - } 733 710 734 711 kfd->kgd = kgd; 735 712 kfd->device_info = device_info; ··· 815 820 kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; 816 821 kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd 817 822 - kfd->vm_info.first_vmid_kfd + 1; 823 + 824 + /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 825 + * 32 and 64-bit requests are possible and must be 826 + * supported. 
827 + */ 828 + kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd); 829 + if (!kfd->pci_atomic_requested && 830 + kfd->device_info->needs_pci_atomics && 831 + (!kfd->device_info->no_atomic_fw_version || 832 + kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) { 833 + dev_info(kfd_device, 834 + "skipped device %x:%x, PCI rejects atomics %d<%d\n", 835 + kfd->pdev->vendor, kfd->pdev->device, 836 + kfd->mec_fw_version, 837 + kfd->device_info->no_atomic_fw_version); 838 + return false; 839 + } 818 840 819 841 /* Verify module parameters regarding mapped process number*/ 820 842 if ((hws_max_conc_proc < 0) ··· 1069 1057 return ret; 1070 1058 } 1071 1059 1072 - static int kfd_resume(struct kfd_dev *kfd) 1060 + int kgd2kfd_resume_iommu(struct kfd_dev *kfd) 1073 1061 { 1074 1062 int err = 0; 1075 1063 1076 1064 err = kfd_iommu_resume(kfd); 1077 - if (err) { 1065 + if (err) 1078 1066 dev_err(kfd_device, 1079 1067 "Failed to resume IOMMU for device %x:%x\n", 1080 1068 kfd->pdev->vendor, kfd->pdev->device); 1081 - return err; 1082 - } 1069 + return err; 1070 + } 1071 + 1072 + static int kfd_resume(struct kfd_dev *kfd) 1073 + { 1074 + int err = 0; 1083 1075 1084 1076 err = kfd->dqm->ops.start(kfd->dqm); 1085 1077 if (err) {
+1
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 207 207 bool supports_cwsr; 208 208 bool needs_iommu_device; 209 209 bool needs_pci_atomics; 210 + uint32_t no_atomic_fw_version; 210 211 unsigned int num_sdma_engines; 211 212 unsigned int num_xgmi_sdma_engines; 212 213 unsigned int num_sdma_queues_per_engine;
+86 -23
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 998 998 uint32_t agp_base, agp_bot, agp_top; 999 999 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; 1000 1000 1001 + memset(pa_config, 0, sizeof(*pa_config)); 1002 + 1001 1003 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; 1002 1004 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); 1003 1005 ··· 6026 6024 return 0; 6027 6025 6028 6026 #if defined(CONFIG_DRM_AMD_DC_DCN) 6029 - work = kzalloc(sizeof(*work), GFP_ATOMIC); 6030 - if (!work) 6031 - return -ENOMEM; 6027 + if (dm->vblank_control_workqueue) { 6028 + work = kzalloc(sizeof(*work), GFP_ATOMIC); 6029 + if (!work) 6030 + return -ENOMEM; 6032 6031 6033 - INIT_WORK(&work->work, vblank_control_worker); 6034 - work->dm = dm; 6035 - work->acrtc = acrtc; 6036 - work->enable = enable; 6032 + INIT_WORK(&work->work, vblank_control_worker); 6033 + work->dm = dm; 6034 + work->acrtc = acrtc; 6035 + work->enable = enable; 6037 6036 6038 - if (acrtc_state->stream) { 6039 - dc_stream_retain(acrtc_state->stream); 6040 - work->stream = acrtc_state->stream; 6037 + if (acrtc_state->stream) { 6038 + dc_stream_retain(acrtc_state->stream); 6039 + work->stream = acrtc_state->stream; 6040 + } 6041 + 6042 + queue_work(dm->vblank_control_workqueue, &work->work); 6041 6043 } 6042 - 6043 - queue_work(dm->vblank_control_workqueue, &work->work); 6044 6044 #endif 6045 6045 6046 6046 return 0; ··· 6796 6792 6797 6793 #if defined(CONFIG_DRM_AMD_DC_DCN) 6798 6794 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 6799 - struct dc_state *dc_state) 6795 + struct dc_state *dc_state, 6796 + struct dsc_mst_fairness_vars *vars) 6800 6797 { 6801 6798 struct dc_stream_state *stream = NULL; 6802 6799 struct drm_connector *connector; 6803 6800 struct drm_connector_state *new_con_state; 6804 6801 struct amdgpu_dm_connector *aconnector; 6805 6802 struct dm_connector_state *dm_conn_state; 6806 - int i, j, clock, bpp; 6803 + int i, j, clock; 6807 6804 int vcpi, pbn_div, pbn = 0; 6808 
6805 6809 6806 for_each_new_connector_in_state(state, connector, new_con_state, i) { ··· 6843 6838 } 6844 6839 6845 6840 pbn_div = dm_mst_get_pbn_divider(stream->link); 6846 - bpp = stream->timing.dsc_cfg.bits_per_pixel; 6847 6841 clock = stream->timing.pix_clk_100hz / 10; 6848 - pbn = drm_dp_calc_pbn_mode(clock, bpp, true); 6842 + /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 6843 + for (j = 0; j < dc_state->stream_count; j++) { 6844 + if (vars[j].aconnector == aconnector) { 6845 + pbn = vars[j].pbn; 6846 + break; 6847 + } 6848 + } 6849 + 6849 6850 vcpi = drm_dp_mst_atomic_enable_dsc(state, 6850 6851 aconnector->port, 6851 6852 pbn, pbn_div, ··· 7530 7519 } 7531 7520 } 7532 7521 7522 + static void amdgpu_set_panel_orientation(struct drm_connector *connector) 7523 + { 7524 + struct drm_encoder *encoder; 7525 + struct amdgpu_encoder *amdgpu_encoder; 7526 + const struct drm_display_mode *native_mode; 7527 + 7528 + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 7529 + connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 7530 + return; 7531 + 7532 + encoder = amdgpu_dm_connector_to_encoder(connector); 7533 + if (!encoder) 7534 + return; 7535 + 7536 + amdgpu_encoder = to_amdgpu_encoder(encoder); 7537 + 7538 + native_mode = &amdgpu_encoder->native_mode; 7539 + if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 7540 + return; 7541 + 7542 + drm_connector_set_panel_orientation_with_quirk(connector, 7543 + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 7544 + native_mode->hdisplay, 7545 + native_mode->vdisplay); 7546 + } 7547 + 7533 7548 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 7534 7549 struct edid *edid) 7535 7550 { ··· 7584 7547 * restored here. 
7585 7548 */ 7586 7549 amdgpu_dm_update_freesync_caps(connector, edid); 7550 + 7551 + amdgpu_set_panel_orientation(connector); 7587 7552 } else { 7588 7553 amdgpu_dm_connector->num_modes = 0; 7589 7554 } ··· 8097 8058 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 8098 8059 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8099 8060 8100 - /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled 8101 - * hot-plug, headless s3, dpms 8061 + /* Stream removed and re-enabled 8062 + * 8063 + * Can sometimes overlap with the HPD case, 8064 + * thus set update_hdcp to false to avoid 8065 + * setting HDCP multiple times. 8066 + * 8067 + * Handles: DESIRED -> DESIRED (Special case) 8068 + */ 8069 + if (!(old_state->crtc && old_state->crtc->enabled) && 8070 + state->crtc && state->crtc->enabled && 8071 + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8072 + dm_con_state->update_hdcp = false; 8073 + return true; 8074 + } 8075 + 8076 + /* Hot-plug, headless s3, dpms 8077 + * 8078 + * Only start HDCP if the display is connected/enabled. 8079 + * update_hdcp flag will be set to false until the next 8080 + * HPD comes in. 8102 8081 * 8103 8082 * Handles: DESIRED -> DESIRED (Special case) 8104 8083 */ ··· 8705 8648 * If PSR or idle optimizations are enabled then flush out 8706 8649 * any pending work before hardware programming. 
8707 8650 */ 8708 - flush_workqueue(dm->vblank_control_workqueue); 8651 + if (dm->vblank_control_workqueue) 8652 + flush_workqueue(dm->vblank_control_workqueue); 8709 8653 #endif 8710 8654 8711 8655 bundle->stream_update.stream = acrtc_state->stream; ··· 9041 8983 /* if there mode set or reset, disable eDP PSR */ 9042 8984 if (mode_set_reset_required) { 9043 8985 #if defined(CONFIG_DRM_AMD_DC_DCN) 9044 - flush_workqueue(dm->vblank_control_workqueue); 8986 + if (dm->vblank_control_workqueue) 8987 + flush_workqueue(dm->vblank_control_workqueue); 9045 8988 #endif 9046 8989 amdgpu_dm_psr_disable_all(dm); 9047 8990 } ··· 10302 10243 int ret, i; 10303 10244 bool lock_and_validation_needed = false; 10304 10245 struct dm_crtc_state *dm_old_crtc_state; 10246 + #if defined(CONFIG_DRM_AMD_DC_DCN) 10247 + struct dsc_mst_fairness_vars vars[MAX_PIPES]; 10248 + #endif 10305 10249 10306 10250 trace_amdgpu_dm_atomic_check_begin(state); 10307 10251 ··· 10535 10473 goto fail; 10536 10474 10537 10475 #if defined(CONFIG_DRM_AMD_DC_DCN) 10538 - if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) 10476 + if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) 10539 10477 goto fail; 10540 10478 10541 - ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context); 10479 + ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); 10542 10480 if (ret) 10543 10481 goto fail; 10544 10482 #endif ··· 10554 10492 goto fail; 10555 10493 status = dc_validate_global_state(dc, dm_state->context, false); 10556 10494 if (status != DC_OK) { 10557 - DC_LOG_WARNING("DC global validation failure: %s (%d)", 10495 + drm_dbg_atomic(dev, 10496 + "DC global validation failure: %s (%d)", 10558 10497 dc_status_to_str(status), status); 10559 10498 ret = -EINVAL; 10560 10499 goto fail;
+8 -10
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 518 518 uint32_t num_slices_h; 519 519 uint32_t num_slices_v; 520 520 uint32_t bpp_overwrite; 521 - }; 522 - 523 - struct dsc_mst_fairness_vars { 524 - int pbn; 525 - bool dsc_enabled; 526 - int bpp_x16; 521 + struct amdgpu_dm_connector *aconnector; 527 522 }; 528 523 529 524 static int kbps_to_peak_pbn(int kbps) ··· 745 750 746 751 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, 747 752 struct dc_state *dc_state, 748 - struct dc_link *dc_link) 753 + struct dc_link *dc_link, 754 + struct dsc_mst_fairness_vars *vars) 749 755 { 750 756 int i; 751 757 struct dc_stream_state *stream; 752 758 struct dsc_mst_fairness_params params[MAX_PIPES]; 753 - struct dsc_mst_fairness_vars vars[MAX_PIPES]; 754 759 struct amdgpu_dm_connector *aconnector; 755 760 int count = 0; 756 761 bool debugfs_overwrite = false; ··· 771 776 params[count].timing = &stream->timing; 772 777 params[count].sink = stream->sink; 773 778 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 779 + params[count].aconnector = aconnector; 774 780 params[count].port = aconnector->port; 775 781 params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; 776 782 if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) ··· 794 798 } 795 799 /* Try no compression */ 796 800 for (i = 0; i < count; i++) { 801 + vars[i].aconnector = params[i].aconnector; 797 802 vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); 798 803 vars[i].dsc_enabled = false; 799 804 vars[i].bpp_x16 = 0; ··· 848 851 } 849 852 850 853 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 851 - struct dc_state *dc_state) 854 + struct dc_state *dc_state, 855 + struct dsc_mst_fairness_vars *vars) 852 856 { 853 857 int i, j; 854 858 struct dc_stream_state *stream; ··· 880 882 return false; 881 883 882 884 mutex_lock(&aconnector->mst_mgr.lock); 883 - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { 885 + if 
(!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) { 884 886 mutex_unlock(&aconnector->mst_mgr.lock); 885 887 return false; 886 888 }
+10 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
··· 39 39 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); 40 40 41 41 #if defined(CONFIG_DRM_AMD_DC_DCN) 42 + 43 + struct dsc_mst_fairness_vars { 44 + int pbn; 45 + bool dsc_enabled; 46 + int bpp_x16; 47 + struct amdgpu_dm_connector *aconnector; 48 + }; 49 + 42 50 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 43 - struct dc_state *dc_state); 51 + struct dc_state *dc_state, 52 + struct dsc_mst_fairness_vars *vars); 44 53 #endif 45 54 46 55 #endif
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
··· 62 62 depth = *pcpu; 63 63 put_cpu_ptr(&fpu_recursion_depth); 64 64 65 - ASSERT(depth > 1); 65 + ASSERT(depth >= 1); 66 66 } 67 67 68 68 /**
+12 -4
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 2586 2586 2587 2587 int dc_link_get_backlight_level(const struct dc_link *link) 2588 2588 { 2589 - 2590 2589 struct abm *abm = get_abm_from_stream_res(link); 2590 + struct panel_cntl *panel_cntl = link->panel_cntl; 2591 + struct dc *dc = link->ctx->dc; 2592 + struct dmcu *dmcu = dc->res_pool->dmcu; 2593 + bool fw_set_brightness = true; 2591 2594 2592 - if (abm == NULL || abm->funcs->get_current_backlight == NULL) 2595 + if (dmcu) 2596 + fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 2597 + 2598 + if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight) 2599 + return panel_cntl->funcs->get_current_backlight(panel_cntl); 2600 + else if (abm != NULL && abm->funcs->get_current_backlight != NULL) 2601 + return (int) abm->funcs->get_current_backlight(abm); 2602 + else 2593 2603 return DC_ERROR_UNEXPECTED; 2594 - 2595 - return (int) abm->funcs->get_current_backlight(abm); 2596 2604 } 2597 2605 2598 2606 int dc_link_get_target_backlight_pwm(const struct dc_link *link)
-10
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
··· 49 49 static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) 50 50 { 51 51 uint64_t current_backlight; 52 - uint32_t round_result; 53 52 uint32_t bl_period, bl_int_count; 54 53 uint32_t bl_pwm, fractional_duty_cycle_en; 55 54 uint32_t bl_period_mask, bl_pwm_mask; ··· 82 83 83 84 current_backlight = div_u64(current_backlight, bl_period); 84 85 current_backlight = (current_backlight + 1) >> 1; 85 - 86 - current_backlight = (uint64_t)(current_backlight) * bl_period; 87 - 88 - round_result = (uint32_t)(current_backlight & 0xFFFFFFFF); 89 - 90 - round_result = (round_result >> (bl_int_count-1)) & 1; 91 - 92 - current_backlight >>= bl_int_count; 93 - current_backlight += round_result; 94 86 95 87 return (uint32_t)(current_backlight); 96 88 }
+35 -51
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
··· 33 33 #define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging 34 34 #define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible 35 35 #define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible 36 - #define TABLE_COUNT 6 36 + #define TABLE_SMU_METRICS 6 // Called by Driver 37 + #define TABLE_COUNT 7 37 38 38 - #define NUM_DSPCLK_LEVELS 8 39 - #define NUM_SOCCLK_DPM_LEVELS 8 40 - #define NUM_DCEFCLK_DPM_LEVELS 4 41 - #define NUM_FCLK_DPM_LEVELS 4 42 - #define NUM_MEMCLK_DPM_LEVELS 4 39 + typedef struct SmuMetricsTable_t { 40 + //CPU status 41 + uint16_t CoreFrequency[6]; //[MHz] 42 + uint32_t CorePower[6]; //[mW] 43 + uint16_t CoreTemperature[6]; //[centi-Celsius] 44 + uint16_t L3Frequency[2]; //[MHz] 45 + uint16_t L3Temperature[2]; //[centi-Celsius] 46 + uint16_t C0Residency[6]; //Percentage 43 47 44 - #define NUMBER_OF_PSTATES 8 45 - #define NUMBER_OF_CORES 8 48 + // GFX status 49 + uint16_t GfxclkFrequency; //[MHz] 50 + uint16_t GfxTemperature; //[centi-Celsius] 46 51 47 - typedef enum { 48 - S3_TYPE_ENTRY, 49 - S5_TYPE_ENTRY, 50 - } Sleep_Type_e; 52 + // SOC IP info 53 + uint16_t SocclkFrequency; //[MHz] 54 + uint16_t VclkFrequency; //[MHz] 55 + uint16_t DclkFrequency; //[MHz] 56 + uint16_t MemclkFrequency; //[MHz] 51 57 52 - typedef enum { 53 - GFX_OFF = 0, 54 - GFX_ON = 1, 55 - } GFX_Mode_e; 58 + // power, VF info for CPU/GFX telemetry rails, and then socket power total 59 + uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX 60 + uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX 61 + uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX 62 + uint32_t CurrentSocketPower; //[mW] 56 63 57 - typedef enum { 58 - CPU_P0 = 0, 59 - CPU_P1, 60 - CPU_P2, 61 - CPU_P3, 62 - CPU_P4, 63 - CPU_P5, 64 - CPU_P6, 65 - CPU_P7 66 - } CPU_PState_e; 64 + uint16_t SocTemperature; //[centi-Celsius] 65 + uint16_t EdgeTemperature; 66 + uint16_t ThrottlerStatus; 67 + uint16_t 
Spare; 67 68 68 - typedef enum { 69 - CPU_CORE0 = 0, 70 - CPU_CORE1, 71 - CPU_CORE2, 72 - CPU_CORE3, 73 - CPU_CORE4, 74 - CPU_CORE5, 75 - CPU_CORE6, 76 - CPU_CORE7 77 - } CORE_ID_e; 69 + } SmuMetricsTable_t; 78 70 79 - typedef enum { 80 - DF_DPM0 = 0, 81 - DF_DPM1, 82 - DF_DPM2, 83 - DF_DPM3, 84 - DF_PState_Count 85 - } DF_PState_e; 86 - 87 - typedef enum { 88 - GFX_DPM0 = 0, 89 - GFX_DPM1, 90 - GFX_DPM2, 91 - GFX_DPM3, 92 - GFX_PState_Count 93 - } GFX_PState_e; 71 + typedef struct SmuMetrics_t { 72 + SmuMetricsTable_t Current; 73 + SmuMetricsTable_t Average; 74 + uint32_t SampleStartTime; 75 + uint32_t SampleStopTime; 76 + uint32_t Accnt; 77 + } SmuMetrics_t; 94 78 95 79 #endif
+4 -1
drivers/gpu/drm/amd/pm/inc/smu_types.h
··· 226 226 __SMU_DUMMY_MAP(SetUclkDpmMode), \ 227 227 __SMU_DUMMY_MAP(LightSBR), \ 228 228 __SMU_DUMMY_MAP(GfxDriverResetRecovery), \ 229 - __SMU_DUMMY_MAP(BoardPowerCalibration), 229 + __SMU_DUMMY_MAP(BoardPowerCalibration), \ 230 + __SMU_DUMMY_MAP(RequestGfxclk), \ 231 + __SMU_DUMMY_MAP(ForceGfxVid), \ 232 + __SMU_DUMMY_MAP(UnforceGfxVid), 230 233 231 234 #undef __SMU_DUMMY_MAP 232 235 #define __SMU_DUMMY_MAP(type) SMU_MSG_##type
+8 -1
drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
··· 65 65 #define PPSMC_MSG_SetDriverTableVMID 0x34 66 66 #define PPSMC_MSG_SetSoftMinCclk 0x35 67 67 #define PPSMC_MSG_SetSoftMaxCclk 0x36 68 - #define PPSMC_Message_Count 0x37 68 + #define PPSMC_MSG_GetGfxFrequency 0x37 69 + #define PPSMC_MSG_GetGfxVid 0x38 70 + #define PPSMC_MSG_ForceGfxFreq 0x39 71 + #define PPSMC_MSG_UnForceGfxFreq 0x3A 72 + #define PPSMC_MSG_ForceGfxVid 0x3B 73 + #define PPSMC_MSG_UnforceGfxVid 0x3C 74 + #define PPSMC_MSG_GetEnabledSmuFeatures 0x3D 75 + #define PPSMC_Message_Count 0x3E 69 76 70 77 #endif
+1 -1
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 1404 1404 */ 1405 1405 if (smu->uploading_custom_pp_table && 1406 1406 (adev->asic_type >= CHIP_NAVI10) && 1407 - (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) 1407 + (adev->asic_type <= CHIP_BEIGE_GOBY)) 1408 1408 return smu_disable_all_features_with_exception(smu, 1409 1409 true, 1410 1410 SMU_FEATURE_COUNT);
+6 -2
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
··· 771 771 struct smu_11_0_dpm_context *dpm_context = NULL; 772 772 uint32_t gen_speed, lane_width; 773 773 774 - if (amdgpu_ras_intr_triggered()) 775 - return sysfs_emit(buf, "unavailable\n"); 774 + smu_cmn_get_sysfs_buf(&buf, &size); 775 + 776 + if (amdgpu_ras_intr_triggered()) { 777 + size += sysfs_emit_at(buf, size, "unavailable\n"); 778 + return size; 779 + } 776 780 777 781 dpm_context = smu_dpm->dpm_context; 778 782
+481
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
··· 44 44 #undef pr_info 45 45 #undef pr_debug 46 46 47 + /* unit: MHz */ 48 + #define CYAN_SKILLFISH_SCLK_MIN 1000 49 + #define CYAN_SKILLFISH_SCLK_MAX 2000 50 + #define CYAN_SKILLFISH_SCLK_DEFAULT 1800 51 + 52 + /* unit: mV */ 53 + #define CYAN_SKILLFISH_VDDC_MIN 700 54 + #define CYAN_SKILLFISH_VDDC_MAX 1129 55 + #define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe 56 + 57 + static struct gfx_user_settings { 58 + uint32_t sclk; 59 + uint32_t vddc; 60 + } cyan_skillfish_user_settings; 61 + 62 + #define FEATURE_MASK(feature) (1ULL << feature) 63 + #define SMC_DPM_FEATURE ( \ 64 + FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 65 + FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \ 66 + FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 67 + 47 68 static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = { 48 69 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), 49 70 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), ··· 73 52 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0), 74 53 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), 75 54 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 55 + MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), 56 + MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0), 57 + MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0), 58 + MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0), 76 59 }; 60 + 61 + static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = { 62 + TAB_MAP_VALID(SMU_METRICS), 63 + }; 64 + 65 + static int cyan_skillfish_tables_init(struct smu_context *smu) 66 + { 67 + struct smu_table_context *smu_table = &smu->smu_table; 68 + struct smu_table *tables = smu_table->tables; 69 + 70 + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, 71 + sizeof(SmuMetrics_t), 72 + PAGE_SIZE, 73 + AMDGPU_GEM_DOMAIN_VRAM); 74 + 75 + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 76 + if (!smu_table->metrics_table) 77 + goto err0_out; 78 + 79 + 
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 80 + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 81 + if (!smu_table->gpu_metrics_table) 82 + goto err1_out; 83 + 84 + smu_table->metrics_time = 0; 85 + 86 + return 0; 87 + 88 + err1_out: 89 + smu_table->gpu_metrics_table_size = 0; 90 + kfree(smu_table->metrics_table); 91 + err0_out: 92 + return -ENOMEM; 93 + } 94 + 95 + static int cyan_skillfish_init_smc_tables(struct smu_context *smu) 96 + { 97 + int ret = 0; 98 + 99 + ret = cyan_skillfish_tables_init(smu); 100 + if (ret) 101 + return ret; 102 + 103 + return smu_v11_0_init_smc_tables(smu); 104 + } 105 + 106 + static int cyan_skillfish_finit_smc_tables(struct smu_context *smu) 107 + { 108 + struct smu_table_context *smu_table = &smu->smu_table; 109 + 110 + kfree(smu_table->metrics_table); 111 + smu_table->metrics_table = NULL; 112 + 113 + kfree(smu_table->gpu_metrics_table); 114 + smu_table->gpu_metrics_table = NULL; 115 + smu_table->gpu_metrics_table_size = 0; 116 + 117 + smu_table->metrics_time = 0; 118 + 119 + return 0; 120 + } 121 + 122 + static int 123 + cyan_skillfish_get_smu_metrics_data(struct smu_context *smu, 124 + MetricsMember_t member, 125 + uint32_t *value) 126 + { 127 + struct smu_table_context *smu_table = &smu->smu_table; 128 + SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 129 + int ret = 0; 130 + 131 + mutex_lock(&smu->metrics_lock); 132 + 133 + ret = smu_cmn_get_metrics_table_locked(smu, NULL, false); 134 + if (ret) { 135 + mutex_unlock(&smu->metrics_lock); 136 + return ret; 137 + } 138 + 139 + switch (member) { 140 + case METRICS_CURR_GFXCLK: 141 + *value = metrics->Current.GfxclkFrequency; 142 + break; 143 + case METRICS_CURR_SOCCLK: 144 + *value = metrics->Current.SocclkFrequency; 145 + break; 146 + case METRICS_CURR_VCLK: 147 + *value = metrics->Current.VclkFrequency; 148 + break; 149 + case METRICS_CURR_DCLK: 150 + *value = metrics->Current.DclkFrequency; 151 + 
break; 152 + case METRICS_CURR_UCLK: 153 + *value = metrics->Current.MemclkFrequency; 154 + break; 155 + case METRICS_AVERAGE_SOCKETPOWER: 156 + *value = (metrics->Current.CurrentSocketPower << 8) / 157 + 1000; 158 + break; 159 + case METRICS_TEMPERATURE_EDGE: 160 + *value = metrics->Current.GfxTemperature / 100 * 161 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 162 + break; 163 + case METRICS_TEMPERATURE_HOTSPOT: 164 + *value = metrics->Current.SocTemperature / 100 * 165 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 166 + break; 167 + case METRICS_VOLTAGE_VDDSOC: 168 + *value = metrics->Current.Voltage[0]; 169 + break; 170 + case METRICS_VOLTAGE_VDDGFX: 171 + *value = metrics->Current.Voltage[1]; 172 + break; 173 + case METRICS_THROTTLER_STATUS: 174 + *value = metrics->Current.ThrottlerStatus; 175 + break; 176 + default: 177 + *value = UINT_MAX; 178 + break; 179 + } 180 + 181 + mutex_unlock(&smu->metrics_lock); 182 + 183 + return ret; 184 + } 185 + 186 + static int cyan_skillfish_read_sensor(struct smu_context *smu, 187 + enum amd_pp_sensors sensor, 188 + void *data, 189 + uint32_t *size) 190 + { 191 + int ret = 0; 192 + 193 + if (!data || !size) 194 + return -EINVAL; 195 + 196 + mutex_lock(&smu->sensor_lock); 197 + 198 + switch (sensor) { 199 + case AMDGPU_PP_SENSOR_GFX_SCLK: 200 + ret = cyan_skillfish_get_smu_metrics_data(smu, 201 + METRICS_CURR_GFXCLK, 202 + (uint32_t *)data); 203 + *(uint32_t *)data *= 100; 204 + *size = 4; 205 + break; 206 + case AMDGPU_PP_SENSOR_GFX_MCLK: 207 + ret = cyan_skillfish_get_smu_metrics_data(smu, 208 + METRICS_CURR_UCLK, 209 + (uint32_t *)data); 210 + *(uint32_t *)data *= 100; 211 + *size = 4; 212 + break; 213 + case AMDGPU_PP_SENSOR_GPU_POWER: 214 + ret = cyan_skillfish_get_smu_metrics_data(smu, 215 + METRICS_AVERAGE_SOCKETPOWER, 216 + (uint32_t *)data); 217 + *size = 4; 218 + break; 219 + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 220 + ret = cyan_skillfish_get_smu_metrics_data(smu, 221 + METRICS_TEMPERATURE_HOTSPOT, 222 + (uint32_t *)data); 
223 + *size = 4; 224 + break; 225 + case AMDGPU_PP_SENSOR_EDGE_TEMP: 226 + ret = cyan_skillfish_get_smu_metrics_data(smu, 227 + METRICS_TEMPERATURE_EDGE, 228 + (uint32_t *)data); 229 + *size = 4; 230 + break; 231 + case AMDGPU_PP_SENSOR_VDDNB: 232 + ret = cyan_skillfish_get_smu_metrics_data(smu, 233 + METRICS_VOLTAGE_VDDSOC, 234 + (uint32_t *)data); 235 + *size = 4; 236 + break; 237 + case AMDGPU_PP_SENSOR_VDDGFX: 238 + ret = cyan_skillfish_get_smu_metrics_data(smu, 239 + METRICS_VOLTAGE_VDDGFX, 240 + (uint32_t *)data); 241 + *size = 4; 242 + break; 243 + default: 244 + ret = -EOPNOTSUPP; 245 + break; 246 + } 247 + 248 + mutex_unlock(&smu->sensor_lock); 249 + 250 + return ret; 251 + } 252 + 253 + static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu, 254 + enum smu_clk_type clk_type, 255 + uint32_t *value) 256 + { 257 + MetricsMember_t member_type; 258 + 259 + switch (clk_type) { 260 + case SMU_GFXCLK: 261 + case SMU_SCLK: 262 + member_type = METRICS_CURR_GFXCLK; 263 + break; 264 + case SMU_FCLK: 265 + case SMU_MCLK: 266 + member_type = METRICS_CURR_UCLK; 267 + break; 268 + case SMU_SOCCLK: 269 + member_type = METRICS_CURR_SOCCLK; 270 + break; 271 + case SMU_VCLK: 272 + member_type = METRICS_CURR_VCLK; 273 + break; 274 + case SMU_DCLK: 275 + member_type = METRICS_CURR_DCLK; 276 + break; 277 + default: 278 + return -EINVAL; 279 + } 280 + 281 + return cyan_skillfish_get_smu_metrics_data(smu, member_type, value); 282 + } 283 + 284 + static int cyan_skillfish_print_clk_levels(struct smu_context *smu, 285 + enum smu_clk_type clk_type, 286 + char *buf) 287 + { 288 + int ret = 0, size = 0; 289 + uint32_t cur_value = 0; 290 + 291 + smu_cmn_get_sysfs_buf(&buf, &size); 292 + 293 + switch (clk_type) { 294 + case SMU_OD_SCLK: 295 + ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value); 296 + if (ret) 297 + return ret; 298 + size += sysfs_emit_at(buf, size,"%s:\n", "OD_SCLK"); 299 + size += sysfs_emit_at(buf, size, "0: %uMhz *\n", 
cur_value); 300 + break; 301 + case SMU_OD_VDDC_CURVE: 302 + ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value); 303 + if (ret) 304 + return ret; 305 + size += sysfs_emit_at(buf, size,"%s:\n", "OD_VDDC"); 306 + size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value); 307 + break; 308 + case SMU_OD_RANGE: 309 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 310 + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 311 + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); 312 + size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", 313 + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); 314 + break; 315 + case SMU_GFXCLK: 316 + case SMU_SCLK: 317 + case SMU_FCLK: 318 + case SMU_MCLK: 319 + case SMU_SOCCLK: 320 + case SMU_VCLK: 321 + case SMU_DCLK: 322 + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); 323 + if (ret) 324 + return ret; 325 + size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); 326 + break; 327 + default: 328 + dev_warn(smu->adev->dev, "Unsupported clock type\n"); 329 + return ret; 330 + } 331 + 332 + return size; 333 + } 334 + 335 + static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) 336 + { 337 + struct amdgpu_device *adev = smu->adev; 338 + int ret = 0; 339 + uint32_t feature_mask[2]; 340 + uint64_t feature_enabled; 341 + 342 + /* we need to re-init after suspend so return false */ 343 + if (adev->in_suspend) 344 + return false; 345 + 346 + ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); 347 + 348 + if (ret) 349 + return false; 350 + 351 + feature_enabled = (uint64_t)feature_mask[0] | 352 + ((uint64_t)feature_mask[1] << 32); 353 + 354 + return !!(feature_enabled & SMC_DPM_FEATURE); 355 + } 356 + 357 + static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu, 358 + void **table) 359 + { 360 + struct smu_table_context *smu_table = &smu->smu_table; 361 + struct gpu_metrics_v2_2 *gpu_metrics = 362 + (struct gpu_metrics_v2_2 
*)smu_table->gpu_metrics_table; 363 + SmuMetrics_t metrics; 364 + int i, ret = 0; 365 + 366 + ret = smu_cmn_get_metrics_table(smu, &metrics, true); 367 + if (ret) 368 + return ret; 369 + 370 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 371 + 372 + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 373 + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 374 + 375 + gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 376 + gpu_metrics->average_soc_power = metrics.Current.Power[0]; 377 + gpu_metrics->average_gfx_power = metrics.Current.Power[1]; 378 + 379 + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 380 + gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 381 + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 382 + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 383 + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 384 + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 385 + 386 + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 387 + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 388 + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 389 + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 390 + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 391 + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 392 + 393 + for (i = 0; i < 6; i++) { 394 + gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i]; 395 + gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i]; 396 + gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i]; 397 + } 398 + 399 + for (i = 0; i < 2; i++) { 400 + gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i]; 401 + gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i]; 402 + } 403 + 404 + gpu_metrics->throttle_status = 
metrics.Current.ThrottlerStatus; 405 + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 406 + 407 + *table = (void *)gpu_metrics; 408 + 409 + return sizeof(struct gpu_metrics_v2_2); 410 + } 411 + 412 + static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, 413 + enum PP_OD_DPM_TABLE_COMMAND type, 414 + long input[], uint32_t size) 415 + { 416 + int ret = 0; 417 + uint32_t vid; 418 + 419 + switch (type) { 420 + case PP_OD_EDIT_VDDC_CURVE: 421 + if (size != 3 || input[0] != 0) { 422 + dev_err(smu->adev->dev, "Invalid parameter!\n"); 423 + return -EINVAL; 424 + } 425 + 426 + if (input[1] <= CYAN_SKILLFISH_SCLK_MIN || 427 + input[1] > CYAN_SKILLFISH_SCLK_MAX) { 428 + dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n", 429 + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); 430 + return -EINVAL; 431 + } 432 + 433 + if (input[2] <= CYAN_SKILLFISH_VDDC_MIN || 434 + input[2] > CYAN_SKILLFISH_VDDC_MAX) { 435 + dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", 436 + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); 437 + return -EINVAL; 438 + } 439 + 440 + cyan_skillfish_user_settings.sclk = input[1]; 441 + cyan_skillfish_user_settings.vddc = input[2]; 442 + 443 + break; 444 + case PP_OD_RESTORE_DEFAULT_TABLE: 445 + if (size != 0) { 446 + dev_err(smu->adev->dev, "Invalid parameter!\n"); 447 + return -EINVAL; 448 + } 449 + 450 + cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT; 451 + cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC; 452 + 453 + break; 454 + case PP_OD_COMMIT_DPM_TABLE: 455 + if (size != 0) { 456 + dev_err(smu->adev->dev, "Invalid parameter!\n"); 457 + return -EINVAL; 458 + } 459 + 460 + if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN || 461 + cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) { 462 + dev_err(smu->adev->dev, "Invalid sclk! 
Valid sclk range: %uMHz - %uMhz\n", 463 + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); 464 + return -EINVAL; 465 + } 466 + 467 + if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) && 468 + (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN || 469 + cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) { 470 + dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", 471 + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); 472 + return -EINVAL; 473 + } 474 + 475 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk, 476 + cyan_skillfish_user_settings.sclk, NULL); 477 + if (ret) { 478 + dev_err(smu->adev->dev, "Set sclk failed!\n"); 479 + return ret; 480 + } 481 + 482 + if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) { 483 + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL); 484 + if (ret) { 485 + dev_err(smu->adev->dev, "Unforce vddc failed!\n"); 486 + return ret; 487 + } 488 + } else { 489 + /* 490 + * PMFW accepts SVI2 VID code, convert voltage to VID: 491 + * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001) 492 + */ 493 + vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000; 494 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL); 495 + if (ret) { 496 + dev_err(smu->adev->dev, "Force vddc failed!\n"); 497 + return ret; 498 + } 499 + } 500 + 501 + break; 502 + default: 503 + return -EOPNOTSUPP; 504 + } 505 + 506 + return ret; 507 + } 77 508 78 509 static const struct pptable_funcs cyan_skillfish_ppt_funcs = { 79 510 ··· 533 60 .check_fw_version = smu_v11_0_check_fw_version, 534 61 .init_power = smu_v11_0_init_power, 535 62 .fini_power = smu_v11_0_fini_power, 63 + .init_smc_tables = cyan_skillfish_init_smc_tables, 64 + .fini_smc_tables = cyan_skillfish_finit_smc_tables, 65 + .read_sensor = cyan_skillfish_read_sensor, 66 + .print_clk_levels = cyan_skillfish_print_clk_levels, 67 + .is_dpm_running = cyan_skillfish_is_dpm_running, 
68 + .get_gpu_metrics = cyan_skillfish_get_gpu_metrics, 69 + .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table, 536 70 .register_irq_handler = smu_v11_0_register_irq_handler, 537 71 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 538 72 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, ··· 552 72 { 553 73 smu->ppt_funcs = &cyan_skillfish_ppt_funcs; 554 74 smu->message_map = cyan_skillfish_message_map; 75 + smu->table_map = cyan_skillfish_table_map; 555 76 smu->is_apu = true; 556 77 }
+25 -3
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
··· 1279 1279 struct smu_11_0_overdrive_table *od_settings = smu->od_settings; 1280 1280 uint32_t min_value, max_value; 1281 1281 1282 + smu_cmn_get_sysfs_buf(&buf, &size); 1283 + 1282 1284 switch (clk_type) { 1283 1285 case SMU_GFXCLK: 1284 1286 case SMU_SCLK: ··· 1394 1392 case SMU_OD_RANGE: 1395 1393 if (!smu->od_enabled || !od_table || !od_settings) 1396 1394 break; 1397 - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); 1395 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 1398 1396 1399 1397 if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) { 1400 1398 navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, ··· 2274 2272 { 2275 2273 struct amdgpu_device *adev = smu->adev; 2276 2274 2277 - if (adev->in_runpm) 2275 + /* 2276 + * This aims the case below: 2277 + * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded 2278 + * 2279 + * For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To 2280 + * make that possible, PMFW needs to acknowledge the dstate transition 2281 + * process for both gfx(function 0) and audio(function 1) function of 2282 + * the ASIC. 2283 + * 2284 + * The PCI device's initial runpm status is RUNPM_SUSPENDED. So as the 2285 + * device representing the audio function of the ASIC. And that means 2286 + * even if the sound driver(snd_hda_intel) was not loaded yet, it's still 2287 + * possible runpm suspend kicked on the ASIC. However without the dstate 2288 + * transition notification from audio function, pmfw cannot handle the 2289 + * BACO in/exit correctly. And that will cause driver hang on runpm 2290 + * resuming. 2291 + * 2292 + * To address this, we revert to legacy message way(driver masters the 2293 + * timing for BACO in/exit) on sound driver missing. 
2294 + */ 2295 + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 2278 2296 return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); 2279 2297 else 2280 2298 return smu_v11_0_baco_enter(smu); ··· 2304 2282 { 2305 2283 struct amdgpu_device *adev = smu->adev; 2306 2284 2307 - if (adev->in_runpm) { 2285 + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2308 2286 /* Wait for PMFW handling for the Dstate change */ 2309 2287 msleep(10); 2310 2288 return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+5 -3
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 1058 1058 uint32_t min_value, max_value; 1059 1059 uint32_t smu_version; 1060 1060 1061 + smu_cmn_get_sysfs_buf(&buf, &size); 1062 + 1061 1063 switch (clk_type) { 1062 1064 case SMU_GFXCLK: 1063 1065 case SMU_SCLK: ··· 1182 1180 if (!smu->od_enabled || !od_table || !od_settings) 1183 1181 break; 1184 1182 1185 - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); 1183 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 1186 1184 1187 1185 if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) { 1188 1186 sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN, ··· 2189 2187 { 2190 2188 struct amdgpu_device *adev = smu->adev; 2191 2189 2192 - if (adev->in_runpm) 2190 + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 2193 2191 return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); 2194 2192 else 2195 2193 return smu_v11_0_baco_enter(smu); ··· 2199 2197 { 2200 2198 struct amdgpu_device *adev = smu->adev; 2201 2199 2202 - if (adev->in_runpm) { 2200 + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2203 2201 /* Wait for PMFW handling for the Dstate change */ 2204 2202 msleep(10); 2205 2203 return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+10 -6
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 589 589 if (ret) 590 590 return ret; 591 591 592 + smu_cmn_get_sysfs_buf(&buf, &size); 593 + 592 594 switch (clk_type) { 593 595 case SMU_OD_SCLK: 594 596 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 595 - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); 597 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 596 598 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 597 599 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 598 600 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", ··· 603 601 break; 604 602 case SMU_OD_CCLK: 605 603 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 606 - size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 604 + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 607 605 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 608 606 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 609 607 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", ··· 612 610 break; 613 611 case SMU_OD_RANGE: 614 612 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 615 - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); 613 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 616 614 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 617 615 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 618 616 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", ··· 690 688 if (ret) 691 689 return ret; 692 690 691 + smu_cmn_get_sysfs_buf(&buf, &size); 692 + 693 693 switch (clk_type) { 694 694 case SMU_OD_SCLK: 695 695 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 696 - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); 696 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 697 697 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 698 698 (smu->gfx_actual_hard_min_freq > 0) ? 
smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 699 699 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", ··· 704 700 break; 705 701 case SMU_OD_CCLK: 706 702 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 707 - size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 703 + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 708 704 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 709 705 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 710 706 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", ··· 713 709 break; 714 710 case SMU_OD_RANGE: 715 711 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 716 - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); 712 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 717 713 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 718 714 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 719 715 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
+2
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 497 497 if (ret) 498 498 return ret; 499 499 500 + smu_cmn_get_sysfs_buf(&buf, &size); 501 + 500 502 switch (clk_type) { 501 503 case SMU_OD_RANGE: 502 504 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+8 -4
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
··· 733 733 uint32_t freq_values[3] = {0}; 734 734 uint32_t min_clk, max_clk; 735 735 736 - if (amdgpu_ras_intr_triggered()) 737 - return sysfs_emit(buf, "unavailable\n"); 736 + smu_cmn_get_sysfs_buf(&buf, &size); 737 + 738 + if (amdgpu_ras_intr_triggered()) { 739 + size += sysfs_emit_at(buf, size, "unavailable\n"); 740 + return size; 741 + } 738 742 739 743 dpm_context = smu_dpm->dpm_context; 740 744 741 745 switch (type) { 742 746 743 747 case SMU_OD_SCLK: 744 - size = sysfs_emit(buf, "%s:\n", "GFXCLK"); 748 + size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); 745 749 fallthrough; 746 750 case SMU_SCLK: 747 751 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); ··· 799 795 break; 800 796 801 797 case SMU_OD_MCLK: 802 - size = sysfs_emit(buf, "%s:\n", "MCLK"); 798 + size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); 803 799 fallthrough; 804 800 case SMU_MCLK: 805 801 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
+4 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
··· 1052 1052 int i, size = 0, ret = 0; 1053 1053 uint32_t cur_value = 0, value = 0, count = 0; 1054 1054 1055 + smu_cmn_get_sysfs_buf(&buf, &size); 1056 + 1055 1057 switch (clk_type) { 1056 1058 case SMU_OD_SCLK: 1057 - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); 1059 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 1058 1060 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 1059 1061 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 1060 1062 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 1061 1063 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 1062 1064 break; 1063 1065 case SMU_OD_RANGE: 1064 - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); 1066 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 1065 1067 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 1066 1068 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 1067 1069 break;
+21
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 1053 1053 1054 1054 return ret; 1055 1055 } 1056 + 1057 + bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) 1058 + { 1059 + struct pci_dev *p = NULL; 1060 + bool snd_driver_loaded; 1061 + 1062 + /* 1063 + * If the ASIC comes with no audio function, we always assume 1064 + * it is "enabled". 1065 + */ 1066 + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 1067 + adev->pdev->bus->number, 1); 1068 + if (!p) 1069 + return true; 1070 + 1071 + snd_driver_loaded = pci_is_enabled(p) ? true : false; 1072 + 1073 + pci_dev_put(p); 1074 + 1075 + return snd_driver_loaded; 1076 + }
+15
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
··· 110 110 int smu_cmn_set_mp1_state(struct smu_context *smu, 111 111 enum pp_mp1_state mp1_state); 112 112 113 + /* 114 + * Helper function to make sysfs_emit_at() happy. Align buf to 115 + * the current page boundary and record the offset. 116 + */ 117 + static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset) 118 + { 119 + if (!*buf || !offset) 120 + return; 121 + 122 + *offset = offset_in_page(*buf); 123 + *buf -= *offset; 124 + } 125 + 126 + bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev); 127 + 113 128 #endif 114 129 #endif
+1 -2
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
··· 397 397 if (switch_mmu_context) { 398 398 struct etnaviv_iommu_context *old_context = gpu->mmu_context; 399 399 400 - etnaviv_iommu_context_get(mmu_context); 401 - gpu->mmu_context = mmu_context; 400 + gpu->mmu_context = etnaviv_iommu_context_get(mmu_context); 402 401 etnaviv_iommu_context_put(old_context); 403 402 } 404 403
+1 -2
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 294 294 list_del(&mapping->obj_node); 295 295 } 296 296 297 - etnaviv_iommu_context_get(mmu_context); 298 - mapping->context = mmu_context; 297 + mapping->context = etnaviv_iommu_context_get(mmu_context); 299 298 mapping->use = 1; 300 299 301 300 ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
+1 -2
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 532 532 goto err_submit_objects; 533 533 534 534 submit->ctx = file->driver_priv; 535 - etnaviv_iommu_context_get(submit->ctx->mmu); 536 - submit->mmu_context = submit->ctx->mmu; 535 + submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu); 537 536 submit->exec_state = args->exec_state; 538 537 submit->flags = args->flags; 539 538
+25 -18
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 569 569 /* We rely on the GPU running, so program the clock */ 570 570 etnaviv_gpu_update_clock(gpu); 571 571 572 + gpu->fe_running = false; 573 + gpu->exec_state = -1; 574 + if (gpu->mmu_context) 575 + etnaviv_iommu_context_put(gpu->mmu_context); 576 + gpu->mmu_context = NULL; 577 + 572 578 return 0; 573 579 } 574 580 ··· 643 637 VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE | 644 638 VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch)); 645 639 } 640 + 641 + gpu->fe_running = true; 646 642 } 647 643 648 - static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu) 644 + static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu, 645 + struct etnaviv_iommu_context *context) 649 646 { 650 - u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer, 651 - &gpu->mmu_context->cmdbuf_mapping); 652 647 u16 prefetch; 648 + u32 address; 653 649 654 650 /* setup the MMU */ 655 - etnaviv_iommu_restore(gpu, gpu->mmu_context); 651 + etnaviv_iommu_restore(gpu, context); 656 652 657 653 /* Start command processor */ 658 654 prefetch = etnaviv_buffer_init(gpu); 655 + address = etnaviv_cmdbuf_get_va(&gpu->buffer, 656 + &gpu->mmu_context->cmdbuf_mapping); 659 657 660 658 etnaviv_gpu_start_fe(gpu, address, prefetch); 661 659 } ··· 842 832 /* Now program the hardware */ 843 833 mutex_lock(&gpu->lock); 844 834 etnaviv_gpu_hw_init(gpu); 845 - gpu->exec_state = -1; 846 835 mutex_unlock(&gpu->lock); 847 836 848 837 pm_runtime_mark_last_busy(gpu->dev); ··· 1066 1057 spin_unlock(&gpu->event_spinlock); 1067 1058 1068 1059 etnaviv_gpu_hw_init(gpu); 1069 - gpu->exec_state = -1; 1070 - gpu->mmu_context = NULL; 1071 1060 1072 1061 mutex_unlock(&gpu->lock); 1073 1062 pm_runtime_mark_last_busy(gpu->dev); ··· 1377 1370 goto out_unlock; 1378 1371 } 1379 1372 1380 - if (!gpu->mmu_context) { 1381 - etnaviv_iommu_context_get(submit->mmu_context); 1382 - gpu->mmu_context = submit->mmu_context; 1383 - etnaviv_gpu_start_fe_idleloop(gpu); 1384 - } else { 1385 - etnaviv_iommu_context_get(gpu->mmu_context); 
1386 - submit->prev_mmu_context = gpu->mmu_context; 1387 - } 1373 + if (!gpu->fe_running) 1374 + etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context); 1375 + 1376 + if (submit->prev_mmu_context) 1377 + etnaviv_iommu_context_put(submit->prev_mmu_context); 1378 + submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context); 1388 1379 1389 1380 if (submit->nr_pmrs) { 1390 1381 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre; ··· 1584 1579 1585 1580 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) 1586 1581 { 1587 - if (gpu->initialized && gpu->mmu_context) { 1582 + if (gpu->initialized && gpu->fe_running) { 1588 1583 /* Replace the last WAIT with END */ 1589 1584 mutex_lock(&gpu->lock); 1590 1585 etnaviv_buffer_end(gpu); ··· 1597 1592 */ 1598 1593 etnaviv_gpu_wait_idle(gpu, 100); 1599 1594 1600 - etnaviv_iommu_context_put(gpu->mmu_context); 1601 - gpu->mmu_context = NULL; 1595 + gpu->fe_running = false; 1602 1596 } 1603 1597 1604 1598 gpu->exec_state = -1; ··· 1744 1740 #else 1745 1741 etnaviv_gpu_hw_suspend(gpu); 1746 1742 #endif 1743 + 1744 + if (gpu->mmu_context) 1745 + etnaviv_iommu_context_put(gpu->mmu_context); 1747 1746 1748 1747 if (gpu->initialized) { 1749 1748 etnaviv_cmdbuf_free(&gpu->buffer);
+1
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
··· 101 101 struct workqueue_struct *wq; 102 102 struct drm_gpu_scheduler sched; 103 103 bool initialized; 104 + bool fe_running; 104 105 105 106 /* 'ring'-buffer: */ 106 107 struct etnaviv_cmdbuf buffer;
+4
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
··· 92 92 struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); 93 93 u32 pgtable; 94 94 95 + if (gpu->mmu_context) 96 + etnaviv_iommu_context_put(gpu->mmu_context); 97 + gpu->mmu_context = etnaviv_iommu_context_get(context); 98 + 95 99 /* set base addresses */ 96 100 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base); 97 101 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+8
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
··· 172 172 if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE) 173 173 return; 174 174 175 + if (gpu->mmu_context) 176 + etnaviv_iommu_context_put(gpu->mmu_context); 177 + gpu->mmu_context = etnaviv_iommu_context_get(context); 178 + 175 179 prefetch = etnaviv_buffer_config_mmuv2(gpu, 176 180 (u32)v2_context->mtlb_dma, 177 181 (u32)context->global->bad_page_dma); ··· 195 191 /* If the MMU is already enabled the state is still there. */ 196 192 if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE) 197 193 return; 194 + 195 + if (gpu->mmu_context) 196 + etnaviv_iommu_context_put(gpu->mmu_context); 197 + gpu->mmu_context = etnaviv_iommu_context_get(context); 198 198 199 199 gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW, 200 200 lower_32_bits(context->global->v2.pta_dma));
+1
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
··· 199 199 */ 200 200 list_for_each_entry_safe(m, n, &list, scan_node) { 201 201 etnaviv_iommu_remove_mapping(context, m); 202 + etnaviv_iommu_context_put(m->context); 202 203 m->context = NULL; 203 204 list_del_init(&m->mmu_node); 204 205 list_del_init(&m->scan_node);
+3 -1
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
··· 105 105 struct etnaviv_iommu_context * 106 106 etnaviv_iommu_context_init(struct etnaviv_iommu_global *global, 107 107 struct etnaviv_cmdbuf_suballoc *suballoc); 108 - static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) 108 + static inline struct etnaviv_iommu_context * 109 + etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) 109 110 { 110 111 kref_get(&ctx->refcount); 112 + return ctx; 111 113 } 112 114 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx); 113 115 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
-1
drivers/gpu/drm/i915/Makefile
··· 19 19 subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) 20 20 # clang warnings 21 21 subdir-ccflags-y += $(call cc-disable-warning, sign-compare) 22 - subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) 23 22 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) 24 23 subdir-ccflags-y += $(call cc-disable-warning, frame-address) 25 24 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
+4 -1
drivers/gpu/drm/i915/display/intel_dp.c
··· 2445 2445 */ 2446 2446 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 2447 2447 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 2448 - sizeof(intel_dp->edp_dpcd)) 2448 + sizeof(intel_dp->edp_dpcd)) { 2449 2449 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 2450 2450 (int)sizeof(intel_dp->edp_dpcd), 2451 2451 intel_dp->edp_dpcd); 2452 + 2453 + intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; 2454 + } 2452 2455 2453 2456 /* 2454 2457 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
+3 -3
drivers/gpu/drm/i915/gem/i915_gem_context.c
··· 986 986 trace_i915_context_free(ctx); 987 987 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 988 988 989 + if (ctx->syncobj) 990 + drm_syncobj_put(ctx->syncobj); 991 + 989 992 mutex_destroy(&ctx->engines_mutex); 990 993 mutex_destroy(&ctx->lut_mutex); 991 994 ··· 1207 1204 vm = i915_gem_context_vm(ctx); 1208 1205 if (vm) 1209 1206 i915_vm_close(vm); 1210 - 1211 - if (ctx->syncobj) 1212 - drm_syncobj_put(ctx->syncobj); 1213 1207 1214 1208 ctx->file_priv = ERR_PTR(-EBADF); 1215 1209
+4 -3
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
··· 59 59 err = PTR_ERR(import); 60 60 goto out_dmabuf; 61 61 } 62 + import_obj = to_intel_bo(import); 62 63 63 64 if (import != &obj->base) { 64 65 pr_err("i915_gem_prime_import created a new object!\n"); 65 66 err = -EINVAL; 66 67 goto out_import; 67 68 } 68 - import_obj = to_intel_bo(import); 69 69 70 70 i915_gem_object_lock(import_obj, NULL); 71 71 err = __i915_gem_object_get_pages(import_obj); ··· 128 128 pr_err("i915_gem_prime_import failed with the wrong err=%ld\n", 129 129 PTR_ERR(import)); 130 130 err = PTR_ERR(import); 131 + } else { 132 + err = 0; 131 133 } 132 134 133 135 dma_buf_put(dmabuf); ··· 178 176 err = PTR_ERR(import); 179 177 goto out_dmabuf; 180 178 } 179 + import_obj = to_intel_bo(import); 181 180 182 181 if (import == &obj->base) { 183 182 pr_err("i915_gem_prime_import reused gem object!\n"); 184 183 err = -EINVAL; 185 184 goto out_import; 186 185 } 187 - 188 - import_obj = to_intel_bo(import); 189 186 190 187 i915_gem_object_lock(import_obj, NULL); 191 188 err = __i915_gem_object_get_pages(import_obj);
+20 -6
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
··· 581 581 return I915_MMAP_TYPE_GTT; 582 582 } 583 583 584 + static struct drm_i915_gem_object * 585 + create_sys_or_internal(struct drm_i915_private *i915, 586 + unsigned long size) 587 + { 588 + if (HAS_LMEM(i915)) { 589 + struct intel_memory_region *sys_region = 590 + i915->mm.regions[INTEL_REGION_SMEM]; 591 + 592 + return __i915_gem_object_create_user(i915, size, &sys_region, 1); 593 + } 594 + 595 + return i915_gem_object_create_internal(i915, size); 596 + } 597 + 584 598 static bool assert_mmap_offset(struct drm_i915_private *i915, 585 599 unsigned long size, 586 600 int expected) ··· 603 589 u64 offset; 604 590 int ret; 605 591 606 - obj = i915_gem_object_create_internal(i915, size); 592 + obj = create_sys_or_internal(i915, size); 607 593 if (IS_ERR(obj)) 608 594 return expected && expected == PTR_ERR(obj); 609 595 ··· 647 633 struct drm_mm_node *hole, *next; 648 634 int loop, err = 0; 649 635 u64 offset; 636 + int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC; 650 637 651 638 /* Disable background reaper */ 652 639 disable_retire_worker(i915); ··· 698 683 } 699 684 700 685 /* Too large */ 701 - if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) { 686 + if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) { 702 687 pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n"); 703 688 err = -EINVAL; 704 689 goto out; 705 690 } 706 691 707 692 /* Fill the hole, further allocation attempts should then fail */ 708 - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); 693 + obj = create_sys_or_internal(i915, PAGE_SIZE); 709 694 if (IS_ERR(obj)) { 710 695 err = PTR_ERR(obj); 711 696 pr_err("Unable to create object for reclaimed hole\n"); ··· 718 703 goto err_obj; 719 704 } 720 705 721 - if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) { 706 + if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) { 722 707 pr_err("Unexpectedly succeeded in inserting object into no holes!\n"); 723 708 err = -EINVAL; 724 709 goto err_obj; ··· 854 839 
855 840 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) 856 841 { 857 - struct drm_i915_private *i915 = to_i915(obj->base.dev); 858 842 bool no_map; 859 843 860 - if (HAS_LMEM(i915)) 844 + if (obj->ops->mmap_offset) 861 845 return type == I915_MMAP_TYPE_FIXED; 862 846 else if (type == I915_MMAP_TYPE_FIXED) 863 847 return false;
+7 -1
drivers/gpu/drm/i915/gt/intel_rps.c
··· 1973 1973 u32 intel_rps_read_punit_req(struct intel_rps *rps) 1974 1974 { 1975 1975 struct intel_uncore *uncore = rps_to_uncore(rps); 1976 + struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; 1977 + intel_wakeref_t wakeref; 1978 + u32 freq = 0; 1976 1979 1977 - return intel_uncore_read(uncore, GEN6_RPNSWREQ); 1980 + with_intel_runtime_pm_if_in_use(rpm, wakeref) 1981 + freq = intel_uncore_read(uncore, GEN6_RPNSWREQ); 1982 + 1983 + return freq; 1978 1984 } 1979 1985 1980 1986 static u32 intel_rps_get_req(u32 pureq)
+3 -8
drivers/gpu/drm/i915/gt/uc/intel_uc.c
··· 172 172 __uc_free_load_err_log(uc); 173 173 } 174 174 175 - static inline bool guc_communication_enabled(struct intel_guc *guc) 176 - { 177 - return intel_guc_ct_enabled(&guc->ct); 178 - } 179 - 180 175 /* 181 176 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 182 177 * register using the same bits used in the CT message payload. Since our ··· 205 210 static void guc_handle_mmio_msg(struct intel_guc *guc) 206 211 { 207 212 /* we need communication to be enabled to reply to GuC */ 208 - GEM_BUG_ON(!guc_communication_enabled(guc)); 213 + GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct)); 209 214 210 215 spin_lock_irq(&guc->irq_lock); 211 216 if (guc->mmio_msg) { ··· 221 226 struct drm_i915_private *i915 = gt->i915; 222 227 int ret; 223 228 224 - GEM_BUG_ON(guc_communication_enabled(guc)); 229 + GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct)); 225 230 226 231 ret = i915_inject_probe_error(i915, -ENXIO); 227 232 if (ret) ··· 657 662 return 0; 658 663 659 664 /* Make sure we enable communication if and only if it's disabled */ 660 - GEM_BUG_ON(enable_communication == guc_communication_enabled(guc)); 665 + GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct)); 661 666 662 667 if (enable_communication) 663 668 guc_enable_communication(guc);
+1 -1
drivers/gpu/drm/radeon/radeon_kms.c
··· 119 119 #endif 120 120 121 121 if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) 122 - rdev->agp = radeon_agp_head_init(rdev->ddev); 122 + rdev->agp = radeon_agp_head_init(dev); 123 123 if (rdev->agp) { 124 124 rdev->agp->agp_mtrr = arch_phys_wc_add( 125 125 rdev->agp->agp_info.aper_base,
+1 -1
drivers/gpu/drm/rockchip/cdn-dp-core.c
··· 1123 1123 return ret; 1124 1124 } 1125 1125 1126 - static int cdn_dp_resume(struct device *dev) 1126 + static __maybe_unused int cdn_dp_resume(struct device *dev) 1127 1127 { 1128 1128 struct cdn_dp_device *dp = dev_get_drvdata(dev); 1129 1129
+2 -1
drivers/gpu/drm/ttm/ttm_pool.c
··· 383 383 else 384 384 gfp_flags |= GFP_HIGHUSER; 385 385 386 - for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages; 386 + for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages)); 387 + num_pages; 387 388 order = min_t(unsigned int, order, __fls(num_pages))) { 388 389 bool apply_caching = false; 389 390 struct ttm_pool_type *pt;
+23 -61
drivers/gpu/drm/vc4/vc4_hdmi.c
··· 167 167 struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); 168 168 bool connected = false; 169 169 170 - WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); 171 - 172 170 if (vc4_hdmi->hpd_gpio && 173 171 gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) { 174 172 connected = true; ··· 187 189 } 188 190 } 189 191 190 - pm_runtime_put(&vc4_hdmi->pdev->dev); 191 192 return connector_status_connected; 192 193 } 193 194 194 195 cec_phys_addr_invalidate(vc4_hdmi->cec_adap); 195 - pm_runtime_put(&vc4_hdmi->pdev->dev); 196 196 return connector_status_disconnected; 197 197 } 198 198 ··· 432 436 struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); 433 437 struct drm_connector *connector = &vc4_hdmi->connector; 434 438 struct drm_connector_state *cstate = connector->state; 435 - struct drm_crtc *crtc = cstate->crtc; 439 + struct drm_crtc *crtc = encoder->crtc; 436 440 const struct drm_display_mode *mode = &crtc->state->adjusted_mode; 437 441 union hdmi_infoframe frame; 438 442 int ret; ··· 537 541 538 542 static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) 539 543 { 544 + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; 540 545 struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); 541 - struct drm_connector *connector = &vc4_hdmi->connector; 542 - struct drm_connector_state *cstate = connector->state; 543 - struct drm_crtc *crtc = cstate->crtc; 544 - struct drm_display_mode *mode = &crtc->state->adjusted_mode; 545 546 546 547 if (!vc4_hdmi_supports_scrambling(encoder, mode)) 547 548 return; ··· 559 566 static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder) 560 567 { 561 568 struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); 562 - struct drm_connector *connector = &vc4_hdmi->connector; 563 - struct drm_connector_state *cstate = connector->state; 569 + struct drm_crtc *crtc = encoder->crtc; 564 570 565 571 /* 566 - * At boot, connector->state will be NULL. 
Since we don't know the 572 + * At boot, encoder->crtc will be NULL. Since we don't know the 567 573 * state of the scrambler and in order to avoid any 568 574 * inconsistency, let's disable it all the time. 569 575 */ 570 - if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode)) 576 + if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode)) 571 577 return; 572 578 573 - if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode)) 579 + if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode)) 574 580 return; 575 581 576 582 if (delayed_work_pending(&vc4_hdmi->scrambling_work)) ··· 627 635 vc4_hdmi->variant->phy_disable(vc4_hdmi); 628 636 629 637 clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock); 638 + clk_disable_unprepare(vc4_hdmi->hsm_clock); 630 639 clk_disable_unprepare(vc4_hdmi->pixel_clock); 631 640 632 641 ret = pm_runtime_put(&vc4_hdmi->pdev->dev); ··· 891 898 vc4_hdmi_encoder_get_connector_state(encoder, state); 892 899 struct vc4_hdmi_connector_state *vc4_conn_state = 893 900 conn_state_to_vc4_hdmi_conn_state(conn_state); 894 - struct drm_crtc_state *crtc_state = 895 - drm_atomic_get_new_crtc_state(state, conn_state->crtc); 896 - struct drm_display_mode *mode = &crtc_state->adjusted_mode; 901 + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; 897 902 struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); 898 903 unsigned long bvb_rate, pixel_rate, hsm_rate; 899 904 int ret; ··· 938 947 return; 939 948 } 940 949 950 + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); 951 + if (ret) { 952 + DRM_ERROR("Failed to turn on HSM clock: %d\n", ret); 953 + clk_disable_unprepare(vc4_hdmi->pixel_clock); 954 + return; 955 + } 956 + 941 957 vc4_hdmi_cec_update_clk_div(vc4_hdmi); 942 958 943 959 if (pixel_rate > 297000000) ··· 957 959 ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate); 958 960 if (ret) { 959 961 DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret); 962 + 
clk_disable_unprepare(vc4_hdmi->hsm_clock); 960 963 clk_disable_unprepare(vc4_hdmi->pixel_clock); 961 964 return; 962 965 } ··· 965 966 ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); 966 967 if (ret) { 967 968 DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret); 969 + clk_disable_unprepare(vc4_hdmi->hsm_clock); 968 970 clk_disable_unprepare(vc4_hdmi->pixel_clock); 969 971 return; 970 972 } ··· 985 985 static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, 986 986 struct drm_atomic_state *state) 987 987 { 988 - struct drm_connector_state *conn_state = 989 - vc4_hdmi_encoder_get_connector_state(encoder, state); 990 - struct drm_crtc_state *crtc_state = 991 - drm_atomic_get_new_crtc_state(state, conn_state->crtc); 992 - struct drm_display_mode *mode = &crtc_state->adjusted_mode; 988 + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; 993 989 struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); 994 990 struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); 995 991 ··· 1008 1012 static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, 1009 1013 struct drm_atomic_state *state) 1010 1014 { 1011 - struct drm_connector_state *conn_state = 1012 - vc4_hdmi_encoder_get_connector_state(encoder, state); 1013 - struct drm_crtc_state *crtc_state = 1014 - drm_atomic_get_new_crtc_state(state, conn_state->crtc); 1015 - struct drm_display_mode *mode = &crtc_state->adjusted_mode; 1015 + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; 1016 1016 struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); 1017 1017 struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); 1018 1018 bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; ··· 1196 1204 1197 1205 static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) 1198 1206 { 1199 - struct drm_connector *connector = &vc4_hdmi->connector; 1200 - struct drm_crtc *crtc = connector->state->crtc; 1207 
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; 1208 + struct drm_crtc *crtc = encoder->crtc; 1201 1209 const struct drm_display_mode *mode = &crtc->state->adjusted_mode; 1202 1210 u32 n, cts; 1203 1211 u64 tmp; ··· 1230 1238 static int vc4_hdmi_audio_startup(struct device *dev, void *data) 1231 1239 { 1232 1240 struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); 1233 - struct drm_connector *connector = &vc4_hdmi->connector; 1241 + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; 1234 1242 1235 1243 /* 1236 1244 * If the HDMI encoder hasn't probed, or the encoder is 1237 1245 * currently in DVI mode, treat the codec dai as missing. 1238 1246 */ 1239 - if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & 1247 + if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & 1240 1248 VC4_HDMI_RAM_PACKET_ENABLE)) 1241 1249 return -ENODEV; 1242 1250 ··· 2106 2114 return 0; 2107 2115 } 2108 2116 2109 - #ifdef CONFIG_PM 2110 - static int vc4_hdmi_runtime_suspend(struct device *dev) 2111 - { 2112 - struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); 2113 - 2114 - clk_disable_unprepare(vc4_hdmi->hsm_clock); 2115 - 2116 - return 0; 2117 - } 2118 - 2119 - static int vc4_hdmi_runtime_resume(struct device *dev) 2120 - { 2121 - struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); 2122 - int ret; 2123 - 2124 - ret = clk_prepare_enable(vc4_hdmi->hsm_clock); 2125 - if (ret) 2126 - return ret; 2127 - 2128 - return 0; 2129 - } 2130 - #endif 2131 - 2132 2117 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) 2133 2118 { 2134 2119 const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev); ··· 2360 2391 {} 2361 2392 }; 2362 2393 2363 - static const struct dev_pm_ops vc4_hdmi_pm_ops = { 2364 - SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend, 2365 - vc4_hdmi_runtime_resume, 2366 - NULL) 2367 - }; 2368 - 2369 2394 struct platform_driver vc4_hdmi_driver = { 2370 2395 .probe = vc4_hdmi_dev_probe, 2371 2396 .remove = 
vc4_hdmi_dev_remove, 2372 2397 .driver = { 2373 2398 .name = "vc4_hdmi", 2374 2399 .of_match_table = vc4_hdmi_dt_match, 2375 - .pm = &vc4_hdmi_pm_ops, 2376 2400 }, 2377 2401 };
+1
drivers/hv/ring_buffer.c
··· 245 245 mutex_unlock(&ring_info->ring_buffer_mutex); 246 246 247 247 kfree(ring_info->pkt_buffer); 248 + ring_info->pkt_buffer = NULL; 248 249 ring_info->pkt_buffer_size = 0; 249 250 } 250 251
+1 -1
drivers/macintosh/smu.c
··· 570 570 fail_db_node: 571 571 of_node_put(smu->db_node); 572 572 fail_bootmem: 573 - memblock_free(__pa(smu), sizeof(struct smu_device)); 573 + memblock_free_ptr(smu, sizeof(struct smu_device)); 574 574 smu = NULL; 575 575 fail_np: 576 576 of_node_put(np);
+6
drivers/net/dsa/lantiq_gswip.c
··· 1885 1885 1886 1886 reset_control_assert(gphy_fw->reset); 1887 1887 1888 + /* The vendor BSP uses a 200ms delay after asserting the reset line. 1889 + * Without this some users are observing that the PHY is not coming up 1890 + * on the MDIO bus. 1891 + */ 1892 + msleep(200); 1893 + 1888 1894 ret = request_firmware(&fw, gphy_fw->fw_name, dev); 1889 1895 if (ret) { 1890 1896 dev_err(dev, "failed to load firmware: %s, error: %i\n",
+22 -8
drivers/net/dsa/qca8k.c
··· 643 643 } 644 644 645 645 static int 646 - qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data) 646 + qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data) 647 647 { 648 - struct qca8k_priv *priv = salve_bus->priv; 649 - struct mii_bus *bus = priv->bus; 650 648 u16 r1, r2, page; 651 649 u32 val; 652 650 int ret; ··· 680 682 } 681 683 682 684 static int 683 - qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum) 685 + qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum) 684 686 { 685 - struct qca8k_priv *priv = salve_bus->priv; 686 - struct mii_bus *bus = priv->bus; 687 687 u16 r1, r2, page; 688 688 u32 val; 689 689 int ret; ··· 720 724 ret = val & QCA8K_MDIO_MASTER_DATA_MASK; 721 725 722 726 return ret; 727 + } 728 + 729 + static int 730 + qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) 731 + { 732 + struct qca8k_priv *priv = slave_bus->priv; 733 + struct mii_bus *bus = priv->bus; 734 + 735 + return qca8k_mdio_write(bus, phy, regnum, data); 736 + } 737 + 738 + static int 739 + qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) 740 + { 741 + struct qca8k_priv *priv = slave_bus->priv; 742 + struct mii_bus *bus = priv->bus; 743 + 744 + return qca8k_mdio_read(bus, phy, regnum); 723 745 } 724 746 725 747 static int ··· 789 775 790 776 bus->priv = (void *)priv; 791 777 bus->name = "qca8k slave mii"; 792 - bus->read = qca8k_mdio_read; 793 - bus->write = qca8k_mdio_write; 778 + bus->read = qca8k_internal_mdio_read; 779 + bus->write = qca8k_internal_mdio_write; 794 780 snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", 795 781 ds->index); 796 782
+1 -1
drivers/net/ethernet/3com/3c515.c
··· 1050 1050 #ifdef VORTEX_BUS_MASTER 1051 1051 if (vp->bus_master) { 1052 1052 /* Set the bus-master controller to transfer the packet. */ 1053 - outl((int) (skb->data), ioaddr + Wn7_MasterAddr); 1053 + outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr); 1054 1054 outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); 1055 1055 vp->tx_skb = skb; 1056 1056 outw(StartDMADown, ioaddr + EL3_CMD);
+7 -15
drivers/net/ethernet/8390/ne.c
··· 922 922 } 923 923 } 924 924 925 - #ifdef MODULE 926 925 static int __init ne_init(void) 927 926 { 928 927 int retval; 929 - ne_add_devices(); 928 + 929 + if (IS_MODULE(CONFIG_NE2000)) 930 + ne_add_devices(); 931 + 930 932 retval = platform_driver_probe(&ne_driver, ne_drv_probe); 931 - if (retval) { 933 + 934 + if (IS_MODULE(CONFIG_NE2000) && retval) { 932 935 if (io[0] == 0) 933 936 pr_notice("ne.c: You must supply \"io=0xNNN\"" 934 937 " value(s) for ISA cards.\n"); ··· 944 941 return retval; 945 942 } 946 943 module_init(ne_init); 947 - #else /* MODULE */ 948 - static int __init ne_init(void) 949 - { 950 - int retval = platform_driver_probe(&ne_driver, ne_drv_probe); 951 944 952 - /* Unregister unused platform_devices. */ 953 - ne_loop_rm_unreg(0); 954 - return retval; 955 - } 956 - module_init(ne_init); 957 - 958 - #ifdef CONFIG_NETDEV_LEGACY_INIT 945 + #if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT) 959 946 struct net_device * __init ne_probe(int unit) 960 947 { 961 948 int this_dev; ··· 987 994 return ERR_PTR(-ENODEV); 988 995 } 989 996 #endif 990 - #endif /* MODULE */ 991 997 992 998 static void __exit ne_exit(void) 993 999 {
+1 -1
drivers/net/ethernet/amd/ni65.c
··· 748 748 #ifdef XMT_VIA_SKB 749 749 skb_save[i] = p->tmd_skb[i]; 750 750 #endif 751 - buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer); 751 + buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer); 752 752 blen[i] = tmdp->blen; 753 753 tmdp->u.s.status = 0x0; 754 754 }
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 1224 1224 1225 1225 /* SR-IOV capability was enabled but there are no VFs*/ 1226 1226 if (iov->total == 0) { 1227 - err = -EINVAL; 1227 + err = 0; 1228 1228 goto failed; 1229 1229 } 1230 1230
+29 -4
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 2213 2213 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2214 2214 bp->current_interval * 10); 2215 2215 fw_health->tmr_counter = fw_health->tmr_multiplier; 2216 - if (!fw_health->enabled) { 2216 + if (!fw_health->enabled) 2217 2217 fw_health->last_fw_heartbeat = 2218 2218 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2219 - fw_health->last_fw_reset_cnt = 2220 - bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2221 - } 2219 + fw_health->last_fw_reset_cnt = 2220 + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2222 2221 netif_info(bp, drv, bp->dev, 2223 2222 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n", 2224 2223 fw_health->master, fw_health->last_fw_reset_cnt, ··· 2729 2730 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2730 2731 int j; 2731 2732 2733 + if (!txr->tx_buf_ring) 2734 + continue; 2735 + 2732 2736 for (j = 0; j < max_idx;) { 2733 2737 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2734 2738 struct sk_buff *skb; ··· 2816 2814 } 2817 2815 2818 2816 skip_rx_tpa_free: 2817 + if (!rxr->rx_buf_ring) 2818 + goto skip_rx_buf_free; 2819 + 2819 2820 for (i = 0; i < max_idx; i++) { 2820 2821 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 2821 2822 dma_addr_t mapping = rx_buf->mapping; ··· 2841 2836 kfree(data); 2842 2837 } 2843 2838 } 2839 + 2840 + skip_rx_buf_free: 2841 + if (!rxr->rx_agg_ring) 2842 + goto skip_rx_agg_free; 2843 + 2844 2844 for (i = 0; i < max_agg_idx; i++) { 2845 2845 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 2846 2846 struct page *page = rx_agg_buf->page; ··· 2862 2852 2863 2853 __free_page(page); 2864 2854 } 2855 + 2856 + skip_rx_agg_free: 2865 2857 if (rxr->rx_page) { 2866 2858 __free_page(rxr->rx_page); 2867 2859 rxr->rx_page = NULL; ··· 2912 2900 struct pci_dev *pdev = bp->pdev; 2913 2901 int i; 2914 2902 2903 + if (!rmem->pg_arr) 2904 + goto skip_pages; 2905 + 2915 2906 for (i = 0; i < rmem->nr_pages; i++) { 2916 2907 if (!rmem->pg_arr[i]) 2917 2908 
continue; ··· 2924 2909 2925 2910 rmem->pg_arr[i] = NULL; 2926 2911 } 2912 + skip_pages: 2927 2913 if (rmem->pg_tbl) { 2928 2914 size_t pg_tbl_size = rmem->nr_pages * 8; 2929 2915 ··· 3244 3228 3245 3229 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 3246 3230 { 3231 + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3232 + 3247 3233 kfree(cpr->cp_desc_ring); 3248 3234 cpr->cp_desc_ring = NULL; 3235 + ring->ring_mem.pg_arr = NULL; 3249 3236 kfree(cpr->cp_desc_mapping); 3250 3237 cpr->cp_desc_mapping = NULL; 3238 + ring->ring_mem.dma_arr = NULL; 3251 3239 } 3252 3240 3253 3241 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) ··· 12227 12207 return; 12228 12208 } 12229 12209 12210 + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 12211 + bp->fw_health->enabled) { 12212 + bp->fw_health->last_fw_reset_cnt = 12213 + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 12214 + } 12230 12215 bp->fw_reset_state = 0; 12231 12216 /* Make sure fw_reset_state is 0 before clearing the flag */ 12232 12217 smp_mb__before_atomic();
-3
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
··· 1884 1884 { 1885 1885 struct bnxt_flower_indr_block_cb_priv *cb_priv; 1886 1886 1887 - /* All callback list access should be protected by RTNL. */ 1888 - ASSERT_RTNL(); 1889 - 1890 1887 list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list) 1891 1888 if (cb_priv->tunnel_netdev == netdev) 1892 1889 return cb_priv;
+1 -1
drivers/net/ethernet/cadence/macb_pci.c
··· 111 111 struct platform_device *plat_dev = pci_get_drvdata(pdev); 112 112 struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev); 113 113 114 - platform_device_unregister(plat_dev); 115 114 clk_unregister(plat_data->pclk); 116 115 clk_unregister(plat_data->hclk); 116 + platform_device_unregister(plat_dev); 117 117 } 118 118 119 119 static const struct pci_device_id dev_id_table[] = {
+11 -3
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 61 61 module_param(tx_sgl, uint, 0600); 62 62 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); 63 63 64 + static bool page_pool_enabled = true; 65 + module_param(page_pool_enabled, bool, 0400); 66 + 64 67 #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ 65 68 sizeof(struct sg_table)) 66 69 #define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ ··· 76 73 #define HNS3_OUTER_VLAN_TAG 2 77 74 78 75 #define HNS3_MIN_TX_LEN 33U 76 + #define HNS3_MIN_TUN_PKT_LEN 65U 79 77 80 78 /* hns3_pci_tbl - PCI Device ID Table 81 79 * ··· 1428 1424 l4.tcp->doff); 1429 1425 break; 1430 1426 case IPPROTO_UDP: 1431 - if (hns3_tunnel_csum_bug(skb)) 1432 - return skb_checksum_help(skb); 1427 + if (hns3_tunnel_csum_bug(skb)) { 1428 + int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); 1429 + 1430 + return ret ? ret : skb_checksum_help(skb); 1431 + } 1433 1432 1434 1433 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 1435 1434 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, ··· 4760 4753 goto out_with_desc_cb; 4761 4754 4762 4755 if (!HNAE3_IS_TX_RING(ring)) { 4763 - hns3_alloc_page_pool(ring); 4756 + if (page_pool_enabled) 4757 + hns3_alloc_page_pool(ring); 4764 4758 4765 4759 ret = hns3_alloc_ring_buffers(ring); 4766 4760 if (ret)
+4
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
··· 1724 1724 } 1725 1725 1726 1726 bd_num = le32_to_cpu(req->bd_num); 1727 + if (!bd_num) { 1728 + dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n"); 1729 + return -EINVAL; 1730 + } 1727 1731 1728 1732 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); 1729 1733 if (!desc_src)
+11 -8
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 1528 1528 static int hclge_configure(struct hclge_dev *hdev) 1529 1529 { 1530 1530 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1531 + const struct cpumask *cpumask = cpu_online_mask; 1531 1532 struct hclge_cfg cfg; 1532 1533 unsigned int i; 1533 - int ret; 1534 + int node, ret; 1534 1535 1535 1536 ret = hclge_get_cfg(hdev, &cfg); 1536 1537 if (ret) ··· 1596 1595 1597 1596 hclge_init_kdump_kernel_config(hdev); 1598 1597 1599 - /* Set the init affinity based on pci func number */ 1600 - i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); 1601 - i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0; 1602 - cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), 1603 - &hdev->affinity_mask); 1598 + /* Set the affinity based on numa node */ 1599 + node = dev_to_node(&hdev->pdev->dev); 1600 + if (node != NUMA_NO_NODE) 1601 + cpumask = cpumask_of_node(node); 1602 + 1603 + cpumask_copy(&hdev->affinity_mask, cpumask); 1604 1604 1605 1605 return ret; 1606 1606 } ··· 8127 8125 hclge_clear_arfs_rules(hdev); 8128 8126 spin_unlock_bh(&hdev->fd_rule_lock); 8129 8127 8130 - /* If it is not PF reset, the firmware will disable the MAC, 8128 + /* If it is not PF reset or FLR, the firmware will disable the MAC, 8131 8129 * so it only need to stop phy here. 8132 8130 */ 8133 8131 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && 8134 - hdev->reset_type != HNAE3_FUNC_RESET) { 8132 + hdev->reset_type != HNAE3_FUNC_RESET && 8133 + hdev->reset_type != HNAE3_FLR_RESET) { 8135 8134 hclge_mac_stop_phy(hdev); 8136 8135 hclge_update_link_status(hdev); 8137 8136 return;
+3 -3
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 2465 2465 2466 2466 hclgevf_enable_vector(&hdev->misc_vector, false); 2467 2467 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2468 + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2469 + hclgevf_clear_event_cause(hdev, clearval); 2468 2470 2469 2471 switch (event_cause) { 2470 2472 case HCLGEVF_VECTOR0_EVENT_RST: ··· 2479 2477 break; 2480 2478 } 2481 2479 2482 - if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2483 - hclgevf_clear_event_cause(hdev, clearval); 2480 + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2484 2481 hclgevf_enable_vector(&hdev->misc_vector, true); 2485 - } 2486 2482 2487 2483 return IRQ_HANDLED; 2488 2484 }
+1 -1
drivers/net/ethernet/i825xx/82596.c
··· 1144 1144 err = -ENODEV; 1145 1145 goto out; 1146 1146 } 1147 - memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */ 1147 + memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */ 1148 1148 dev->base_addr = MVME_I596_BASE; 1149 1149 dev->irq = (unsigned) MVME16x_IRQ_I596; 1150 1150 goto found;
+8
drivers/net/ethernet/ibm/ibmvnic.c
··· 4700 4700 return 0; 4701 4701 } 4702 4702 4703 + if (adapter->failover_pending) { 4704 + adapter->init_done_rc = -EAGAIN; 4705 + netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 4706 + complete(&adapter->init_done); 4707 + /* login response buffer will be released on reset */ 4708 + return 0; 4709 + } 4710 + 4703 4711 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4704 4712 4705 4713 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
+2
drivers/net/ethernet/intel/ice/ice.h
··· 695 695 { 696 696 if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { 697 697 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 698 + set_bit(ICE_FLAG_AUX_ENA, pf->flags); 698 699 ice_plug_aux_dev(pf); 699 700 } 700 701 } ··· 708 707 { 709 708 ice_unplug_aux_dev(pf); 710 709 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 710 + clear_bit(ICE_FLAG_AUX_ENA, pf->flags); 711 711 } 712 712 #endif /* _ICE_H_ */
+6
drivers/net/ethernet/intel/ice/ice_idc.c
··· 271 271 struct auxiliary_device *adev; 272 272 int ret; 273 273 274 + /* if this PF doesn't support a technology that requires auxiliary 275 + * devices, then gracefully exit 276 + */ 277 + if (!ice_is_aux_ena(pf)) 278 + return 0; 279 + 274 280 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); 275 281 if (!iadev) 276 282 return -ENOMEM;
+3 -1
drivers/net/ethernet/intel/igc/igc_main.c
··· 6350 6350 if (pci_using_dac) 6351 6351 netdev->features |= NETIF_F_HIGHDMA; 6352 6352 6353 - netdev->vlan_features |= netdev->features; 6353 + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 6354 + netdev->mpls_features |= NETIF_F_HW_CSUM; 6355 + netdev->hw_enc_features |= netdev->vlan_features; 6354 6356 6355 6357 /* MTU range: 68 - 9216 */ 6356 6358 netdev->min_mtu = ETH_MIN_MTU;
+2 -5
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
··· 658 658 659 659 static int mlx5_devlink_rdma_param_register(struct devlink *devlink) 660 660 { 661 - struct mlx5_core_dev *dev = devlink_priv(devlink); 662 661 union devlink_param_value value; 663 662 int err; 664 663 665 - if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) 664 + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) 666 665 return 0; 667 666 668 667 err = devlink_param_register(devlink, &enable_rdma_param); ··· 678 679 679 680 static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) 680 681 { 681 - struct mlx5_core_dev *dev = devlink_priv(devlink); 682 - 683 - if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) 682 + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) 684 683 return; 685 684 686 685 devlink_param_unpublish(devlink, &enable_rdma_param);
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
··· 1007 1007 err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn); 1008 1008 if (err) { 1009 1009 mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err); 1010 - return err; 1010 + goto err_cancel_work; 1011 1011 } 1012 1012 1013 1013 err = mlx5_fw_tracer_create_mkey(tracer); ··· 1031 1031 mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); 1032 1032 err_dealloc_pd: 1033 1033 mlx5_core_dealloc_pd(dev, tracer->buff.pdn); 1034 + err_cancel_work: 1034 1035 cancel_work_sync(&tracer->read_fw_strings_work); 1035 1036 return err; 1036 1037 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 922 922 923 923 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); 924 924 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); 925 - int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); 925 + int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter); 926 926 927 927 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, 928 928 u16 vid);
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
··· 137 137 u16 vport_num, esw_owner_vhca_id; 138 138 struct netlink_ext_ack *extack; 139 139 int ifindex = upper->ifindex; 140 - int err; 140 + int err = 0; 141 141 142 142 if (!netif_is_bridge_master(upper)) 143 143 return 0; ··· 244 244 struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info); 245 245 const struct switchdev_attr *attr = port_attr_info->attr; 246 246 u16 vport_num, esw_owner_vhca_id; 247 - int err; 247 + int err = 0; 248 248 249 249 if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, 250 250 &esw_owner_vhca_id))
-3
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
··· 300 300 { 301 301 struct mlx5e_rep_indr_block_priv *cb_priv; 302 302 303 - /* All callback list access should be protected by RTNL. */ 304 - ASSERT_RTNL(); 305 - 306 303 list_for_each_entry(cb_priv, 307 304 &rpriv->uplink_priv.tc_indr_block_priv_list, 308 305 list)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
··· 572 572 if (res->features & MLX5E_RX_RES_FEATURE_PTP) { 573 573 u32 rqn; 574 574 575 - if (mlx5e_channels_get_ptp_rqn(chs, &rqn)) 575 + if (!mlx5e_channels_get_ptp_rqn(chs, &rqn)) 576 576 rqn = res->drop_rqn; 577 577 578 578 err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
+6 -5
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1884 1884 return set_pflag_cqe_based_moder(netdev, enable, true); 1885 1885 } 1886 1886 1887 - int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val) 1887 + int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter) 1888 1888 { 1889 1889 bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS); 1890 1890 struct mlx5e_params new_params; ··· 1896 1896 if (curr_val == new_val) 1897 1897 return 0; 1898 1898 1899 - if (new_val && !priv->profile->rx_ptp_support && 1900 - priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) { 1899 + if (new_val && !priv->profile->rx_ptp_support && rx_filter) { 1901 1900 netdev_err(priv->netdev, 1902 1901 "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n"); 1903 1902 return -EINVAL; ··· 1904 1905 1905 1906 new_params = priv->channels.params; 1906 1907 MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); 1907 - if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) 1908 + if (rx_filter) 1908 1909 new_params.ptp_rx = new_val; 1909 1910 1910 1911 if (new_params.ptp_rx == priv->channels.params.ptp_rx) ··· 1927 1928 { 1928 1929 struct mlx5e_priv *priv = netdev_priv(netdev); 1929 1930 struct mlx5_core_dev *mdev = priv->mdev; 1931 + bool rx_filter; 1930 1932 int err; 1931 1933 1932 1934 if (!MLX5_CAP_GEN(mdev, cqe_compression)) 1933 1935 return -EOPNOTSUPP; 1934 1936 1935 - err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); 1937 + rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE; 1938 + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter); 1936 1939 if (err) 1937 1940 return err; 1938 1941
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 3554 3554 3555 3555 if (!rx_filter) 3556 3556 /* Reset CQE compression to Admin default */ 3557 - return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def); 3557 + return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false); 3558 3558 3559 3559 if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) 3560 3560 return 0; 3561 3561 3562 3562 /* Disable CQE compression */ 3563 3563 netdev_warn(priv->netdev, "Disabling RX cqe compression\n"); 3564 - err = mlx5e_modify_rx_cqe_compression_locked(priv, false); 3564 + err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true); 3565 3565 if (err) 3566 3566 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); 3567 3567
+2 -3
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1682 1682 1683 1683 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); 1684 1684 if (!curr_match) { 1685 + rcu_read_unlock(); 1685 1686 free_match_list(match_head, ft_locked); 1686 - err = -ENOMEM; 1687 - goto out; 1687 + return -ENOMEM; 1688 1688 } 1689 1689 curr_match->g = g; 1690 1690 list_add_tail(&curr_match->list, &match_head->list); 1691 1691 } 1692 - out: 1693 1692 rcu_read_unlock(); 1694 1693 return err; 1695 1694 }
+8 -2
drivers/net/ethernet/mellanox/mlx5/core/lag.c
··· 927 927 struct mlx5_core_dev *dev1; 928 928 struct mlx5_lag *ldev; 929 929 930 + ldev = mlx5_lag_dev(dev); 931 + if (!ldev) 932 + return; 933 + 930 934 mlx5_dev_list_lock(); 931 935 932 - ldev = mlx5_lag_dev(dev); 933 936 dev0 = ldev->pf[MLX5_LAG_P1].dev; 934 937 dev1 = ldev->pf[MLX5_LAG_P2].dev; 935 938 ··· 949 946 { 950 947 struct mlx5_lag *ldev; 951 948 952 - mlx5_dev_list_lock(); 953 949 ldev = mlx5_lag_dev(dev); 950 + if (!ldev) 951 + return; 952 + 953 + mlx5_dev_list_lock(); 954 954 ldev->mode_changes_in_progress--; 955 955 mlx5_dev_list_unlock(); 956 956 mlx5_queue_bond_work(ldev, 0);
+7
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
··· 142 142 err = mlxbf_gige_clean_port(priv); 143 143 if (err) 144 144 goto free_irqs; 145 + 146 + /* Clear driver's valid_polarity to match hardware, 147 + * since the above call to clean_port() resets the 148 + * receive polarity used by hardware. 149 + */ 150 + priv->valid_polarity = 0; 151 + 145 152 err = mlxbf_gige_rx_init(priv); 146 153 if (err) 147 154 goto free_irqs;
+1 -3
drivers/net/ethernet/microsoft/mana/hw_channel.c
··· 398 398 int err; 399 399 u16 i; 400 400 401 - dma_buf = kzalloc(sizeof(*dma_buf) + 402 - q_depth * sizeof(struct hwc_work_request), 403 - GFP_KERNEL); 401 + dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL); 404 402 if (!dma_buf) 405 403 return -ENOMEM; 406 404
-3
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1767 1767 struct nfp_flower_indr_block_cb_priv *cb_priv; 1768 1768 struct nfp_flower_priv *priv = app->priv; 1769 1769 1770 - /* All callback list access should be protected by RTNL. */ 1771 - ASSERT_RTNL(); 1772 - 1773 1770 list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) 1774 1771 if (cb_priv->netdev == netdev) 1775 1772 return cb_priv;
+5 -1
drivers/net/ethernet/qlogic/qed/qed_mcp.c
··· 3367 3367 struct qed_nvm_image_att *p_image_att) 3368 3368 { 3369 3369 enum nvm_image_type type; 3370 + int rc; 3370 3371 u32 i; 3371 3372 3372 3373 /* Translate image_id into MFW definitions */ ··· 3396 3395 return -EINVAL; 3397 3396 } 3398 3397 3399 - qed_mcp_nvm_info_populate(p_hwfn); 3398 + rc = qed_mcp_nvm_info_populate(p_hwfn); 3399 + if (rc) 3400 + return rc; 3401 + 3400 3402 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 3401 3403 if (type == p_hwfn->nvm_info.image_att[i].image_type) 3402 3404 break;
+1 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
··· 1354 1354 struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; 1355 1355 const struct firmware *fw = fw_info->fw; 1356 1356 u32 dest, *p_cache, *temp; 1357 - int i, ret = -EIO; 1358 1357 __le32 *temp_le; 1359 1358 u8 data[16]; 1360 1359 size_t size; 1360 + int i, ret; 1361 1361 u64 addr; 1362 1362 1363 1363 temp = vzalloc(fw->size);
+8 -1
drivers/net/ethernet/rdc/r6040.c
··· 119 119 #define PHY_ST 0x8A /* PHY status register */ 120 120 #define MAC_SM 0xAC /* MAC status machine */ 121 121 #define MAC_SM_RST 0x0002 /* MAC status machine reset */ 122 + #define MD_CSC 0xb6 /* MDC speed control register */ 123 + #define MD_CSC_DEFAULT 0x0030 122 124 #define MAC_ID 0xBE /* Identifier register */ 123 125 124 126 #define TX_DCNT 0x80 /* TX descriptor count */ ··· 357 355 { 358 356 void __iomem *ioaddr = lp->base; 359 357 int limit = MAC_DEF_TIMEOUT; 360 - u16 cmd; 358 + u16 cmd, md_csc; 361 359 360 + md_csc = ioread16(ioaddr + MD_CSC); 362 361 iowrite16(MAC_RST, ioaddr + MCR1); 363 362 while (limit--) { 364 363 cmd = ioread16(ioaddr + MCR1); ··· 371 368 iowrite16(MAC_SM_RST, ioaddr + MAC_SM); 372 369 iowrite16(0, ioaddr + MAC_SM); 373 370 mdelay(5); 371 + 372 + /* Restore MDIO clock frequency */ 373 + if (md_csc != MD_CSC_DEFAULT) 374 + iowrite16(md_csc, ioaddr + MD_CSC); 374 375 } 375 376 376 377 static void r6040_init_mac_regs(struct net_device *dev)
+74 -32
drivers/net/ethernet/sfc/efx_channels.c
··· 166 166 * We need a channel per event queue, plus a VI per tx queue. 167 167 * This may be more pessimistic than it needs to be. 168 168 */ 169 - if (n_channels + n_xdp_ev > max_channels) { 170 - netif_err(efx, drv, efx->net_dev, 171 - "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", 172 - n_xdp_ev, n_channels, max_channels); 173 - netif_err(efx, drv, efx->net_dev, 174 - "XDP_TX and XDP_REDIRECT will not work on this interface"); 175 - efx->n_xdp_channels = 0; 176 - efx->xdp_tx_per_channel = 0; 177 - efx->xdp_tx_queue_count = 0; 169 + if (n_channels >= max_channels) { 170 + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; 171 + netif_warn(efx, drv, efx->net_dev, 172 + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", 173 + n_xdp_ev, n_channels, max_channels); 174 + netif_warn(efx, drv, efx->net_dev, 175 + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); 178 176 } else if (n_channels + n_xdp_tx > efx->max_vis) { 179 - netif_err(efx, drv, efx->net_dev, 180 - "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", 181 - n_xdp_tx, n_channels, efx->max_vis); 182 - netif_err(efx, drv, efx->net_dev, 183 - "XDP_TX and XDP_REDIRECT will not work on this interface"); 184 - efx->n_xdp_channels = 0; 185 - efx->xdp_tx_per_channel = 0; 186 - efx->xdp_tx_queue_count = 0; 177 + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; 178 + netif_warn(efx, drv, efx->net_dev, 179 + "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", 180 + n_xdp_tx, n_channels, efx->max_vis); 181 + netif_warn(efx, drv, efx->net_dev, 182 + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); 183 + } else if (n_channels + n_xdp_ev > max_channels) { 184 + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; 185 + netif_warn(efx, drv, efx->net_dev, 186 + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", 187 + n_xdp_ev, 
n_channels, max_channels); 188 + 189 + n_xdp_ev = max_channels - n_channels; 190 + netif_warn(efx, drv, efx->net_dev, 191 + "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n", 192 + DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev)); 187 193 } else { 194 + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; 195 + } 196 + 197 + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) { 188 198 efx->n_xdp_channels = n_xdp_ev; 189 199 efx->xdp_tx_per_channel = tx_per_ev; 190 200 efx->xdp_tx_queue_count = n_xdp_tx; 191 201 n_channels += n_xdp_ev; 192 202 netif_dbg(efx, drv, efx->net_dev, 193 203 "Allocating %d TX and %d event queues for XDP\n", 194 - n_xdp_tx, n_xdp_ev); 204 + n_xdp_ev * tx_per_ev, n_xdp_ev); 205 + } else { 206 + efx->n_xdp_channels = 0; 207 + efx->xdp_tx_per_channel = 0; 208 + efx->xdp_tx_queue_count = n_xdp_tx; 195 209 } 196 210 197 211 if (vec_count < n_channels) { ··· 872 858 goto out; 873 859 } 874 860 861 + static inline int 862 + efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, 863 + struct efx_tx_queue *tx_queue) 864 + { 865 + if (xdp_queue_number >= efx->xdp_tx_queue_count) 866 + return -EINVAL; 867 + 868 + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", 869 + tx_queue->channel->channel, tx_queue->label, 870 + xdp_queue_number, tx_queue->queue); 871 + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; 872 + return 0; 873 + } 874 + 875 875 int efx_set_channels(struct efx_nic *efx) 876 876 { 877 877 struct efx_tx_queue *tx_queue; ··· 924 896 if (efx_channel_is_xdp_tx(channel)) { 925 897 efx_for_each_channel_tx_queue(tx_queue, channel) { 926 898 tx_queue->queue = next_queue++; 927 - 928 - /* We may have a few left-over XDP TX 929 - * queues owing to xdp_tx_queue_count 930 - * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL. 931 - * We still allocate and probe those 932 - * TXQs, but never use them. 
933 - */ 934 - if (xdp_queue_number < efx->xdp_tx_queue_count) { 935 - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", 936 - channel->channel, tx_queue->label, 937 - xdp_queue_number, tx_queue->queue); 938 - efx->xdp_tx_queues[xdp_queue_number] = tx_queue; 899 + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); 900 + if (rc == 0) 939 901 xdp_queue_number++; 940 - } 941 902 } 942 903 } else { 943 904 efx_for_each_channel_tx_queue(tx_queue, channel) { ··· 935 918 channel->channel, tx_queue->label, 936 919 tx_queue->queue); 937 920 } 921 + 922 + /* If XDP is borrowing queues from net stack, it must use the queue 923 + * with no csum offload, which is the first one of the channel 924 + * (note: channel->tx_queue_by_type is not initialized yet) 925 + */ 926 + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { 927 + tx_queue = &channel->tx_queue[0]; 928 + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); 929 + if (rc == 0) 930 + xdp_queue_number++; 931 + } 938 932 } 939 933 } 940 934 } 941 - WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count); 935 + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && 936 + xdp_queue_number != efx->xdp_tx_queue_count); 937 + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && 938 + xdp_queue_number > efx->xdp_tx_queue_count); 939 + 940 + /* If we have more CPUs than assigned XDP TX queues, assign the already 941 + * existing queues to the exceeding CPUs 942 + */ 943 + next_queue = 0; 944 + while (xdp_queue_number < efx->xdp_tx_queue_count) { 945 + tx_queue = efx->xdp_tx_queues[next_queue++]; 946 + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); 947 + if (rc == 0) 948 + xdp_queue_number++; 949 + } 942 950 943 951 rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); 944 952 if (rc)
+8
drivers/net/ethernet/sfc/net_driver.h
··· 782 782 #define EFX_RPS_MAX_IN_FLIGHT 8 783 783 #endif /* CONFIG_RFS_ACCEL */ 784 784 785 + enum efx_xdp_tx_queues_mode { 786 + EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */ 787 + EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */ 788 + EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */ 789 + }; 790 + 785 791 /** 786 792 * struct efx_nic - an Efx NIC 787 793 * @name: Device name (net device name or bus id before net device registered) ··· 826 820 * should be allocated for this NIC 827 821 * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues. 828 822 * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit. 823 + * @xdp_txq_queues_mode: XDP TX queues sharing strategy. 829 824 * @rxq_entries: Size of receive queues requested by user. 830 825 * @txq_entries: Size of transmit queues requested by user. 831 826 * @txq_stop_thresh: TX queue fill level at or above which we stop it. ··· 986 979 987 980 unsigned int xdp_tx_queue_count; 988 981 struct efx_tx_queue **xdp_tx_queues; 982 + enum efx_xdp_tx_queues_mode xdp_txq_queues_mode; 989 983 990 984 unsigned rxq_entries; 991 985 unsigned txq_entries;
+21 -8
drivers/net/ethernet/sfc/tx.c
··· 428 428 unsigned int len; 429 429 int space; 430 430 int cpu; 431 - int i; 431 + int i = 0; 432 + 433 + if (unlikely(n && !xdpfs)) 434 + return -EINVAL; 435 + if (unlikely(!n)) 436 + return 0; 432 437 433 438 cpu = raw_smp_processor_id(); 434 - 435 - if (!efx->xdp_tx_queue_count || 436 - unlikely(cpu >= efx->xdp_tx_queue_count)) 439 + if (unlikely(cpu >= efx->xdp_tx_queue_count)) 437 440 return -EINVAL; 438 441 439 442 tx_queue = efx->xdp_tx_queues[cpu]; 440 443 if (unlikely(!tx_queue)) 441 444 return -EINVAL; 442 445 443 - if (unlikely(n && !xdpfs)) 444 - return -EINVAL; 446 + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) 447 + HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); 445 448 446 - if (!n) 447 - return 0; 449 + /* If we're borrowing net stack queues we have to handle stop-restart 450 + * or we might block the queue and it will be considered as frozen 451 + */ 452 + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { 453 + if (netif_tx_queue_stopped(tx_queue->core_txq)) 454 + goto unlock; 455 + efx_tx_maybe_stop_queue(tx_queue); 456 + } 448 457 449 458 /* Check for available space. We should never need multiple 450 459 * descriptors per frame. ··· 492 483 /* Pass mapped frames to hardware. */ 493 484 if (flush && i > 0) 494 485 efx_nic_push_buffers(tx_queue); 486 + 487 + unlock: 488 + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) 489 + HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); 495 490 496 491 return i == 0 ? -EIO : i; 497 492 }
+1 -15
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 309 309 priv->clk_csr = STMMAC_CSR_100_150M; 310 310 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 311 311 priv->clk_csr = STMMAC_CSR_150_250M; 312 - else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) 312 + else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) 313 313 priv->clk_csr = STMMAC_CSR_250_300M; 314 314 } 315 315 ··· 7118 7118 struct net_device *ndev = dev_get_drvdata(dev); 7119 7119 struct stmmac_priv *priv = netdev_priv(ndev); 7120 7120 u32 chan; 7121 - int ret; 7122 7121 7123 7122 if (!ndev || !netif_running(ndev)) 7124 7123 return 0; ··· 7149 7150 } else { 7150 7151 stmmac_mac_set(priv, priv->ioaddr, false); 7151 7152 pinctrl_pm_select_sleep_state(priv->device); 7152 - /* Disable clock in case of PWM is off */ 7153 - clk_disable_unprepare(priv->plat->clk_ptp_ref); 7154 - ret = pm_runtime_force_suspend(dev); 7155 - if (ret) { 7156 - mutex_unlock(&priv->lock); 7157 - return ret; 7158 - } 7159 7153 } 7160 7154 7161 7155 mutex_unlock(&priv->lock); ··· 7234 7242 priv->irq_wake = 0; 7235 7243 } else { 7236 7244 pinctrl_pm_select_default_state(priv->device); 7237 - /* enable the clk previously disabled */ 7238 - ret = pm_runtime_force_resume(dev); 7239 - if (ret) 7240 - return ret; 7241 - if (priv->plat->clk_ptp_ref) 7242 - clk_prepare_enable(priv->plat->clk_ptp_ref); 7243 7245 /* reset the phy so that it's ready */ 7244 7246 if (priv->mii) 7245 7247 stmmac_mdio_reset(priv->mii);
+44
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 9 9 *******************************************************************************/ 10 10 11 11 #include <linux/platform_device.h> 12 + #include <linux/pm_runtime.h> 12 13 #include <linux/module.h> 13 14 #include <linux/io.h> 14 15 #include <linux/of.h> ··· 772 771 return stmmac_bus_clks_config(priv, true); 773 772 } 774 773 774 + static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev) 775 + { 776 + struct net_device *ndev = dev_get_drvdata(dev); 777 + struct stmmac_priv *priv = netdev_priv(ndev); 778 + int ret; 779 + 780 + if (!netif_running(ndev)) 781 + return 0; 782 + 783 + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { 784 + /* Disable clock in case of PWM is off */ 785 + clk_disable_unprepare(priv->plat->clk_ptp_ref); 786 + 787 + ret = pm_runtime_force_suspend(dev); 788 + if (ret) 789 + return ret; 790 + } 791 + 792 + return 0; 793 + } 794 + 795 + static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) 796 + { 797 + struct net_device *ndev = dev_get_drvdata(dev); 798 + struct stmmac_priv *priv = netdev_priv(ndev); 799 + int ret; 800 + 801 + if (!netif_running(ndev)) 802 + return 0; 803 + 804 + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { 805 + /* enable the clk previously disabled */ 806 + ret = pm_runtime_force_resume(dev); 807 + if (ret) 808 + return ret; 809 + 810 + clk_prepare_enable(priv->plat->clk_ptp_ref); 811 + } 812 + 813 + return 0; 814 + } 815 + 775 816 const struct dev_pm_ops stmmac_pltfr_pm_ops = { 776 817 SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume) 777 818 SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL) 819 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume) 778 820 }; 779 821 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); 780 822
+2 -2
drivers/net/hamradio/6pack.c
··· 68 68 #define SIXP_DAMA_OFF 0 69 69 70 70 /* default level 2 parameters */ 71 - #define SIXP_TXDELAY (HZ/4) /* in 1 s */ 71 + #define SIXP_TXDELAY 25 /* 250 ms */ 72 72 #define SIXP_PERSIST 50 /* in 256ths */ 73 - #define SIXP_SLOTTIME (HZ/10) /* in 1 s */ 73 + #define SIXP_SLOTTIME 10 /* 100 ms */ 74 74 #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */ 75 75 #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */ 76 76
+3 -3
drivers/net/hamradio/dmascc.c
··· 973 973 flags = claim_dma_lock(); 974 974 set_dma_mode(priv->param.dma, DMA_MODE_WRITE); 975 975 set_dma_addr(priv->param.dma, 976 - (int) priv->tx_buf[priv->tx_tail] + n); 976 + virt_to_bus(priv->tx_buf[priv->tx_tail]) + n); 977 977 set_dma_count(priv->param.dma, 978 978 priv->tx_len[priv->tx_tail] - n); 979 979 release_dma_lock(flags); ··· 1020 1020 flags = claim_dma_lock(); 1021 1021 set_dma_mode(priv->param.dma, DMA_MODE_READ); 1022 1022 set_dma_addr(priv->param.dma, 1023 - (int) priv->rx_buf[priv->rx_head]); 1023 + virt_to_bus(priv->rx_buf[priv->rx_head])); 1024 1024 set_dma_count(priv->param.dma, BUF_SIZE); 1025 1025 release_dma_lock(flags); 1026 1026 enable_dma(priv->param.dma); ··· 1233 1233 if (priv->param.dma >= 0) { 1234 1234 flags = claim_dma_lock(); 1235 1235 set_dma_addr(priv->param.dma, 1236 - (int) priv->rx_buf[priv->rx_head]); 1236 + virt_to_bus(priv->rx_buf[priv->rx_head])); 1237 1237 set_dma_count(priv->param.dma, BUF_SIZE); 1238 1238 release_dma_lock(flags); 1239 1239 } else {
+2 -1
drivers/net/ipa/ipa_table.c
··· 430 430 * table region determines the number of entries it has. 431 431 */ 432 432 if (filter) { 433 - count = hweight32(ipa->filter_map); 433 + /* Include one extra "slot" to hold the filter map itself */ 434 + count = 1 + hweight32(ipa->filter_map); 434 435 hash_count = hash_mem->size ? count : 0; 435 436 } else { 436 437 count = mem->size / sizeof(__le64);
+1 -1
drivers/net/phy/dp83640_reg.h
··· 5 5 #ifndef HAVE_DP83640_REGISTERS 6 6 #define HAVE_DP83640_REGISTERS 7 7 8 - #define PAGE0 0x0000 8 + /* #define PAGE0 0x0000 */ 9 9 #define PHYCR2 0x001c /* PHY Control Register 2 */ 10 10 11 11 #define PAGE4 0x0004
+3 -1
drivers/net/phy/phy_device.c
··· 233 233 234 234 static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) 235 235 { 236 + struct device_driver *drv = phydev->mdio.dev.driver; 237 + struct phy_driver *phydrv = to_phy_driver(drv); 236 238 struct net_device *netdev = phydev->attached_dev; 237 239 238 - if (!phydev->drv->suspend) 240 + if (!drv || !phydrv->suspend) 239 241 return false; 240 242 241 243 /* PHY not attached? May suspend if the PHY has not already been
+29 -1
drivers/net/phy/phylink.c
··· 1607 1607 if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) 1608 1608 return -EINVAL; 1609 1609 1610 + /* If this link is with an SFP, ensure that changes to advertised modes 1611 + * also cause the associated interface to be selected such that the 1612 + * link can be configured correctly. 1613 + */ 1614 + if (pl->sfp_port && pl->sfp_bus) { 1615 + config.interface = sfp_select_interface(pl->sfp_bus, 1616 + config.advertising); 1617 + if (config.interface == PHY_INTERFACE_MODE_NA) { 1618 + phylink_err(pl, 1619 + "selection of interface failed, advertisement %*pb\n", 1620 + __ETHTOOL_LINK_MODE_MASK_NBITS, 1621 + config.advertising); 1622 + return -EINVAL; 1623 + } 1624 + 1625 + /* Revalidate with the selected interface */ 1626 + linkmode_copy(support, pl->supported); 1627 + if (phylink_validate(pl, support, &config)) { 1628 + phylink_err(pl, "validation of %s/%s with support %*pb failed\n", 1629 + phylink_an_mode_str(pl->cur_link_an_mode), 1630 + phy_modes(config.interface), 1631 + __ETHTOOL_LINK_MODE_MASK_NBITS, support); 1632 + return -EINVAL; 1633 + } 1634 + } 1635 + 1610 1636 mutex_lock(&pl->state_mutex); 1611 1637 pl->link_config.speed = config.speed; 1612 1638 pl->link_config.duplex = config.duplex; ··· 2212 2186 if (phy_interface_mode_is_8023z(iface) && pl->phydev) 2213 2187 return -EINVAL; 2214 2188 2215 - changed = !linkmode_equal(pl->supported, support); 2189 + changed = !linkmode_equal(pl->supported, support) || 2190 + !linkmode_equal(pl->link_config.advertising, 2191 + config.advertising); 2216 2192 if (changed) { 2217 2193 linkmode_copy(pl->supported, support); 2218 2194 linkmode_copy(pl->link_config.advertising, config.advertising);
+2
drivers/net/wan/Makefile
··· 34 34 clean-files := wanxlfw.inc 35 35 $(obj)/wanxl.o: $(obj)/wanxlfw.inc 36 36 37 + CROSS_COMPILE_M68K = m68k-linux-gnu- 38 + 37 39 ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) 38 40 ifeq ($(ARCH),m68k) 39 41 M68KCC = $(CC)
+7 -10
drivers/nvme/host/core.c
··· 3524 3524 lockdep_assert_held(&subsys->lock); 3525 3525 3526 3526 list_for_each_entry(h, &subsys->nsheads, entry) { 3527 - if (h->ns_id == nsid && nvme_tryget_ns_head(h)) 3527 + if (h->ns_id != nsid) 3528 + continue; 3529 + if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) 3528 3530 return h; 3529 3531 } 3530 3532 ··· 3845 3843 3846 3844 mutex_lock(&ns->ctrl->subsys->lock); 3847 3845 list_del_rcu(&ns->siblings); 3846 + if (list_empty(&ns->head->list)) { 3847 + list_del_init(&ns->head->entry); 3848 + last_path = true; 3849 + } 3848 3850 mutex_unlock(&ns->ctrl->subsys->lock); 3849 3851 3850 3852 /* guarantee not available in head->list */ ··· 3862 3856 nvme_cdev_del(&ns->cdev, &ns->cdev_device); 3863 3857 del_gendisk(ns->disk); 3864 3858 blk_cleanup_queue(ns->queue); 3865 - if (blk_get_integrity(ns->disk)) 3866 - blk_integrity_unregister(ns->disk); 3867 3859 3868 3860 down_write(&ns->ctrl->namespaces_rwsem); 3869 3861 list_del_init(&ns->list); 3870 3862 up_write(&ns->ctrl->namespaces_rwsem); 3871 3863 3872 - /* Synchronize with nvme_init_ns_head() */ 3873 - mutex_lock(&ns->head->subsys->lock); 3874 - if (list_empty(&ns->head->list)) { 3875 - list_del_init(&ns->head->entry); 3876 - last_path = true; 3877 - } 3878 - mutex_unlock(&ns->head->subsys->lock); 3879 3864 if (last_path) 3880 3865 nvme_mpath_shutdown_disk(ns->head); 3881 3866 nvme_put_ns(ns);
+5 -2
drivers/nvme/host/multipath.c
··· 600 600 601 601 down_read(&ctrl->namespaces_rwsem); 602 602 list_for_each_entry(ns, &ctrl->namespaces, list) { 603 - unsigned nsid = le32_to_cpu(desc->nsids[n]); 604 - 603 + unsigned nsid; 604 + again: 605 + nsid = le32_to_cpu(desc->nsids[n]); 605 606 if (ns->head->ns_id < nsid) 606 607 continue; 607 608 if (ns->head->ns_id == nsid) 608 609 nvme_update_ns_ana_state(desc, ns); 609 610 if (++n == nr_nsids) 610 611 break; 612 + if (ns->head->ns_id > nsid) 613 + goto again; 611 614 } 612 615 up_read(&ctrl->namespaces_rwsem); 613 616 return 0;
+3 -13
drivers/nvme/host/rdma.c
··· 656 656 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) 657 657 return; 658 658 659 - nvme_rdma_destroy_queue_ib(queue); 660 659 rdma_destroy_id(queue->cm_id); 660 + nvme_rdma_destroy_queue_ib(queue); 661 661 mutex_destroy(&queue->queue_lock); 662 662 } 663 663 ··· 1815 1815 for (i = 0; i < queue->queue_size; i++) { 1816 1816 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); 1817 1817 if (ret) 1818 - goto out_destroy_queue_ib; 1818 + return ret; 1819 1819 } 1820 1820 1821 1821 return 0; 1822 - 1823 - out_destroy_queue_ib: 1824 - nvme_rdma_destroy_queue_ib(queue); 1825 - return ret; 1826 1822 } 1827 1823 1828 1824 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, ··· 1912 1916 if (ret) { 1913 1917 dev_err(ctrl->ctrl.device, 1914 1918 "rdma_connect_locked failed (%d).\n", ret); 1915 - goto out_destroy_queue_ib; 1919 + return ret; 1916 1920 } 1917 1921 1918 1922 return 0; 1919 - 1920 - out_destroy_queue_ib: 1921 - nvme_rdma_destroy_queue_ib(queue); 1922 - return ret; 1923 1923 } 1924 1924 1925 1925 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, ··· 1946 1954 case RDMA_CM_EVENT_ROUTE_ERROR: 1947 1955 case RDMA_CM_EVENT_CONNECT_ERROR: 1948 1956 case RDMA_CM_EVENT_UNREACHABLE: 1949 - nvme_rdma_destroy_queue_ib(queue); 1950 - fallthrough; 1951 1957 case RDMA_CM_EVENT_ADDR_ERROR: 1952 1958 dev_dbg(queue->ctrl->ctrl.device, 1953 1959 "CM error event %d\n", ev->event);
+10 -10
drivers/nvme/host/tcp.c
··· 274 274 } while (ret > 0); 275 275 } 276 276 277 + static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) 278 + { 279 + return !list_empty(&queue->send_list) || 280 + !llist_empty(&queue->req_list) || queue->more_requests; 281 + } 282 + 277 283 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, 278 284 bool sync, bool last) 279 285 { ··· 300 294 nvme_tcp_send_all(queue); 301 295 queue->more_requests = false; 302 296 mutex_unlock(&queue->send_mutex); 303 - } else if (last) { 304 - queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 305 297 } 298 + 299 + if (last && nvme_tcp_queue_more(queue)) 300 + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 306 301 } 307 302 308 303 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) ··· 913 906 read_unlock_bh(&sk->sk_callback_lock); 914 907 } 915 908 916 - static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) 917 - { 918 - return !list_empty(&queue->send_list) || 919 - !llist_empty(&queue->req_list) || queue->more_requests; 920 - } 921 - 922 909 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) 923 910 { 924 911 queue->request = NULL; ··· 1146 1145 pending = true; 1147 1146 else if (unlikely(result < 0)) 1148 1147 break; 1149 - } else 1150 - pending = !llist_empty(&queue->req_list); 1148 + } 1151 1149 1152 1150 result = nvme_tcp_try_recv(queue); 1153 1151 if (result > 0)
+1 -1
drivers/nvme/target/configfs.c
··· 1067 1067 { 1068 1068 struct nvmet_subsys *subsys = to_subsys(item); 1069 1069 1070 - return snprintf(page, PAGE_SIZE, "%*s\n", 1070 + return snprintf(page, PAGE_SIZE, "%.*s\n", 1071 1071 NVMET_SN_MAX_SIZE, subsys->serial); 1072 1072 } 1073 1073
+5 -1
drivers/of/device.c
··· 85 85 break; 86 86 } 87 87 88 - if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) 88 + /* 89 + * Attempt to initialize a restricted-dma-pool region if one was found. 90 + * Note that count can hold a negative error code. 91 + */ 92 + if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) 89 93 dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n"); 90 94 } 91 95
-2
drivers/of/property.c
··· 1291 1291 DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") 1292 1292 DEFINE_SIMPLE_PROP(leds, "leds", NULL) 1293 1293 DEFINE_SIMPLE_PROP(backlight, "backlight", NULL) 1294 - DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL) 1295 1294 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL) 1296 1295 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells") 1297 1296 ··· 1379 1380 { .parse_prop = parse_resets, }, 1380 1381 { .parse_prop = parse_leds, }, 1381 1382 { .parse_prop = parse_backlight, }, 1382 - { .parse_prop = parse_phy_handle, }, 1383 1383 { .parse_prop = parse_gpio_compat, }, 1384 1384 { .parse_prop = parse_interrupts, }, 1385 1385 { .parse_prop = parse_regulators, },
+1 -1
drivers/pci/pci-acpi.c
··· 937 937 938 938 void pci_set_acpi_fwnode(struct pci_dev *dev) 939 939 { 940 - if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev)) 940 + if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev)) 941 941 ACPI_COMPANION_SET(&dev->dev, 942 942 acpi_pci_find_companion(&dev->dev)); 943 943 }
+7 -2
drivers/pci/quirks.c
··· 5435 5435 PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); 5436 5436 5437 5437 /* 5438 - * Create device link for NVIDIA GPU with integrated USB xHCI Host 5438 + * Create device link for GPUs with integrated USB xHCI Host 5439 5439 * controller to VGA. 5440 5440 */ 5441 5441 static void quirk_gpu_usb(struct pci_dev *usb) ··· 5444 5444 } 5445 5445 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 5446 5446 PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); 5447 + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, 5448 + PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); 5447 5449 5448 5450 /* 5449 - * Create device link for NVIDIA GPU with integrated Type-C UCSI controller 5451 + * Create device link for GPUs with integrated Type-C UCSI controller 5450 5452 * to VGA. Currently there is no class code defined for UCSI device over PCI 5451 5453 * so using UNKNOWN class for now and it will be updated when UCSI 5452 5454 * over PCI gets a class code. ··· 5459 5457 pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16); 5460 5458 } 5461 5459 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 5460 + PCI_CLASS_SERIAL_UNKNOWN, 8, 5461 + quirk_gpu_usb_typec_ucsi); 5462 + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, 5462 5463 PCI_CLASS_SERIAL_UNKNOWN, 8, 5463 5464 quirk_gpu_usb_typec_ucsi); 5464 5465
+26 -10
drivers/pci/vpd.c
··· 99 99 return off ?: PCI_VPD_SZ_INVALID; 100 100 } 101 101 102 + static bool pci_vpd_available(struct pci_dev *dev) 103 + { 104 + struct pci_vpd *vpd = &dev->vpd; 105 + 106 + if (!vpd->cap) 107 + return false; 108 + 109 + if (vpd->len == 0) { 110 + vpd->len = pci_vpd_size(dev); 111 + if (vpd->len == PCI_VPD_SZ_INVALID) { 112 + vpd->cap = 0; 113 + return false; 114 + } 115 + } 116 + 117 + return true; 118 + } 119 + 102 120 /* 103 121 * Wait for last operation to complete. 104 122 * This code has to spin since there is no other notification from the PCI ··· 163 145 loff_t end = pos + count; 164 146 u8 *buf = arg; 165 147 166 - if (!vpd->cap) 148 + if (!pci_vpd_available(dev)) 167 149 return -ENODEV; 168 150 169 151 if (pos < 0) ··· 224 206 loff_t end = pos + count; 225 207 int ret = 0; 226 208 227 - if (!vpd->cap) 209 + if (!pci_vpd_available(dev)) 228 210 return -ENODEV; 229 211 230 212 if (pos < 0 || (pos & 3) || (count & 3)) ··· 260 242 261 243 void pci_vpd_init(struct pci_dev *dev) 262 244 { 245 + if (dev->vpd.len == PCI_VPD_SZ_INVALID) 246 + return; 247 + 263 248 dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD); 264 249 mutex_init(&dev->vpd.lock); 265 - 266 - if (!dev->vpd.len) 267 - dev->vpd.len = pci_vpd_size(dev); 268 - 269 - if (dev->vpd.len == PCI_VPD_SZ_INVALID) 270 - dev->vpd.cap = 0; 271 250 } 272 251 273 252 static ssize_t vpd_read(struct file *filp, struct kobject *kobj, ··· 309 294 310 295 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size) 311 296 { 312 - unsigned int len = dev->vpd.len; 297 + unsigned int len; 313 298 void *buf; 314 299 int cnt; 315 300 316 - if (!dev->vpd.cap) 301 + if (!pci_vpd_available(dev)) 317 302 return ERR_PTR(-ENODEV); 318 303 304 + len = dev->vpd.len; 319 305 buf = kmalloc(len, GFP_KERNEL); 320 306 if (!buf) 321 307 return ERR_PTR(-ENOMEM);
+2
drivers/rtc/rtc-cmos.c
··· 1047 1047 * ACK the rtc irq here 1048 1048 */ 1049 1049 if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { 1050 + local_irq_disable(); 1050 1051 cmos_interrupt(0, (void *)cmos->rtc); 1052 + local_irq_enable(); 1051 1053 return; 1052 1054 } 1053 1055
+2 -1
drivers/s390/char/sclp_early.c
··· 45 45 sclp.has_gisaf = !!(sccb->fac118 & 0x08); 46 46 sclp.has_hvs = !!(sccb->fac119 & 0x80); 47 47 sclp.has_kss = !!(sccb->fac98 & 0x01); 48 - sclp.has_sipl = !!(sccb->cbl & 0x4000); 49 48 if (sccb->fac85 & 0x02) 50 49 S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; 51 50 if (sccb->fac91 & 0x40) 52 51 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST; 53 52 if (sccb->cpuoff > 134) 54 53 sclp.has_diag318 = !!(sccb->byte_134 & 0x80); 54 + if (sccb->cpuoff > 137) 55 + sclp.has_sipl = !!(sccb->cbl & 0x4000); 55 56 sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; 56 57 sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; 57 58 sclp.rzm <<= 20;
+2 -1
drivers/s390/crypto/ap_bus.c
··· 213 213 * ap_init_qci_info(): Allocate and query qci config info. 214 214 * Does also update the static variables ap_max_domain_id 215 215 * and ap_max_adapter_id if this info is available. 216 - 217 216 */ 218 217 static void __init ap_init_qci_info(void) 219 218 { ··· 438 439 /** 439 440 * ap_interrupt_handler() - Schedule ap_tasklet on interrupt 440 441 * @airq: pointer to adapter interrupt descriptor 442 + * @floating: ignored 441 443 */ 442 444 static void ap_interrupt_handler(struct airq_struct *airq, bool floating) 443 445 { ··· 1786 1786 /** 1787 1787 * ap_scan_bus(): Scan the AP bus for new devices 1788 1788 * Runs periodically, workqueue timer (ap_config_time) 1789 + * @unused: Unused pointer. 1789 1790 */ 1790 1791 static void ap_scan_bus(struct work_struct *unused) 1791 1792 {
+2 -2
drivers/s390/crypto/ap_queue.c
··· 20 20 21 21 /** 22 22 * ap_queue_enable_irq(): Enable interrupt support on this AP queue. 23 - * @qid: The AP queue number 23 + * @aq: The AP queue 24 24 * @ind: the notification indicator byte 25 25 * 26 26 * Enables interruption on AP queue via ap_aqic(). Based on the return ··· 311 311 312 312 /** 313 313 * ap_sm_reset(): Reset an AP queue. 314 - * @qid: The AP queue number 314 + * @aq: The AP queue 315 315 * 316 316 * Submit the Reset command to an AP queue. 317 317 */
+2 -4
drivers/spi/spi-tegra20-slink.c
··· 1182 1182 } 1183 1183 #endif 1184 1184 1185 - #ifdef CONFIG_PM 1186 - static int tegra_slink_runtime_suspend(struct device *dev) 1185 + static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev) 1187 1186 { 1188 1187 struct spi_master *master = dev_get_drvdata(dev); 1189 1188 struct tegra_slink_data *tspi = spi_master_get_devdata(master); ··· 1194 1195 return 0; 1195 1196 } 1196 1197 1197 - static int tegra_slink_runtime_resume(struct device *dev) 1198 + static int __maybe_unused tegra_slink_runtime_resume(struct device *dev) 1198 1199 { 1199 1200 struct spi_master *master = dev_get_drvdata(dev); 1200 1201 struct tegra_slink_data *tspi = spi_master_get_devdata(master); ··· 1207 1208 } 1208 1209 return 0; 1209 1210 } 1210 - #endif /* CONFIG_PM */ 1211 1211 1212 1212 static const struct dev_pm_ops slink_pm_ops = { 1213 1213 SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
+10 -1
drivers/vhost/net.c
··· 467 467 .num = nvq->batched_xdp, 468 468 .ptr = nvq->xdp, 469 469 }; 470 - int err; 470 + int i, err; 471 471 472 472 if (nvq->batched_xdp == 0) 473 473 goto signal_used; ··· 476 476 err = sock->ops->sendmsg(sock, msghdr, 0); 477 477 if (unlikely(err < 0)) { 478 478 vq_err(&nvq->vq, "Fail to batch sending packets\n"); 479 + 480 + /* free pages owned by XDP; since this is an unlikely error path, 481 + * keep it simple and avoid more complex bulk update for the 482 + * used pages 483 + */ 484 + for (i = 0; i < nvq->batched_xdp; ++i) 485 + put_page(virt_to_head_page(nvq->xdp[i].data)); 486 + nvq->batched_xdp = 0; 487 + nvq->done_idx = 0; 479 488 return; 480 489 } 481 490
+3 -1
drivers/video/fbdev/Kconfig
··· 582 582 583 583 config FB_TGA 584 584 tristate "TGA/SFB+ framebuffer support" 585 - depends on FB && (ALPHA || TC) 585 + depends on FB 586 + depends on PCI || TC 587 + depends on ALPHA || TC 586 588 select FB_CFB_FILLRECT 587 589 select FB_CFB_COPYAREA 588 590 select FB_CFB_IMAGEBLIT
+1 -1
drivers/xen/Kconfig
··· 214 214 implements them. 215 215 216 216 config XEN_PVCALLS_BACKEND 217 - bool "XEN PV Calls backend driver" 217 + tristate "XEN PV Calls backend driver" 218 218 depends on INET && XEN && XEN_BACKEND 219 219 help 220 220 Experimental backend for the Xen PV Calls protocol
+45 -17
drivers/xen/balloon.c
··· 43 43 #include <linux/sched.h> 44 44 #include <linux/cred.h> 45 45 #include <linux/errno.h> 46 + #include <linux/freezer.h> 47 + #include <linux/kthread.h> 46 48 #include <linux/mm.h> 47 49 #include <linux/memblock.h> 48 50 #include <linux/pagemap.h> ··· 117 115 #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1) 118 116 119 117 /* 120 - * balloon_process() state: 118 + * balloon_thread() state: 121 119 * 122 120 * BP_DONE: done or nothing to do, 123 121 * BP_WAIT: wait to be rescheduled, ··· 132 130 BP_ECANCELED 133 131 }; 134 132 133 + /* Main waiting point for xen-balloon thread. */ 134 + static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq); 135 135 136 136 static DEFINE_MUTEX(balloon_mutex); 137 137 ··· 147 143 /* List of ballooned pages, threaded through the mem_map array. */ 148 144 static LIST_HEAD(ballooned_pages); 149 145 static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); 150 - 151 - /* Main work function, always executed in process context. */ 152 - static void balloon_process(struct work_struct *work); 153 - static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); 154 146 155 147 /* When ballooning out (allocating memory to return to Xen) we don't really 156 148 want the kernel to try too hard since that can trigger the oom killer. */ ··· 366 366 static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v) 367 367 { 368 368 if (val == MEM_ONLINE) 369 - schedule_delayed_work(&balloon_worker, 0); 369 + wake_up(&balloon_thread_wq); 370 370 371 371 return NOTIFY_OK; 372 372 } ··· 491 491 } 492 492 493 493 /* 494 - * As this is a work item it is guaranteed to run as a single instance only. 494 + * Stop waiting if either state is not BP_EAGAIN and ballooning action is 495 + * needed, or if the credit has changed while state is BP_EAGAIN. 
496 + */ 497 + static bool balloon_thread_cond(enum bp_state state, long credit) 498 + { 499 + if (state != BP_EAGAIN) 500 + credit = 0; 501 + 502 + return current_credit() != credit || kthread_should_stop(); 503 + } 504 + 505 + /* 506 + * As this is a kthread it is guaranteed to run as a single instance only. 495 507 * We may of course race updates of the target counts (which are protected 496 508 * by the balloon lock), or with changes to the Xen hard limit, but we will 497 509 * recover from these in time. 498 510 */ 499 - static void balloon_process(struct work_struct *work) 511 + static int balloon_thread(void *unused) 500 512 { 501 513 enum bp_state state = BP_DONE; 502 514 long credit; 515 + unsigned long timeout; 503 516 517 + set_freezable(); 518 + for (;;) { 519 + if (state == BP_EAGAIN) 520 + timeout = balloon_stats.schedule_delay * HZ; 521 + else 522 + timeout = 3600 * HZ; 523 + credit = current_credit(); 504 524 505 - do { 525 + wait_event_interruptible_timeout(balloon_thread_wq, 526 + balloon_thread_cond(state, credit), timeout); 527 + 528 + if (kthread_should_stop()) 529 + return 0; 530 + 506 531 mutex_lock(&balloon_mutex); 507 532 508 533 credit = current_credit(); ··· 554 529 mutex_unlock(&balloon_mutex); 555 530 556 531 cond_resched(); 557 - 558 - } while (credit && state == BP_DONE); 559 - 560 - /* Schedule more work if there is some still to be done. */ 561 - if (state == BP_EAGAIN) 562 - schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); 532 + } 563 533 } 564 534 565 535 /* Resets the Xen limit, sets new target, and kicks off processing. */ ··· 562 542 { 563 543 /* No need for lock. Not read-modify-write updates. */ 564 544 balloon_stats.target_pages = target; 565 - schedule_delayed_work(&balloon_worker, 0); 545 + wake_up(&balloon_thread_wq); 566 546 } 567 547 EXPORT_SYMBOL_GPL(balloon_set_new_target); 568 548 ··· 667 647 668 648 /* The balloon may be too large now. Shrink it if needed. 
*/ 669 649 if (current_credit()) 670 - schedule_delayed_work(&balloon_worker, 0); 650 + wake_up(&balloon_thread_wq); 671 651 672 652 mutex_unlock(&balloon_mutex); 673 653 } ··· 699 679 700 680 static int __init balloon_init(void) 701 681 { 682 + struct task_struct *task; 683 + 702 684 if (!xen_domain()) 703 685 return -ENODEV; 704 686 ··· 743 721 xen_extra_mem[i].n_pfns); 744 722 } 745 723 #endif 724 + 725 + task = kthread_run(balloon_thread, NULL, "xen-balloon"); 726 + if (IS_ERR(task)) { 727 + pr_err("xen-balloon thread could not be started, ballooning will not work!\n"); 728 + return PTR_ERR(task); 729 + } 746 730 747 731 /* Init the xen-balloon driver. */ 748 732 xen_balloon_init();
+17 -20
drivers/xen/swiotlb-xen.c
··· 106 106 107 107 static int xen_swiotlb_fixup(void *buf, unsigned long nslabs) 108 108 { 109 - int i, rc; 110 - int dma_bits; 109 + int rc; 110 + unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT); 111 + unsigned int i, dma_bits = order + PAGE_SHIFT; 111 112 dma_addr_t dma_handle; 112 113 phys_addr_t p = virt_to_phys(buf); 113 114 114 - dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT; 115 + BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1)); 116 + BUG_ON(nslabs % IO_TLB_SEGSIZE); 115 117 116 118 i = 0; 117 119 do { 118 - int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE); 119 - 120 120 do { 121 121 rc = xen_create_contiguous_region( 122 - p + (i << IO_TLB_SHIFT), 123 - get_order(slabs << IO_TLB_SHIFT), 122 + p + (i << IO_TLB_SHIFT), order, 124 123 dma_bits, &dma_handle); 125 124 } while (rc && dma_bits++ < MAX_DMA_BITS); 126 125 if (rc) 127 126 return rc; 128 127 129 - i += slabs; 128 + i += IO_TLB_SEGSIZE; 130 129 } while (i < nslabs); 131 130 return 0; 132 131 } ··· 152 153 return ""; 153 154 } 154 155 155 - #define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE) 156 - 157 - int __ref xen_swiotlb_init(void) 156 + int xen_swiotlb_init(void) 158 157 { 159 158 enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN; 160 159 unsigned long bytes = swiotlb_size_or_default(); ··· 182 185 order--; 183 186 } 184 187 if (!start) 185 - goto error; 188 + goto exit; 186 189 if (order != get_order(bytes)) { 187 190 pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n", 188 191 (PAGE_SIZE << order) >> 20); ··· 205 208 swiotlb_set_max_segment(PAGE_SIZE); 206 209 return 0; 207 210 error: 208 - if (repeat--) { 211 + if (nslabs > 1024 && repeat--) { 209 212 /* Min is 2MB */ 210 - nslabs = max(1024UL, (nslabs >> 1)); 211 - pr_info("Lowering to %luMB\n", 212 - (nslabs << IO_TLB_SHIFT) >> 20); 213 + nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE)); 214 + bytes = nslabs << IO_TLB_SHIFT; 215 + pr_info("Lowering to 
%luMB\n", bytes >> 20); 213 216 goto retry; 214 217 } 218 + exit: 215 219 pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc); 216 - free_pages((unsigned long)start, order); 217 220 return rc; 218 221 } 219 222 ··· 241 244 rc = xen_swiotlb_fixup(start, nslabs); 242 245 if (rc) { 243 246 memblock_free(__pa(start), PAGE_ALIGN(bytes)); 244 - if (repeat--) { 247 + if (nslabs > 1024 && repeat--) { 245 248 /* Min is 2MB */ 246 - nslabs = max(1024UL, (nslabs >> 1)); 249 + nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE)); 247 250 bytes = nslabs << IO_TLB_SHIFT; 248 251 pr_info("Lowering to %luMB\n", bytes >> 20); 249 252 goto retry; ··· 251 254 panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc); 252 255 } 253 256 254 - if (swiotlb_init_with_tbl(start, nslabs, false)) 257 + if (swiotlb_init_with_tbl(start, nslabs, true)) 255 258 panic("Cannot allocate SWIOTLB buffer"); 256 259 swiotlb_set_max_segment(PAGE_SIZE); 257 260 }
+13 -14
fs/io-wq.c
··· 14 14 #include <linux/rculist_nulls.h> 15 15 #include <linux/cpu.h> 16 16 #include <linux/tracehook.h> 17 + #include <uapi/linux/io_uring.h> 17 18 18 19 #include "io-wq.h" 19 20 ··· 177 176 static void io_worker_exit(struct io_worker *worker) 178 177 { 179 178 struct io_wqe *wqe = worker->wqe; 180 - struct io_wqe_acct *acct = io_wqe_get_acct(worker); 181 179 182 180 if (refcount_dec_and_test(&worker->ref)) 183 181 complete(&worker->ref_done); ··· 186 186 if (worker->flags & IO_WORKER_F_FREE) 187 187 hlist_nulls_del_rcu(&worker->nulls_node); 188 188 list_del_rcu(&worker->all_list); 189 - acct->nr_workers--; 190 189 preempt_disable(); 191 190 io_wqe_dec_running(worker); 192 191 worker->flags = 0; ··· 245 246 */ 246 247 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) 247 248 { 248 - bool do_create = false; 249 - 250 249 /* 251 250 * Most likely an attempt to queue unbounded work on an io_wq that 252 251 * wasn't setup with any unbounded workers. ··· 253 256 pr_warn_once("io-wq is not configured for unbound workers"); 254 257 255 258 raw_spin_lock(&wqe->lock); 256 - if (acct->nr_workers < acct->max_workers) { 257 - acct->nr_workers++; 258 - do_create = true; 259 + if (acct->nr_workers == acct->max_workers) { 260 + raw_spin_unlock(&wqe->lock); 261 + return true; 259 262 } 263 + acct->nr_workers++; 260 264 raw_spin_unlock(&wqe->lock); 261 - if (do_create) { 262 - atomic_inc(&acct->nr_running); 263 - atomic_inc(&wqe->wq->worker_refs); 264 - return create_io_worker(wqe->wq, wqe, acct->index); 265 - } 266 - 267 - return true; 265 + atomic_inc(&acct->nr_running); 266 + atomic_inc(&wqe->wq->worker_refs); 267 + return create_io_worker(wqe->wq, wqe, acct->index); 268 268 } 269 269 270 270 static void io_wqe_inc_running(struct io_worker *worker) ··· 568 574 } 569 575 /* timed out, exit unless we're the last worker */ 570 576 if (last_timeout && acct->nr_workers > 1) { 577 + acct->nr_workers--; 571 578 raw_spin_unlock(&wqe->lock); 572 579 
__set_current_state(TASK_RUNNING); 573 580 break; ··· 1281 1286 int io_wq_max_workers(struct io_wq *wq, int *new_count) 1282 1287 { 1283 1288 int i, node, prev = 0; 1289 + 1290 + BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND); 1291 + BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND); 1292 + BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2); 1284 1293 1285 1294 for (i = 0; i < 2; i++) { 1286 1295 if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
+144 -77
fs/io_uring.c
··· 712 712 struct iovec fast_iov[UIO_FASTIOV]; 713 713 const struct iovec *free_iovec; 714 714 struct iov_iter iter; 715 + struct iov_iter_state iter_state; 715 716 size_t bytes_done; 716 717 struct wait_page_queue wpq; 717 718 }; ··· 736 735 REQ_F_BUFFER_SELECTED_BIT, 737 736 REQ_F_COMPLETE_INLINE_BIT, 738 737 REQ_F_REISSUE_BIT, 739 - REQ_F_DONT_REISSUE_BIT, 740 738 REQ_F_CREDS_BIT, 741 739 REQ_F_REFCOUNT_BIT, 742 740 REQ_F_ARM_LTIMEOUT_BIT, ··· 782 782 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), 783 783 /* caller should reissue async */ 784 784 REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), 785 - /* don't attempt request reissue, see io_rw_reissue() */ 786 - REQ_F_DONT_REISSUE = BIT(REQ_F_DONT_REISSUE_BIT), 787 785 /* supports async reads */ 788 786 REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT), 789 787 /* supports async writes */ ··· 2442 2444 req = list_first_entry(done, struct io_kiocb, inflight_entry); 2443 2445 list_del(&req->inflight_entry); 2444 2446 2445 - if (READ_ONCE(req->result) == -EAGAIN && 2446 - !(req->flags & REQ_F_DONT_REISSUE)) { 2447 - req->iopoll_completed = 0; 2448 - io_req_task_queue_reissue(req); 2449 - continue; 2450 - } 2451 - 2452 2447 __io_cqring_fill_event(ctx, req->user_data, req->result, 2453 2448 io_put_rw_kbuf(req)); 2454 2449 (*nr_events)++; ··· 2604 2613 2605 2614 if (!rw) 2606 2615 return !io_req_prep_async(req); 2607 - /* may have left rw->iter inconsistent on -EIOCBQUEUED */ 2608 - iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter)); 2616 + iov_iter_restore(&rw->iter, &rw->iter_state); 2609 2617 return true; 2610 2618 } 2611 2619 ··· 2704 2714 if (kiocb->ki_flags & IOCB_WRITE) 2705 2715 kiocb_end_write(req); 2706 2716 if (unlikely(res != req->result)) { 2707 - if (!(res == -EAGAIN && io_rw_should_reissue(req) && 2708 - io_resubmit_prep(req))) { 2709 - req_set_fail(req); 2710 - req->flags |= REQ_F_DONT_REISSUE; 2717 + if (res == -EAGAIN && io_rw_should_reissue(req)) { 2718 + req->flags |= REQ_F_REISSUE; 
2719 + return; 2711 2720 } 2712 2721 } 2713 2722 ··· 2832 2843 return __io_file_supports_nowait(req->file, rw); 2833 2844 } 2834 2845 2835 - static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) 2846 + static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, 2847 + int rw) 2836 2848 { 2837 2849 struct io_ring_ctx *ctx = req->ctx; 2838 2850 struct kiocb *kiocb = &req->rw.kiocb; ··· 2855 2865 if (unlikely(ret)) 2856 2866 return ret; 2857 2867 2858 - /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */ 2859 - if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK)) 2868 + /* 2869 + * If the file is marked O_NONBLOCK, still allow retry for it if it 2870 + * supports async. Otherwise it's impossible to use O_NONBLOCK files 2871 + * reliably. If not, or it IOCB_NOWAIT is set, don't retry. 2872 + */ 2873 + if ((kiocb->ki_flags & IOCB_NOWAIT) || 2874 + ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) 2860 2875 req->flags |= REQ_F_NOWAIT; 2861 2876 2862 2877 ioprio = READ_ONCE(sqe->ioprio); ··· 2926 2931 { 2927 2932 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); 2928 2933 struct io_async_rw *io = req->async_data; 2929 - bool check_reissue = kiocb->ki_complete == io_complete_rw; 2930 2934 2931 2935 /* add previously done IO, if any */ 2932 2936 if (io && io->bytes_done > 0) { ··· 2937 2943 2938 2944 if (req->flags & REQ_F_CUR_POS) 2939 2945 req->file->f_pos = kiocb->ki_pos; 2940 - if (ret >= 0 && check_reissue) 2946 + if (ret >= 0 && (kiocb->ki_complete == io_complete_rw)) 2941 2947 __io_complete_rw(req, ret, 0, issue_flags); 2942 2948 else 2943 2949 io_rw_done(kiocb, ret); 2944 2950 2945 - if (check_reissue && (req->flags & REQ_F_REISSUE)) { 2951 + if (req->flags & REQ_F_REISSUE) { 2946 2952 req->flags &= ~REQ_F_REISSUE; 2947 2953 if (io_resubmit_prep(req)) { 2948 2954 io_req_task_queue_reissue(req); 2949 2955 } else { 2956 + unsigned int cflags = io_put_rw_kbuf(req); 2957 
+ struct io_ring_ctx *ctx = req->ctx; 2958 + 2950 2959 req_set_fail(req); 2951 - __io_req_complete(req, issue_flags, ret, 2952 - io_put_rw_kbuf(req)); 2960 + if (issue_flags & IO_URING_F_NONBLOCK) { 2961 + mutex_lock(&ctx->uring_lock); 2962 + __io_req_complete(req, issue_flags, ret, cflags); 2963 + mutex_unlock(&ctx->uring_lock); 2964 + } else { 2965 + __io_req_complete(req, issue_flags, ret, cflags); 2966 + } 2953 2967 } 2954 2968 } 2955 2969 } ··· 3265 3263 ret = nr; 3266 3264 break; 3267 3265 } 3266 + if (!iov_iter_is_bvec(iter)) { 3267 + iov_iter_advance(iter, nr); 3268 + } else { 3269 + req->rw.len -= nr; 3270 + req->rw.addr += nr; 3271 + } 3268 3272 ret += nr; 3269 3273 if (nr != iovec.iov_len) 3270 3274 break; 3271 - req->rw.len -= nr; 3272 - req->rw.addr += nr; 3273 - iov_iter_advance(iter, nr); 3274 3275 } 3275 3276 3276 3277 return ret; ··· 3320 3315 if (!force && !io_op_defs[req->opcode].needs_async_setup) 3321 3316 return 0; 3322 3317 if (!req->async_data) { 3318 + struct io_async_rw *iorw; 3319 + 3323 3320 if (io_alloc_async_data(req)) { 3324 3321 kfree(iovec); 3325 3322 return -ENOMEM; 3326 3323 } 3327 3324 3328 3325 io_req_map_rw(req, iovec, fast_iov, iter); 3326 + iorw = req->async_data; 3327 + /* we've copied and mapped the iter, ensure state is saved */ 3328 + iov_iter_save_state(&iorw->iter, &iorw->iter_state); 3329 3329 } 3330 3330 return 0; 3331 3331 } ··· 3349 3339 iorw->free_iovec = iov; 3350 3340 if (iov) 3351 3341 req->flags |= REQ_F_NEED_CLEANUP; 3342 + iov_iter_save_state(&iorw->iter, &iorw->iter_state); 3352 3343 return 0; 3353 3344 } 3354 3345 ··· 3357 3346 { 3358 3347 if (unlikely(!(req->file->f_mode & FMODE_READ))) 3359 3348 return -EBADF; 3360 - return io_prep_rw(req, sqe); 3349 + return io_prep_rw(req, sqe, READ); 3361 3350 } 3362 3351 3363 3352 /* ··· 3453 3442 struct kiocb *kiocb = &req->rw.kiocb; 3454 3443 struct iov_iter __iter, *iter = &__iter; 3455 3444 struct io_async_rw *rw = req->async_data; 3456 - ssize_t io_size, ret, 
ret2; 3457 3445 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; 3446 + struct iov_iter_state __state, *state; 3447 + ssize_t ret, ret2; 3458 3448 3459 3449 if (rw) { 3460 3450 iter = &rw->iter; 3451 + state = &rw->iter_state; 3452 + /* 3453 + * We come here from an earlier attempt, restore our state to 3454 + * match in case it doesn't. It's cheap enough that we don't 3455 + * need to make this conditional. 3456 + */ 3457 + iov_iter_restore(iter, state); 3461 3458 iovec = NULL; 3462 3459 } else { 3463 3460 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); 3464 3461 if (ret < 0) 3465 3462 return ret; 3463 + state = &__state; 3464 + iov_iter_save_state(iter, state); 3466 3465 } 3467 - io_size = iov_iter_count(iter); 3468 - req->result = io_size; 3466 + req->result = iov_iter_count(iter); 3469 3467 3470 3468 /* Ensure we clear previously set non-block flag */ 3471 3469 if (!force_nonblock) ··· 3488 3468 return ret ?: -EAGAIN; 3489 3469 } 3490 3470 3491 - ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size); 3471 + ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result); 3492 3472 if (unlikely(ret)) { 3493 3473 kfree(iovec); 3494 3474 return ret; ··· 3504 3484 /* no retry on NONBLOCK nor RWF_NOWAIT */ 3505 3485 if (req->flags & REQ_F_NOWAIT) 3506 3486 goto done; 3507 - /* some cases will consume bytes even on error returns */ 3508 - iov_iter_reexpand(iter, iter->count + iter->truncated); 3509 - iov_iter_revert(iter, io_size - iov_iter_count(iter)); 3510 3487 ret = 0; 3511 3488 } else if (ret == -EIOCBQUEUED) { 3512 3489 goto out_free; 3513 - } else if (ret <= 0 || ret == io_size || !force_nonblock || 3490 + } else if (ret <= 0 || ret == req->result || !force_nonblock || 3514 3491 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { 3515 3492 /* read all, failed, already did sync or don't want to retry */ 3516 3493 goto done; 3517 3494 } 3495 + 3496 + /* 3497 + * Don't depend on the iter state matching what was 
consumed, or being 3498 + * untouched in case of error. Restore it and we'll advance it 3499 + * manually if we need to. 3500 + */ 3501 + iov_iter_restore(iter, state); 3518 3502 3519 3503 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); 3520 3504 if (ret2) ··· 3526 3502 3527 3503 iovec = NULL; 3528 3504 rw = req->async_data; 3529 - /* now use our persistent iterator, if we aren't already */ 3530 - iter = &rw->iter; 3505 + /* 3506 + * Now use our persistent iterator and state, if we aren't already. 3507 + * We've restored and mapped the iter to match. 3508 + */ 3509 + if (iter != &rw->iter) { 3510 + iter = &rw->iter; 3511 + state = &rw->iter_state; 3512 + } 3531 3513 3532 3514 do { 3533 - io_size -= ret; 3515 + /* 3516 + * We end up here because of a partial read, either from 3517 + * above or inside this loop. Advance the iter by the bytes 3518 + * that were consumed. 3519 + */ 3520 + iov_iter_advance(iter, ret); 3521 + if (!iov_iter_count(iter)) 3522 + break; 3534 3523 rw->bytes_done += ret; 3524 + iov_iter_save_state(iter, state); 3525 + 3535 3526 /* if we can retry, do so with the callbacks armed */ 3536 3527 if (!io_rw_should_retry(req)) { 3537 3528 kiocb->ki_flags &= ~IOCB_WAITQ; ··· 3564 3525 return 0; 3565 3526 /* we got some bytes, but not all. retry. 
*/ 3566 3527 kiocb->ki_flags &= ~IOCB_WAITQ; 3567 - } while (ret > 0 && ret < io_size); 3528 + iov_iter_restore(iter, state); 3529 + } while (ret > 0); 3568 3530 done: 3569 3531 kiocb_done(kiocb, ret, issue_flags); 3570 3532 out_free: ··· 3579 3539 { 3580 3540 if (unlikely(!(req->file->f_mode & FMODE_WRITE))) 3581 3541 return -EBADF; 3582 - return io_prep_rw(req, sqe); 3542 + return io_prep_rw(req, sqe, WRITE); 3583 3543 } 3584 3544 3585 3545 static int io_write(struct io_kiocb *req, unsigned int issue_flags) ··· 3588 3548 struct kiocb *kiocb = &req->rw.kiocb; 3589 3549 struct iov_iter __iter, *iter = &__iter; 3590 3550 struct io_async_rw *rw = req->async_data; 3591 - ssize_t ret, ret2, io_size; 3592 3551 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; 3552 + struct iov_iter_state __state, *state; 3553 + ssize_t ret, ret2; 3593 3554 3594 3555 if (rw) { 3595 3556 iter = &rw->iter; 3557 + state = &rw->iter_state; 3558 + iov_iter_restore(iter, state); 3596 3559 iovec = NULL; 3597 3560 } else { 3598 3561 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); 3599 3562 if (ret < 0) 3600 3563 return ret; 3564 + state = &__state; 3565 + iov_iter_save_state(iter, state); 3601 3566 } 3602 - io_size = iov_iter_count(iter); 3603 - req->result = io_size; 3567 + req->result = iov_iter_count(iter); 3568 + ret2 = 0; 3604 3569 3605 3570 /* Ensure we clear previously set non-block flag */ 3606 3571 if (!force_nonblock) ··· 3622 3577 (req->flags & REQ_F_ISREG)) 3623 3578 goto copy_iov; 3624 3579 3625 - ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size); 3580 + ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result); 3626 3581 if (unlikely(ret)) 3627 3582 goto out_free; 3628 3583 ··· 3669 3624 kiocb_done(kiocb, ret2, issue_flags); 3670 3625 } else { 3671 3626 copy_iov: 3672 - /* some cases will consume bytes even on error returns */ 3673 - iov_iter_reexpand(iter, iter->count + iter->truncated); 3674 - iov_iter_revert(iter, 
io_size - iov_iter_count(iter)); 3627 + iov_iter_restore(iter, state); 3628 + if (ret2 > 0) 3629 + iov_iter_advance(iter, ret2); 3675 3630 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); 3676 3631 return ret ?: -EAGAIN; 3677 3632 } ··· 7560 7515 break; 7561 7516 } while (1); 7562 7517 7518 + if (uts) { 7519 + struct timespec64 ts; 7520 + 7521 + if (get_timespec64(&ts, uts)) 7522 + return -EFAULT; 7523 + timeout = timespec64_to_jiffies(&ts); 7524 + } 7525 + 7563 7526 if (sig) { 7564 7527 #ifdef CONFIG_COMPAT 7565 7528 if (in_compat_syscall()) ··· 7579 7526 7580 7527 if (ret) 7581 7528 return ret; 7582 - } 7583 - 7584 - if (uts) { 7585 - struct timespec64 ts; 7586 - 7587 - if (get_timespec64(&ts, uts)) 7588 - return -EFAULT; 7589 - timeout = timespec64_to_jiffies(&ts); 7590 7529 } 7591 7530 7592 7531 init_waitqueue_func_entry(&iowq.wq, io_wake_function); ··· 8329 8284 #endif 8330 8285 } 8331 8286 8287 + static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, 8288 + struct io_rsrc_node *node, void *rsrc) 8289 + { 8290 + struct io_rsrc_put *prsrc; 8291 + 8292 + prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL); 8293 + if (!prsrc) 8294 + return -ENOMEM; 8295 + 8296 + prsrc->tag = *io_get_tag_slot(data, idx); 8297 + prsrc->rsrc = rsrc; 8298 + list_add(&prsrc->list, &node->rsrc_list); 8299 + return 0; 8300 + } 8301 + 8332 8302 static int io_install_fixed_file(struct io_kiocb *req, struct file *file, 8333 8303 unsigned int issue_flags, u32 slot_index) 8334 8304 { 8335 8305 struct io_ring_ctx *ctx = req->ctx; 8336 8306 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; 8307 + bool needs_switch = false; 8337 8308 struct io_fixed_file *file_slot; 8338 8309 int ret = -EBADF; 8339 8310 ··· 8365 8304 8366 8305 slot_index = array_index_nospec(slot_index, ctx->nr_user_files); 8367 8306 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index); 8368 - ret = -EBADF; 8369 - if (file_slot->file_ptr) 8370 - goto err; 8307 + 8308 + if 
(file_slot->file_ptr) { 8309 + struct file *old_file; 8310 + 8311 + ret = io_rsrc_node_switch_start(ctx); 8312 + if (ret) 8313 + goto err; 8314 + 8315 + old_file = (struct file *)(file_slot->file_ptr & FFS_MASK); 8316 + ret = io_queue_rsrc_removal(ctx->file_data, slot_index, 8317 + ctx->rsrc_node, old_file); 8318 + if (ret) 8319 + goto err; 8320 + file_slot->file_ptr = 0; 8321 + needs_switch = true; 8322 + } 8371 8323 8372 8324 *io_get_tag_slot(ctx->file_data, slot_index) = 0; 8373 8325 io_fixed_file_set(file_slot, file); ··· 8392 8318 8393 8319 ret = 0; 8394 8320 err: 8321 + if (needs_switch) 8322 + io_rsrc_node_switch(ctx, ctx->file_data); 8395 8323 io_ring_submit_unlock(ctx, !force_nonblock); 8396 8324 if (ret) 8397 8325 fput(file); 8398 8326 return ret; 8399 - } 8400 - 8401 - static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, 8402 - struct io_rsrc_node *node, void *rsrc) 8403 - { 8404 - struct io_rsrc_put *prsrc; 8405 - 8406 - prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL); 8407 - if (!prsrc) 8408 - return -ENOMEM; 8409 - 8410 - prsrc->tag = *io_get_tag_slot(data, idx); 8411 - prsrc->rsrc = rsrc; 8412 - list_add(&prsrc->list, &node->rsrc_list); 8413 - return 0; 8414 8327 } 8415 8328 8416 8329 static int __io_sqe_files_update(struct io_ring_ctx *ctx, ··· 10621 10560 * ordering. Fine to drop uring_lock here, we hold 10622 10561 * a ref to the ctx. 
10623 10562 */ 10563 + refcount_inc(&sqd->refs); 10624 10564 mutex_unlock(&ctx->uring_lock); 10625 10565 mutex_lock(&sqd->lock); 10626 10566 mutex_lock(&ctx->uring_lock); 10627 - tctx = sqd->thread->io_uring; 10567 + if (sqd->thread) 10568 + tctx = sqd->thread->io_uring; 10628 10569 } 10629 10570 } else { 10630 10571 tctx = current->io_uring; ··· 10640 10577 if (ret) 10641 10578 goto err; 10642 10579 10643 - if (sqd) 10580 + if (sqd) { 10644 10581 mutex_unlock(&sqd->lock); 10582 + io_put_sq_data(sqd); 10583 + } 10645 10584 10646 10585 if (copy_to_user(arg, new_count, sizeof(new_count))) 10647 10586 return -EFAULT; 10648 10587 10649 10588 return 0; 10650 10589 err: 10651 - if (sqd) 10590 + if (sqd) { 10652 10591 mutex_unlock(&sqd->lock); 10592 + io_put_sq_data(sqd); 10593 + } 10653 10594 return ret; 10654 10595 } 10655 10596
+34 -17
fs/qnx4/dir.c
··· 15 15 #include <linux/buffer_head.h> 16 16 #include "qnx4.h" 17 17 18 + /* 19 + * A qnx4 directory entry is an inode entry or link info 20 + * depending on the status field in the last byte. The 21 + * first byte is where the name start either way, and a 22 + * zero means it's empty. 23 + */ 24 + union qnx4_directory_entry { 25 + struct { 26 + char de_name; 27 + char de_pad[62]; 28 + char de_status; 29 + }; 30 + struct qnx4_inode_entry inode; 31 + struct qnx4_link_info link; 32 + }; 33 + 18 34 static int qnx4_readdir(struct file *file, struct dir_context *ctx) 19 35 { 20 36 struct inode *inode = file_inode(file); 21 37 unsigned int offset; 22 38 struct buffer_head *bh; 23 - struct qnx4_inode_entry *de; 24 - struct qnx4_link_info *le; 25 39 unsigned long blknum; 26 40 int ix, ino; 27 41 int size; ··· 52 38 } 53 39 ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; 54 40 for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) { 41 + union qnx4_directory_entry *de; 42 + const char *name; 43 + 55 44 offset = ix * QNX4_DIR_ENTRY_SIZE; 56 - de = (struct qnx4_inode_entry *) (bh->b_data + offset); 57 - if (!de->di_fname[0]) 45 + de = (union qnx4_directory_entry *) (bh->b_data + offset); 46 + 47 + if (!de->de_name) 58 48 continue; 59 - if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) 49 + if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) 60 50 continue; 61 - if (!(de->di_status & QNX4_FILE_LINK)) 62 - size = QNX4_SHORT_NAME_MAX; 63 - else 64 - size = QNX4_NAME_MAX; 65 - size = strnlen(de->di_fname, size); 66 - QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname)); 67 - if (!(de->di_status & QNX4_FILE_LINK)) 51 + if (!(de->de_status & QNX4_FILE_LINK)) { 52 + size = sizeof(de->inode.di_fname); 53 + name = de->inode.di_fname; 68 54 ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1; 69 - else { 70 - le = (struct qnx4_link_info*)de; 71 - ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) * 55 + } else { 56 + size = 
sizeof(de->link.dl_fname); 57 + name = de->link.dl_fname; 58 + ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) * 72 59 QNX4_INODES_PER_BLOCK + 73 - le->dl_inode_ndx; 60 + de->link.dl_inode_ndx; 74 61 } 75 - if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) { 62 + size = strnlen(name, size); 63 + QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name)); 64 + if (!dir_emit(ctx, name, size, ino, DT_UNKNOWN)) { 76 65 brelse(bh); 77 66 return 0; 78 67 }
+3 -23
include/asm-generic/io.h
··· 1023 1023 port &= IO_SPACE_LIMIT; 1024 1024 return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port; 1025 1025 } 1026 - #define __pci_ioport_unmap __pci_ioport_unmap 1027 - static inline void __pci_ioport_unmap(void __iomem *p) 1028 - { 1029 - uintptr_t start = (uintptr_t) PCI_IOBASE; 1030 - uintptr_t addr = (uintptr_t) p; 1031 - 1032 - if (addr >= start && addr < start + IO_SPACE_LIMIT) 1033 - return; 1034 - iounmap(p); 1035 - } 1026 + #define ARCH_HAS_GENERIC_IOPORT_MAP 1036 1027 #endif 1037 1028 1038 1029 #ifndef ioport_unmap ··· 1039 1048 #endif /* CONFIG_HAS_IOPORT_MAP */ 1040 1049 1041 1050 #ifndef CONFIG_GENERIC_IOMAP 1042 - struct pci_dev; 1043 - extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); 1044 - 1045 - #ifndef __pci_ioport_unmap 1046 - static inline void __pci_ioport_unmap(void __iomem *p) {} 1047 - #endif 1048 - 1049 1051 #ifndef pci_iounmap 1050 - #define pci_iounmap pci_iounmap 1051 - static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) 1052 - { 1053 - __pci_ioport_unmap(p); 1054 - } 1052 + #define ARCH_WANTS_GENERIC_PCI_IOUNMAP 1055 1053 #endif 1056 - #endif /* CONFIG_GENERIC_IOMAP */ 1054 + #endif 1057 1055 1058 1056 #ifndef xlate_dev_mem_ptr 1059 1057 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
-10
include/asm-generic/iomap.h
··· 110 110 } 111 111 #endif 112 112 113 - #ifdef CONFIG_PCI 114 - /* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ 115 - struct pci_dev; 116 - extern void pci_iounmap(struct pci_dev *dev, void __iomem *); 117 - #elif defined(CONFIG_GENERIC_IOMAP) 118 - struct pci_dev; 119 - static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) 120 - { } 121 - #endif 122 - 123 113 #include <asm-generic/pci_iomap.h> 124 114 125 115 #endif
+19 -2
include/asm-generic/mshyperv.h
··· 197 197 return hv_vp_index[cpu_number]; 198 198 } 199 199 200 - static inline int cpumask_to_vpset(struct hv_vpset *vpset, 201 - const struct cpumask *cpus) 200 + static inline int __cpumask_to_vpset(struct hv_vpset *vpset, 201 + const struct cpumask *cpus, 202 + bool exclude_self) 202 203 { 203 204 int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; 205 + int this_cpu = smp_processor_id(); 204 206 205 207 /* valid_bank_mask can represent up to 64 banks */ 206 208 if (hv_max_vp_index / 64 >= 64) ··· 220 218 * Some banks may end up being empty but this is acceptable. 221 219 */ 222 220 for_each_cpu(cpu, cpus) { 221 + if (exclude_self && cpu == this_cpu) 222 + continue; 223 223 vcpu = hv_cpu_number_to_vp_number(cpu); 224 224 if (vcpu == VP_INVAL) 225 225 return -1; ··· 234 230 } 235 231 vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); 236 232 return nr_bank; 233 + } 234 + 235 + static inline int cpumask_to_vpset(struct hv_vpset *vpset, 236 + const struct cpumask *cpus) 237 + { 238 + return __cpumask_to_vpset(vpset, cpus, false); 239 + } 240 + 241 + static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset, 242 + const struct cpumask *cpus) 243 + { 244 + WARN_ON_ONCE(preemptible()); 245 + return __cpumask_to_vpset(vpset, cpus, true); 237 246 } 238 247 239 248 void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
+3
include/asm-generic/pci_iomap.h
··· 18 18 extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, 19 19 unsigned long offset, 20 20 unsigned long maxlen); 21 + extern void pci_iounmap(struct pci_dev *dev, void __iomem *); 21 22 /* Create a virtual mapping cookie for a port on a given PCI device. 22 23 * Do not call this directly, it exists to make it easier for architectures 23 24 * to override */ ··· 51 50 { 52 51 return NULL; 53 52 } 53 + static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) 54 + { } 54 55 #endif 55 56 56 57 #endif /* __ASM_GENERIC_PCI_IOMAP_H */
-4
include/asm-generic/vmlinux.lds.h
··· 116 116 * GCC 4.5 and later have a 32 bytes section alignment for structures. 117 117 * Except GCC 4.9, that feels the need to align on 64 bytes. 118 118 */ 119 - #if __GNUC__ == 4 && __GNUC_MINOR__ == 9 120 - #define STRUCT_ALIGNMENT 64 121 - #else 122 119 #define STRUCT_ALIGNMENT 32 123 - #endif 124 120 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT) 125 121 126 122 /*
+27 -80
include/linux/cgroup-defs.h
··· 752 752 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains 753 753 * per-socket cgroup information except for memcg association. 754 754 * 755 - * On legacy hierarchies, net_prio and net_cls controllers directly set 756 - * attributes on each sock which can then be tested by the network layer. 757 - * On the default hierarchy, each sock is associated with the cgroup it was 758 - * created in and the networking layer can match the cgroup directly. 759 - * 760 - * To avoid carrying all three cgroup related fields separately in sock, 761 - * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. 762 - * On boot, sock_cgroup_data records the cgroup that the sock was created 763 - * in so that cgroup2 matches can be made; however, once either net_prio or 764 - * net_cls starts being used, the area is overridden to carry prioidx and/or 765 - * classid. The two modes are distinguished by whether the lowest bit is 766 - * set. Clear bit indicates cgroup pointer while set bit prioidx and 767 - * classid. 768 - * 769 - * While userland may start using net_prio or net_cls at any time, once 770 - * either is used, cgroup2 matching no longer works. There is no reason to 771 - * mix the two and this is in line with how legacy and v2 compatibility is 772 - * handled. On mode switch, cgroup references which are already being 773 - * pointed to by socks may be leaked. While this can be remedied by adding 774 - * synchronization around sock_cgroup_data, given that the number of leaked 775 - * cgroups is bound and highly unlikely to be high, this seems to be the 776 - * better trade-off. 755 + * On legacy hierarchies, net_prio and net_cls controllers directly 756 + * set attributes on each sock which can then be tested by the network 757 + * layer. On the default hierarchy, each sock is associated with the 758 + * cgroup it was created in and the networking layer can match the 759 + * cgroup directly. 
777 760 */ 778 761 struct sock_cgroup_data { 779 - union { 780 - #ifdef __LITTLE_ENDIAN 781 - struct { 782 - u8 is_data : 1; 783 - u8 no_refcnt : 1; 784 - u8 unused : 6; 785 - u8 padding; 786 - u16 prioidx; 787 - u32 classid; 788 - } __packed; 789 - #else 790 - struct { 791 - u32 classid; 792 - u16 prioidx; 793 - u8 padding; 794 - u8 unused : 6; 795 - u8 no_refcnt : 1; 796 - u8 is_data : 1; 797 - } __packed; 762 + struct cgroup *cgroup; /* v2 */ 763 + #ifdef CONFIG_CGROUP_NET_CLASSID 764 + u32 classid; /* v1 */ 798 765 #endif 799 - u64 val; 800 - }; 766 + #ifdef CONFIG_CGROUP_NET_PRIO 767 + u16 prioidx; /* v1 */ 768 + #endif 801 769 }; 802 770 803 - /* 804 - * There's a theoretical window where the following accessors race with 805 - * updaters and return part of the previous pointer as the prioidx or 806 - * classid. Such races are short-lived and the result isn't critical. 807 - */ 808 771 static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) 809 772 { 810 - /* fallback to 1 which is always the ID of the root cgroup */ 811 - return (skcd->is_data & 1) ? skcd->prioidx : 1; 773 + #ifdef CONFIG_CGROUP_NET_PRIO 774 + return READ_ONCE(skcd->prioidx); 775 + #else 776 + return 1; 777 + #endif 812 778 } 813 779 814 780 static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) 815 781 { 816 - /* fallback to 0 which is the unconfigured default classid */ 817 - return (skcd->is_data & 1) ? skcd->classid : 0; 782 + #ifdef CONFIG_CGROUP_NET_CLASSID 783 + return READ_ONCE(skcd->classid); 784 + #else 785 + return 0; 786 + #endif 818 787 } 819 788 820 - /* 821 - * If invoked concurrently, the updaters may clobber each other. The 822 - * caller is responsible for synchronization. 
823 - */ 824 789 static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, 825 790 u16 prioidx) 826 791 { 827 - struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; 828 - 829 - if (sock_cgroup_prioidx(&skcd_buf) == prioidx) 830 - return; 831 - 832 - if (!(skcd_buf.is_data & 1)) { 833 - skcd_buf.val = 0; 834 - skcd_buf.is_data = 1; 835 - } 836 - 837 - skcd_buf.prioidx = prioidx; 838 - WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ 792 + #ifdef CONFIG_CGROUP_NET_PRIO 793 + WRITE_ONCE(skcd->prioidx, prioidx); 794 + #endif 839 795 } 840 796 841 797 static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, 842 798 u32 classid) 843 799 { 844 - struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; 845 - 846 - if (sock_cgroup_classid(&skcd_buf) == classid) 847 - return; 848 - 849 - if (!(skcd_buf.is_data & 1)) { 850 - skcd_buf.val = 0; 851 - skcd_buf.is_data = 1; 852 - } 853 - 854 - skcd_buf.classid = classid; 855 - WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ 800 + #ifdef CONFIG_CGROUP_NET_CLASSID 801 + WRITE_ONCE(skcd->classid, classid); 802 + #endif 856 803 } 857 804 858 805 #else /* CONFIG_SOCK_CGROUP_DATA */
+1 -21
include/linux/cgroup.h
··· 829 829 */ 830 830 #ifdef CONFIG_SOCK_CGROUP_DATA 831 831 832 - #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) 833 - extern spinlock_t cgroup_sk_update_lock; 834 - #endif 835 - 836 - void cgroup_sk_alloc_disable(void); 837 832 void cgroup_sk_alloc(struct sock_cgroup_data *skcd); 838 833 void cgroup_sk_clone(struct sock_cgroup_data *skcd); 839 834 void cgroup_sk_free(struct sock_cgroup_data *skcd); 840 835 841 836 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) 842 837 { 843 - #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) 844 - unsigned long v; 845 - 846 - /* 847 - * @skcd->val is 64bit but the following is safe on 32bit too as we 848 - * just need the lower ulong to be written and read atomically. 849 - */ 850 - v = READ_ONCE(skcd->val); 851 - 852 - if (v & 3) 853 - return &cgrp_dfl_root.cgrp; 854 - 855 - return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; 856 - #else 857 - return (struct cgroup *)(unsigned long)skcd->val; 858 - #endif 838 + return skcd->cgroup; 859 839 } 860 840 861 841 #else /* CONFIG_CGROUP_DATA */
-13
include/linux/compiler-clang.h
··· 62 62 #define __no_sanitize_coverage 63 63 #endif 64 64 65 - /* 66 - * Not all versions of clang implement the type-generic versions 67 - * of the builtin overflow checkers. Fortunately, clang implements 68 - * __has_builtin allowing us to avoid awkward version 69 - * checks. Unfortunately, we don't know which version of gcc clang 70 - * pretends to be, so the macro may or may not be defined. 71 - */ 72 - #if __has_builtin(__builtin_mul_overflow) && \ 73 - __has_builtin(__builtin_add_overflow) && \ 74 - __has_builtin(__builtin_sub_overflow) 75 - #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 76 - #endif 77 - 78 65 #if __has_feature(shadow_call_stack) 79 66 # define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) 80 67 #endif
+1 -7
include/linux/compiler-gcc.h
··· 95 95 96 96 #if GCC_VERSION >= 70000 97 97 #define KASAN_ABI_VERSION 5 98 - #elif GCC_VERSION >= 50000 98 + #else 99 99 #define KASAN_ABI_VERSION 4 100 - #elif GCC_VERSION >= 40902 101 - #define KASAN_ABI_VERSION 3 102 100 #endif 103 101 104 102 #if __has_attribute(__no_sanitize_address__) ··· 121 123 #define __no_sanitize_coverage __attribute__((no_sanitize_coverage)) 122 124 #else 123 125 #define __no_sanitize_coverage 124 - #endif 125 - 126 - #if GCC_VERSION >= 50100 127 - #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 128 126 #endif 129 127 130 128 /*
+2
include/linux/compiler.h
··· 188 188 (typeof(ptr)) (__ptr + (off)); }) 189 189 #endif 190 190 191 + #define absolute_pointer(val) RELOC_HIDE((void *)(val), 0) 192 + 191 193 #ifndef OPTIMIZER_HIDE_VAR 192 194 /* Make the optimizer believe the variable can be manipulated arbitrarily. */ 193 195 #define OPTIMIZER_HIDE_VAR(var) \
-24
include/linux/compiler_attributes.h
··· 21 21 */ 22 22 23 23 /* 24 - * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. 25 - * In the meantime, to support gcc < 5, we implement __has_attribute 26 - * by hand. 27 - */ 28 - #ifndef __has_attribute 29 - # define __has_attribute(x) __GCC4_has_attribute_##x 30 - # define __GCC4_has_attribute___assume_aligned__ 1 31 - # define __GCC4_has_attribute___copy__ 0 32 - # define __GCC4_has_attribute___designated_init__ 0 33 - # define __GCC4_has_attribute___error__ 1 34 - # define __GCC4_has_attribute___externally_visible__ 1 35 - # define __GCC4_has_attribute___no_caller_saved_registers__ 0 36 - # define __GCC4_has_attribute___noclone__ 1 37 - # define __GCC4_has_attribute___no_profile_instrument_function__ 0 38 - # define __GCC4_has_attribute___nonstring__ 0 39 - # define __GCC4_has_attribute___no_sanitize_address__ 1 40 - # define __GCC4_has_attribute___no_sanitize_undefined__ 1 41 - # define __GCC4_has_attribute___no_sanitize_coverage__ 0 42 - # define __GCC4_has_attribute___fallthrough__ 0 43 - # define __GCC4_has_attribute___warning__ 1 44 - #endif 45 - 46 - /* 47 24 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute 48 25 */ 49 26 #define __alias(symbol) __attribute__((__alias__(#symbol))) ··· 54 77 * compiler should see some alignment anyway, when the return value is 55 78 * massaged by 'flags = ptr & 3; ptr &= ~3;'). 56 79 * 57 - * Optional: only supported since gcc >= 4.9 58 80 * Optional: not supported by icc 59 81 * 60 82 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
+1
include/linux/memblock.h
··· 118 118 int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); 119 119 120 120 void memblock_free_all(void); 121 + void memblock_free_ptr(void *ptr, size_t size); 121 122 void reset_node_managed_pages(pg_data_t *pgdat); 122 123 void reset_all_zones_managed_pages(void); 123 124
-9
include/linux/mmap_lock.h
··· 144 144 up_read(&mm->mmap_lock); 145 145 } 146 146 147 - static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) 148 - { 149 - if (mmap_read_trylock(mm)) { 150 - rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); 151 - return true; 152 - } 153 - return false; 154 - } 155 - 156 147 static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) 157 148 { 158 149 __mmap_lock_trace_released(mm, false);
+3 -135
include/linux/overflow.h
··· 6 6 #include <linux/limits.h> 7 7 8 8 /* 9 - * In the fallback code below, we need to compute the minimum and 10 - * maximum values representable in a given type. These macros may also 11 - * be useful elsewhere, so we provide them outside the 12 - * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. 13 - * 14 - * It would seem more obvious to do something like 9 + * We need to compute the minimum and maximum values representable in a given 10 + * type. These macros may also be useful elsewhere. It would seem more obvious 11 + * to do something like: 15 12 * 16 13 * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) 17 14 * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) ··· 51 54 return unlikely(overflow); 52 55 } 53 56 54 - #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 55 57 /* 56 58 * For simplicity and code hygiene, the fallback code below insists on 57 59 * a, b and *d having the same type (similar to the min() and max() ··· 85 89 (void) (&__a == __d); \ 86 90 __builtin_mul_overflow(__a, __b, __d); \ 87 91 })) 88 - 89 - #else 90 - 91 - 92 - /* Checking for unsigned overflow is relatively easy without causing UB. */ 93 - #define __unsigned_add_overflow(a, b, d) ({ \ 94 - typeof(a) __a = (a); \ 95 - typeof(b) __b = (b); \ 96 - typeof(d) __d = (d); \ 97 - (void) (&__a == &__b); \ 98 - (void) (&__a == __d); \ 99 - *__d = __a + __b; \ 100 - *__d < __a; \ 101 - }) 102 - #define __unsigned_sub_overflow(a, b, d) ({ \ 103 - typeof(a) __a = (a); \ 104 - typeof(b) __b = (b); \ 105 - typeof(d) __d = (d); \ 106 - (void) (&__a == &__b); \ 107 - (void) (&__a == __d); \ 108 - *__d = __a - __b; \ 109 - __a < __b; \ 110 - }) 111 - /* 112 - * If one of a or b is a compile-time constant, this avoids a division. 
113 - */ 114 - #define __unsigned_mul_overflow(a, b, d) ({ \ 115 - typeof(a) __a = (a); \ 116 - typeof(b) __b = (b); \ 117 - typeof(d) __d = (d); \ 118 - (void) (&__a == &__b); \ 119 - (void) (&__a == __d); \ 120 - *__d = __a * __b; \ 121 - __builtin_constant_p(__b) ? \ 122 - __b > 0 && __a > type_max(typeof(__a)) / __b : \ 123 - __a > 0 && __b > type_max(typeof(__b)) / __a; \ 124 - }) 125 - 126 - /* 127 - * For signed types, detecting overflow is much harder, especially if 128 - * we want to avoid UB. But the interface of these macros is such that 129 - * we must provide a result in *d, and in fact we must produce the 130 - * result promised by gcc's builtins, which is simply the possibly 131 - * wrapped-around value. Fortunately, we can just formally do the 132 - * operations in the widest relevant unsigned type (u64) and then 133 - * truncate the result - gcc is smart enough to generate the same code 134 - * with and without the (u64) casts. 135 - */ 136 - 137 - /* 138 - * Adding two signed integers can overflow only if they have the same 139 - * sign, and overflow has happened iff the result has the opposite 140 - * sign. 141 - */ 142 - #define __signed_add_overflow(a, b, d) ({ \ 143 - typeof(a) __a = (a); \ 144 - typeof(b) __b = (b); \ 145 - typeof(d) __d = (d); \ 146 - (void) (&__a == &__b); \ 147 - (void) (&__a == __d); \ 148 - *__d = (u64)__a + (u64)__b; \ 149 - (((~(__a ^ __b)) & (*__d ^ __a)) \ 150 - & type_min(typeof(__a))) != 0; \ 151 - }) 152 - 153 - /* 154 - * Subtraction is similar, except that overflow can now happen only 155 - * when the signs are opposite. In this case, overflow has happened if 156 - * the result has the opposite sign of a. 
157 - */ 158 - #define __signed_sub_overflow(a, b, d) ({ \ 159 - typeof(a) __a = (a); \ 160 - typeof(b) __b = (b); \ 161 - typeof(d) __d = (d); \ 162 - (void) (&__a == &__b); \ 163 - (void) (&__a == __d); \ 164 - *__d = (u64)__a - (u64)__b; \ 165 - ((((__a ^ __b)) & (*__d ^ __a)) \ 166 - & type_min(typeof(__a))) != 0; \ 167 - }) 168 - 169 - /* 170 - * Signed multiplication is rather hard. gcc always follows C99, so 171 - * division is truncated towards 0. This means that we can write the 172 - * overflow check like this: 173 - * 174 - * (a > 0 && (b > MAX/a || b < MIN/a)) || 175 - * (a < -1 && (b > MIN/a || b < MAX/a) || 176 - * (a == -1 && b == MIN) 177 - * 178 - * The redundant casts of -1 are to silence an annoying -Wtype-limits 179 - * (included in -Wextra) warning: When the type is u8 or u16, the 180 - * __b_c_e in check_mul_overflow obviously selects 181 - * __unsigned_mul_overflow, but unfortunately gcc still parses this 182 - * code and warns about the limited range of __b. 183 - */ 184 - 185 - #define __signed_mul_overflow(a, b, d) ({ \ 186 - typeof(a) __a = (a); \ 187 - typeof(b) __b = (b); \ 188 - typeof(d) __d = (d); \ 189 - typeof(a) __tmax = type_max(typeof(a)); \ 190 - typeof(a) __tmin = type_min(typeof(a)); \ 191 - (void) (&__a == &__b); \ 192 - (void) (&__a == __d); \ 193 - *__d = (u64)__a * (u64)__b; \ 194 - (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ 195 - (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ 196 - (__b == (typeof(__b))-1 && __a == __tmin); \ 197 - }) 198 - 199 - 200 - #define check_add_overflow(a, b, d) __must_check_overflow( \ 201 - __builtin_choose_expr(is_signed_type(typeof(a)), \ 202 - __signed_add_overflow(a, b, d), \ 203 - __unsigned_add_overflow(a, b, d))) 204 - 205 - #define check_sub_overflow(a, b, d) __must_check_overflow( \ 206 - __builtin_choose_expr(is_signed_type(typeof(a)), \ 207 - __signed_sub_overflow(a, b, d), \ 208 - __unsigned_sub_overflow(a, b, d))) 209 - 210 - #define 
check_mul_overflow(a, b, d) __must_check_overflow( \ 211 - __builtin_choose_expr(is_signed_type(typeof(a)), \ 212 - __signed_mul_overflow(a, b, d), \ 213 - __unsigned_mul_overflow(a, b, d))) 214 - 215 - #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ 216 92 217 93 /** check_shl_overflow() - Calculate a left-shifted value and check overflow 218 94 *
+1
include/linux/sched.h
··· 1471 1471 mce_whole_page : 1, 1472 1472 __mce_reserved : 62; 1473 1473 struct callback_head mce_kill_me; 1474 + int mce_count; 1474 1475 #endif 1475 1476 1476 1477 #ifdef CONFIG_KRETPROBES
+1 -1
include/linux/skbuff.h
··· 1940 1940 WRITE_ONCE(newsk->prev, prev); 1941 1941 WRITE_ONCE(next->prev, newsk); 1942 1942 WRITE_ONCE(prev->next, newsk); 1943 - list->qlen++; 1943 + WRITE_ONCE(list->qlen, list->qlen + 1); 1944 1944 } 1945 1945 1946 1946 static inline void __skb_queue_splice(const struct sk_buff_head *list,
+16 -5
include/linux/uio.h
··· 27 27 ITER_DISCARD, 28 28 }; 29 29 30 + struct iov_iter_state { 31 + size_t iov_offset; 32 + size_t count; 33 + unsigned long nr_segs; 34 + }; 35 + 30 36 struct iov_iter { 31 37 u8 iter_type; 32 38 bool data_source; ··· 53 47 }; 54 48 loff_t xarray_start; 55 49 }; 56 - size_t truncated; 57 50 }; 58 51 59 52 static inline enum iter_type iov_iter_type(const struct iov_iter *i) 60 53 { 61 54 return i->iter_type; 55 + } 56 + 57 + static inline void iov_iter_save_state(struct iov_iter *iter, 58 + struct iov_iter_state *state) 59 + { 60 + state->iov_offset = iter->iov_offset; 61 + state->count = iter->count; 62 + state->nr_segs = iter->nr_segs; 62 63 } 63 64 64 65 static inline bool iter_is_iovec(const struct iov_iter *i) ··· 246 233 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, 247 234 size_t maxsize, size_t *start); 248 235 int iov_iter_npages(const struct iov_iter *i, int maxpages); 236 + void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state); 249 237 250 238 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); 251 239 ··· 269 255 * conversion in assignement is by definition greater than all 270 256 * values of size_t, including old i->count. 271 257 */ 272 - if (i->count > count) { 273 - i->truncated += i->count - count; 258 + if (i->count > count) 274 259 i->count = count; 275 - } 276 260 } 277 261 278 262 /* ··· 279 267 */ 280 268 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) 281 269 { 282 - i->truncated -= count - i->count; 283 270 i->count = count; 284 271 } 285 272
+5
include/net/dsa.h
··· 447 447 return dp->type == DSA_PORT_TYPE_USER; 448 448 } 449 449 450 + static inline bool dsa_port_is_unused(struct dsa_port *dp) 451 + { 452 + return dp->type == DSA_PORT_TYPE_UNUSED; 453 + } 454 + 450 455 static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) 451 456 { 452 457 return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
+7 -1
include/uapi/linux/io_uring.h
··· 317 317 IORING_REGISTER_IOWQ_AFF = 17, 318 318 IORING_UNREGISTER_IOWQ_AFF = 18, 319 319 320 - /* set/get max number of workers */ 320 + /* set/get max number of io-wq workers */ 321 321 IORING_REGISTER_IOWQ_MAX_WORKERS = 19, 322 322 323 323 /* this goes last */ 324 324 IORING_REGISTER_LAST 325 + }; 326 + 327 + /* io-wq worker categories */ 328 + enum { 329 + IO_WQ_BOUND, 330 + IO_WQ_UNBOUND, 325 331 }; 326 332 327 333 /* deprecated, see struct io_uring_rsrc_update */
+1 -1
init/main.c
··· 924 924 end += sprintf(end, " %s", *p); 925 925 926 926 pr_notice("Unknown command line parameters:%s\n", unknown_options); 927 - memblock_free(__pa(unknown_options), len); 927 + memblock_free_ptr(unknown_options, len); 928 928 } 929 929 930 930 asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+1 -1
ipc/sem.c
··· 2238 2238 return -EINVAL; 2239 2239 2240 2240 if (nsops > SEMOPM_FAST) { 2241 - sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL_ACCOUNT); 2241 + sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); 2242 2242 if (sops == NULL) 2243 2243 return -ENOMEM; 2244 2244 }
+1 -1
kernel/bpf/disasm.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 3 * Copyright (c) 2016 Facebook 4 4 */
+1 -1
kernel/bpf/disasm.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 3 * Copyright (c) 2016 Facebook 4 4 */
+8 -2
kernel/bpf/stackmap.c
··· 179 179 * with build_id. 180 180 */ 181 181 if (!user || !current || !current->mm || irq_work_busy || 182 - !mmap_read_trylock_non_owner(current->mm)) { 182 + !mmap_read_trylock(current->mm)) { 183 183 /* cannot access current->mm, fall back to ips */ 184 184 for (i = 0; i < trace_nr; i++) { 185 185 id_offs[i].status = BPF_STACK_BUILD_ID_IP; ··· 204 204 } 205 205 206 206 if (!work) { 207 - mmap_read_unlock_non_owner(current->mm); 207 + mmap_read_unlock(current->mm); 208 208 } else { 209 209 work->mm = current->mm; 210 + 211 + /* The lock will be released once we're out of interrupt 212 + * context. Tell lockdep that we've released it now so 213 + * it doesn't complain that we forgot to release it. 214 + */ 215 + rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_); 210 216 irq_work_queue(&work->irq_work); 211 217 } 212 218 }
+2
kernel/bpf/verifier.c
··· 9912 9912 nr_linfo = attr->line_info_cnt; 9913 9913 if (!nr_linfo) 9914 9914 return 0; 9915 + if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) 9916 + return -EINVAL; 9915 9917 9916 9918 rec_size = attr->line_info_rec_size; 9917 9919 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
+10 -40
kernel/cgroup/cgroup.c
··· 6572 6572 */ 6573 6573 #ifdef CONFIG_SOCK_CGROUP_DATA 6574 6574 6575 - #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) 6576 - 6577 - DEFINE_SPINLOCK(cgroup_sk_update_lock); 6578 - static bool cgroup_sk_alloc_disabled __read_mostly; 6579 - 6580 - void cgroup_sk_alloc_disable(void) 6581 - { 6582 - if (cgroup_sk_alloc_disabled) 6583 - return; 6584 - pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n"); 6585 - cgroup_sk_alloc_disabled = true; 6586 - } 6587 - 6588 - #else 6589 - 6590 - #define cgroup_sk_alloc_disabled false 6591 - 6592 - #endif 6593 - 6594 6575 void cgroup_sk_alloc(struct sock_cgroup_data *skcd) 6595 6576 { 6596 - if (cgroup_sk_alloc_disabled) { 6597 - skcd->no_refcnt = 1; 6598 - return; 6599 - } 6600 - 6601 6577 /* Don't associate the sock with unrelated interrupted task's cgroup. */ 6602 6578 if (in_interrupt()) 6603 6579 return; 6604 6580 6605 6581 rcu_read_lock(); 6606 - 6607 6582 while (true) { 6608 6583 struct css_set *cset; 6609 6584 6610 6585 cset = task_css_set(current); 6611 6586 if (likely(cgroup_tryget(cset->dfl_cgrp))) { 6612 - skcd->val = (unsigned long)cset->dfl_cgrp; 6587 + skcd->cgroup = cset->dfl_cgrp; 6613 6588 cgroup_bpf_get(cset->dfl_cgrp); 6614 6589 break; 6615 6590 } 6616 6591 cpu_relax(); 6617 6592 } 6618 - 6619 6593 rcu_read_unlock(); 6620 6594 } 6621 6595 6622 6596 void cgroup_sk_clone(struct sock_cgroup_data *skcd) 6623 6597 { 6624 - if (skcd->val) { 6625 - if (skcd->no_refcnt) 6626 - return; 6627 - /* 6628 - * We might be cloning a socket which is left in an empty 6629 - * cgroup and the cgroup might have already been rmdir'd. 6630 - * Don't use cgroup_get_live(). 
6631 - */ 6632 - cgroup_get(sock_cgroup_ptr(skcd)); 6633 - cgroup_bpf_get(sock_cgroup_ptr(skcd)); 6634 - } 6598 + struct cgroup *cgrp = sock_cgroup_ptr(skcd); 6599 + 6600 + /* 6601 + * We might be cloning a socket which is left in an empty 6602 + * cgroup and the cgroup might have already been rmdir'd. 6603 + * Don't use cgroup_get_live(). 6604 + */ 6605 + cgroup_get(cgrp); 6606 + cgroup_bpf_get(cgrp); 6635 6607 } 6636 6608 6637 6609 void cgroup_sk_free(struct sock_cgroup_data *skcd) 6638 6610 { 6639 6611 struct cgroup *cgrp = sock_cgroup_ptr(skcd); 6640 6612 6641 - if (skcd->no_refcnt) 6642 - return; 6643 6613 cgroup_bpf_put(cgrp); 6644 6614 cgroup_put(cgrp); 6645 6615 }
+2 -1
kernel/dma/debug.c
··· 567 567 pr_err("cacheline tracking ENOMEM, dma-debug disabled\n"); 568 568 global_disable = true; 569 569 } else if (rc == -EEXIST) { 570 - pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n"); 570 + err_printk(entry->dev, entry, 571 + "cacheline tracking EEXIST, overlapping mappings aren't supported\n"); 571 572 } 572 573 } 573 574
+2 -1
kernel/dma/mapping.c
··· 206 206 /** 207 207 * dma_map_sg_attrs - Map the given buffer for DMA 208 208 * @dev: The device for which to perform the DMA operation 209 - * @sg: The sg_table object describing the buffer 209 + * @sg: The sg_table object describing the buffer 210 + * @nents: Number of entries to map 210 211 * @dir: DMA direction 211 212 * @attrs: Optional DMA attributes for the map operation 212 213 *
+1 -1
kernel/events/core.c
··· 10193 10193 return; 10194 10194 10195 10195 if (ifh->nr_file_filters) { 10196 - mm = get_task_mm(event->ctx->task); 10196 + mm = get_task_mm(task); 10197 10197 if (!mm) 10198 10198 goto restart; 10199 10199
+46 -21
kernel/locking/rwbase_rt.c
··· 41 41 * The risk of writer starvation is there, but the pathological use cases 42 42 * which trigger it are not necessarily the typical RT workloads. 43 43 * 44 + * Fast-path orderings: 45 + * The lock/unlock of readers can run in fast paths: lock and unlock are only 46 + * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE 47 + * semantics of rwbase_rt. Atomic ops should thus provide _acquire() 48 + * and _release() (or stronger). 49 + * 44 50 * Common code shared between RT rw_semaphore and rwlock 45 51 */ 46 52 ··· 59 53 * set. 60 54 */ 61 55 for (r = atomic_read(&rwb->readers); r < 0;) { 56 + /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */ 62 57 if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1))) 63 58 return 1; 64 59 } ··· 169 162 /* 170 163 * rwb->readers can only hit 0 when a writer is waiting for the 171 164 * active readers to leave the critical section. 165 + * 166 + * dec_and_test() is fully ordered, provides RELEASE. 172 167 */ 173 168 if (unlikely(atomic_dec_and_test(&rwb->readers))) 174 169 __rwbase_read_unlock(rwb, state); ··· 181 172 { 182 173 struct rt_mutex_base *rtm = &rwb->rtmutex; 183 174 184 - atomic_add(READER_BIAS - bias, &rwb->readers); 175 + /* 176 + * _release() is needed in case that reader is in fast path, pairing 177 + * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE 178 + */ 179 + (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers); 185 180 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); 186 181 rwbase_rtmutex_unlock(rtm); 187 182 } ··· 209 196 __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags); 210 197 } 211 198 199 + static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb) 200 + { 201 + /* Can do without CAS because we're serialized by wait_lock. */ 202 + lockdep_assert_held(&rwb->rtmutex.wait_lock); 203 + 204 + /* 205 + * _acquire is needed in case the reader is in the fast path, pairing 206 + * with rwbase_read_unlock(), provides ACQUIRE. 
207 + */ 208 + if (!atomic_read_acquire(&rwb->readers)) { 209 + atomic_set(&rwb->readers, WRITER_BIAS); 210 + return 1; 211 + } 212 + 213 + return 0; 214 + } 215 + 212 216 static int __sched rwbase_write_lock(struct rwbase_rt *rwb, 213 217 unsigned int state) 214 218 { ··· 240 210 atomic_sub(READER_BIAS, &rwb->readers); 241 211 242 212 raw_spin_lock_irqsave(&rtm->wait_lock, flags); 243 - /* 244 - * set_current_state() for rw_semaphore 245 - * current_save_and_set_rtlock_wait_state() for rwlock 246 - */ 247 - rwbase_set_and_save_current_state(state); 213 + if (__rwbase_write_trylock(rwb)) 214 + goto out_unlock; 248 215 249 - /* Block until all readers have left the critical section. */ 250 - for (; atomic_read(&rwb->readers);) { 216 + rwbase_set_and_save_current_state(state); 217 + for (;;) { 251 218 /* Optimized out for rwlocks */ 252 219 if (rwbase_signal_pending_state(state, current)) { 253 - __set_current_state(TASK_RUNNING); 220 + rwbase_restore_current_state(); 254 221 __rwbase_write_unlock(rwb, 0, flags); 255 222 return -EINTR; 256 223 } 224 + 225 + if (__rwbase_write_trylock(rwb)) 226 + break; 227 + 257 228 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); 258 - 259 - /* 260 - * Schedule and wait for the readers to leave the critical 261 - * section. The last reader leaving it wakes the waiter. 
262 - */ 263 - if (atomic_read(&rwb->readers) != 0) 264 - rwbase_schedule(); 265 - set_current_state(state); 229 + rwbase_schedule(); 266 230 raw_spin_lock_irqsave(&rtm->wait_lock, flags); 267 - } 268 231 269 - atomic_set(&rwb->readers, WRITER_BIAS); 232 + set_current_state(state); 233 + } 270 234 rwbase_restore_current_state(); 235 + 236 + out_unlock: 271 237 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); 272 238 return 0; 273 239 } ··· 279 253 atomic_sub(READER_BIAS, &rwb->readers); 280 254 281 255 raw_spin_lock_irqsave(&rtm->wait_lock, flags); 282 - if (!atomic_read(&rwb->readers)) { 283 - atomic_set(&rwb->readers, WRITER_BIAS); 256 + if (__rwbase_write_trylock(rwb)) { 284 257 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); 285 258 return 1; 286 259 }
+2 -2
kernel/printk/printk.c
··· 1166 1166 return; 1167 1167 1168 1168 err_free_descs: 1169 - memblock_free(__pa(new_descs), new_descs_size); 1169 + memblock_free_ptr(new_descs, new_descs_size); 1170 1170 err_free_log_buf: 1171 - memblock_free(__pa(new_log_buf), new_log_buf_len); 1171 + memblock_free_ptr(new_log_buf, new_log_buf_len); 1172 1172 } 1173 1173 1174 1174 static bool __read_mostly ignore_loglevel;
+1 -1
lib/Kconfig.debug
··· 295 295 296 296 config DEBUG_INFO_DWARF5 297 297 bool "Generate DWARF Version 5 debuginfo" 298 - depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502))) 298 + depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502))) 299 299 depends on !DEBUG_INFO_BTF 300 300 help 301 301 Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
+1 -1
lib/bootconfig.c
··· 792 792 xbc_data = NULL; 793 793 xbc_data_size = 0; 794 794 xbc_node_num = 0; 795 - memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX); 795 + memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX); 796 796 xbc_nodes = NULL; 797 797 brace_index = 0; 798 798 }
+36
lib/iov_iter.c
··· 1972 1972 return 0; 1973 1973 } 1974 1974 EXPORT_SYMBOL(import_single_range); 1975 + 1976 + /** 1977 + * iov_iter_restore() - Restore a &struct iov_iter to the same state as when 1978 + * iov_iter_save_state() was called. 1979 + * 1980 + * @i: &struct iov_iter to restore 1981 + * @state: state to restore from 1982 + * 1983 + * Used after iov_iter_save_state() to bring restore @i, if operations may 1984 + * have advanced it. 1985 + * 1986 + * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC 1987 + */ 1988 + void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) 1989 + { 1990 + if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) && 1991 + !iov_iter_is_kvec(i)) 1992 + return; 1993 + i->iov_offset = state->iov_offset; 1994 + i->count = state->count; 1995 + /* 1996 + * For the *vec iters, nr_segs + iov is constant - if we increment 1997 + * the vec, then we also decrement the nr_segs count. Hence we don't 1998 + * need to track both of these, just one is enough and we can deduct 1999 + * the other from that. ITER_KVEC and ITER_IOVEC are the same struct 2000 + * size, so we can just increment the iov pointer as they are unionzed. 2001 + * ITER_BVEC _may_ be the same size on some archs, but on others it is 2002 + * not. Be safe and handle it separately. 2003 + */ 2004 + BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); 2005 + if (iov_iter_is_bvec(i)) 2006 + i->bvec -= state->nr_segs - i->nr_segs; 2007 + else 2008 + i->iov -= state->nr_segs - i->nr_segs; 2009 + i->nr_segs = state->nr_segs; 2010 + }
+43
lib/pci_iomap.c
··· 134 134 return pci_iomap_wc_range(dev, bar, 0, maxlen); 135 135 } 136 136 EXPORT_SYMBOL_GPL(pci_iomap_wc); 137 + 138 + /* 139 + * pci_iounmap() somewhat illogically comes from lib/iomap.c for the 140 + * CONFIG_GENERIC_IOMAP case, because that's the code that knows about 141 + * the different IOMAP ranges. 142 + * 143 + * But if the architecture does not use the generic iomap code, and if 144 + * it has _not_ defined it's own private pci_iounmap function, we define 145 + * it here. 146 + * 147 + * NOTE! This default implementation assumes that if the architecture 148 + * support ioport mapping (HAS_IOPORT_MAP), the ioport mapping will 149 + * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [, 150 + * and does not need unmapping with 'ioport_unmap()'. 151 + * 152 + * If you have different rules for your architecture, you need to 153 + * implement your own pci_iounmap() that knows the rules for where 154 + * and how IO vs MEM get mapped. 155 + * 156 + * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes 157 + * from legacy <asm-generic/io.h> header file behavior. In particular, 158 + * it would seem to make sense to do the iounmap(p) for the non-IO-space 159 + * case here regardless, but that's not what the old header file code 160 + * did. Probably incorrectly, but this is meant to be bug-for-bug 161 + * compatible. 162 + */ 163 + #if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP) 164 + 165 + void pci_iounmap(struct pci_dev *dev, void __iomem *p) 166 + { 167 + #ifdef ARCH_HAS_GENERIC_IOPORT_MAP 168 + uintptr_t start = (uintptr_t) PCI_IOBASE; 169 + uintptr_t addr = (uintptr_t) p; 170 + 171 + if (addr >= start && addr < start + IO_SPACE_LIMIT) 172 + return; 173 + iounmap(p); 174 + #endif 175 + } 176 + EXPORT_SYMBOL(pci_iounmap); 177 + 178 + #endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */ 179 + 137 180 #endif /* CONFIG_PCI */
-2
mm/ksm.c
··· 651 651 * from &migrate_nodes. This will verify that future list.h changes 652 652 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it. 653 653 */ 654 - #if defined(GCC_VERSION) && GCC_VERSION >= 40903 655 654 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes); 656 655 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1); 657 - #endif 658 656 659 657 if (stable_node->head == &migrate_nodes) 660 658 list_del(&stable_node->list);
+15 -1
mm/memblock.c
··· 472 472 kfree(old_array); 473 473 else if (old_array != memblock_memory_init_regions && 474 474 old_array != memblock_reserved_init_regions) 475 - memblock_free(__pa(old_array), old_alloc_size); 475 + memblock_free_ptr(old_array, old_alloc_size); 476 476 477 477 /* 478 478 * Reserve the new array if that comes from the memblock. Otherwise, we ··· 793 793 &base, &end, (void *)_RET_IP_); 794 794 795 795 return memblock_remove_range(&memblock.memory, base, size); 796 + } 797 + 798 + /** 799 + * memblock_free_ptr - free boot memory allocation 800 + * @ptr: starting address of the boot memory allocation 801 + * @size: size of the boot memory block in bytes 802 + * 803 + * Free boot memory block previously allocated by memblock_alloc_xx() API. 804 + * The freeing memory will not be released to the buddy allocator. 805 + */ 806 + void __init_memblock memblock_free_ptr(void *ptr, size_t size) 807 + { 808 + if (ptr) 809 + memblock_free(__pa(ptr), size); 796 810 } 797 811 798 812 /**
+3 -16
net/caif/chnl_net.c
··· 53 53 enum caif_states state; 54 54 }; 55 55 56 - static void robust_list_del(struct list_head *delete_node) 57 - { 58 - struct list_head *list_node; 59 - struct list_head *n; 60 - ASSERT_RTNL(); 61 - list_for_each_safe(list_node, n, &chnl_net_list) { 62 - if (list_node == delete_node) { 63 - list_del(list_node); 64 - return; 65 - } 66 - } 67 - WARN_ON(1); 68 - } 69 - 70 56 static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) 71 57 { 72 58 struct sk_buff *skb; ··· 350 364 ASSERT_RTNL(); 351 365 priv = netdev_priv(dev); 352 366 strncpy(priv->name, dev->name, sizeof(priv->name)); 367 + INIT_LIST_HEAD(&priv->list_field); 353 368 return 0; 354 369 } 355 370 ··· 359 372 struct chnl_net *priv; 360 373 ASSERT_RTNL(); 361 374 priv = netdev_priv(dev); 362 - robust_list_del(&priv->list_field); 375 + list_del_init(&priv->list_field); 363 376 } 364 377 365 378 static const struct net_device_ops netdev_ops = { ··· 524 537 rtnl_lock(); 525 538 list_for_each_safe(list_node, _tmp, &chnl_net_list) { 526 539 dev = list_entry(list_node, struct chnl_net, list_field); 527 - list_del(list_node); 540 + list_del_init(list_node); 528 541 delete_device(dev); 529 542 } 530 543 rtnl_unlock();
+1 -6
net/core/netclassid_cgroup.c
··· 71 71 struct update_classid_context *ctx = (void *)v; 72 72 struct socket *sock = sock_from_file(file); 73 73 74 - if (sock) { 75 - spin_lock(&cgroup_sk_update_lock); 74 + if (sock) 76 75 sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid); 77 - spin_unlock(&cgroup_sk_update_lock); 78 - } 79 76 if (--ctx->batch == 0) { 80 77 ctx->batch = UPDATE_CLASSID_BATCH; 81 78 return n + 1; ··· 117 120 struct cgroup_cls_state *cs = css_cls_state(css); 118 121 struct css_task_iter it; 119 122 struct task_struct *p; 120 - 121 - cgroup_sk_alloc_disable(); 122 123 123 124 cs->classid = (u32)value; 124 125
+2 -8
net/core/netprio_cgroup.c
··· 207 207 if (!dev) 208 208 return -ENODEV; 209 209 210 - cgroup_sk_alloc_disable(); 211 - 212 210 rtnl_lock(); 213 211 214 212 ret = netprio_set_prio(of_css(of), dev, prio); ··· 219 221 static int update_netprio(const void *v, struct file *file, unsigned n) 220 222 { 221 223 struct socket *sock = sock_from_file(file); 222 - if (sock) { 223 - spin_lock(&cgroup_sk_update_lock); 224 + 225 + if (sock) 224 226 sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data, 225 227 (unsigned long)v); 226 - spin_unlock(&cgroup_sk_update_lock); 227 - } 228 228 return 0; 229 229 } 230 230 ··· 230 234 { 231 235 struct task_struct *p; 232 236 struct cgroup_subsys_state *css; 233 - 234 - cgroup_sk_alloc_disable(); 235 237 236 238 cgroup_taskset_for_each(p, css, tset) { 237 239 void *v = (void *)(unsigned long)css->id;
+2
net/dccp/minisocks.c
··· 94 94 newdp->dccps_role = DCCP_ROLE_SERVER; 95 95 newdp->dccps_hc_rx_ackvec = NULL; 96 96 newdp->dccps_service_list = NULL; 97 + newdp->dccps_hc_rx_ccid = NULL; 98 + newdp->dccps_hc_tx_ccid = NULL; 97 99 newdp->dccps_service = dreq->dreq_service; 98 100 newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo; 99 101 newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
+5
net/dsa/dsa.c
··· 345 345 return queue_work(dsa_owq, work); 346 346 } 347 347 348 + void dsa_flush_workqueue(void) 349 + { 350 + flush_workqueue(dsa_owq); 351 + } 352 + 348 353 int dsa_devlink_param_get(struct devlink *dl, u32 id, 349 354 struct devlink_param_gset_ctx *ctx) 350 355 {
+31 -15
net/dsa/dsa2.c
··· 897 897 ds->setup = false; 898 898 } 899 899 900 + /* First tear down the non-shared, then the shared ports. This ensures that 901 + * all work items scheduled by our switchdev handlers for user ports have 902 + * completed before we destroy the refcounting kept on the shared ports. 903 + */ 904 + static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst) 905 + { 906 + struct dsa_port *dp; 907 + 908 + list_for_each_entry(dp, &dst->ports, list) 909 + if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) 910 + dsa_port_teardown(dp); 911 + 912 + dsa_flush_workqueue(); 913 + 914 + list_for_each_entry(dp, &dst->ports, list) 915 + if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) 916 + dsa_port_teardown(dp); 917 + } 918 + 919 + static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst) 920 + { 921 + struct dsa_port *dp; 922 + 923 + list_for_each_entry(dp, &dst->ports, list) 924 + dsa_switch_teardown(dp->ds); 925 + } 926 + 900 927 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) 901 928 { 902 929 struct dsa_port *dp; ··· 950 923 return 0; 951 924 952 925 teardown: 953 - list_for_each_entry(dp, &dst->ports, list) 954 - dsa_port_teardown(dp); 926 + dsa_tree_teardown_ports(dst); 955 927 956 - list_for_each_entry(dp, &dst->ports, list) 957 - dsa_switch_teardown(dp->ds); 928 + dsa_tree_teardown_switches(dst); 958 929 959 930 return err; 960 - } 961 - 962 - static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst) 963 - { 964 - struct dsa_port *dp; 965 - 966 - list_for_each_entry(dp, &dst->ports, list) 967 - dsa_port_teardown(dp); 968 - 969 - list_for_each_entry(dp, &dst->ports, list) 970 - dsa_switch_teardown(dp->ds); 971 931 } 972 932 973 933 static int dsa_tree_setup_master(struct dsa_switch_tree *dst) ··· 1065 1051 dsa_tree_teardown_lags(dst); 1066 1052 1067 1053 dsa_tree_teardown_master(dst); 1054 + 1055 + dsa_tree_teardown_ports(dst); 1068 1056 1069 1057 dsa_tree_teardown_switches(dst); 1070 1058
+1
net/dsa/dsa_priv.h
··· 170 170 const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf); 171 171 172 172 bool dsa_schedule_work(struct work_struct *work); 173 + void dsa_flush_workqueue(void); 173 174 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops); 174 175 175 176 static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
+5 -7
net/dsa/slave.c
··· 1854 1854 * use the switch internal MDIO bus instead 1855 1855 */ 1856 1856 ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags); 1857 - if (ret) { 1858 - netdev_err(slave_dev, 1859 - "failed to connect to port %d: %d\n", 1860 - dp->index, ret); 1861 - phylink_destroy(dp->pl); 1862 - return ret; 1863 - } 1857 + } 1858 + if (ret) { 1859 + netdev_err(slave_dev, "failed to connect to PHY: %pe\n", 1860 + ERR_PTR(ret)); 1861 + phylink_destroy(dp->pl); 1864 1862 } 1865 1863 1866 1864 return ret;
+1 -1
net/ipv4/tcp_input.c
··· 1346 1346 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1347 1347 if (tp->undo_marker && tp->undo_retrans > 0 && 1348 1348 after(end_seq, tp->undo_marker)) 1349 - tp->undo_retrans--; 1349 + tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount); 1350 1350 if ((sacked & TCPCB_SACKED_ACKED) && 1351 1351 before(start_seq, state->reord)) 1352 1352 state->reord = start_seq;
+1 -1
net/ipv4/udp_tunnel_nic.c
··· 935 935 { 936 936 int err; 937 937 938 - udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0); 938 + udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0); 939 939 if (!udp_tunnel_nic_workqueue) 940 940 return -ENOMEM; 941 941
+1 -2
net/ipv6/ip6_fib.c
··· 1378 1378 int err = -ENOMEM; 1379 1379 int allow_create = 1; 1380 1380 int replace_required = 0; 1381 - int sernum = fib6_new_sernum(info->nl_net); 1382 1381 1383 1382 if (info->nlh) { 1384 1383 if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) ··· 1477 1478 if (!err) { 1478 1479 if (rt->nh) 1479 1480 list_add(&rt->nh_list, &rt->nh->f6i_list); 1480 - __fib6_update_sernum_upto_root(rt, sernum); 1481 + __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net)); 1481 1482 fib6_start_gc(info->nl_net, rt); 1482 1483 } 1483 1484
+3 -1
net/l2tp/l2tp_core.c
··· 869 869 } 870 870 871 871 if (tunnel->version == L2TP_HDR_VER_3 && 872 - l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) 872 + l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) { 873 + l2tp_session_dec_refcount(session); 873 874 goto invalid; 875 + } 874 876 875 877 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); 876 878 l2tp_session_dec_refcount(session);
+2
net/mctp/route.c
··· 1083 1083 { 1084 1084 struct mctp_route *rt; 1085 1085 1086 + rcu_read_lock(); 1086 1087 list_for_each_entry_rcu(rt, &net->mctp.routes, list) 1087 1088 mctp_route_release(rt); 1089 + rcu_read_unlock(); 1088 1090 } 1089 1091 1090 1092 static struct pernet_operations mctp_net_ops = {
+2
net/packet/af_packet.c
··· 46 46 * Copyright (C) 2011, <lokec@ccs.neu.edu> 47 47 */ 48 48 49 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 50 + 49 51 #include <linux/ethtool.h> 50 52 #include <linux/types.h> 51 53 #include <linux/mm.h>
+1 -1
net/tipc/socket.c
··· 2423 2423 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2424 2424 u32 dport, struct sk_buff_head *xmitq) 2425 2425 { 2426 - unsigned long time_limit = jiffies + 2; 2426 + unsigned long time_limit = jiffies + usecs_to_jiffies(20000); 2427 2427 struct sk_buff *skb; 2428 2428 unsigned int lim; 2429 2429 atomic_t *dcnt;
+1 -1
net/unix/af_unix.c
··· 3073 3073 3074 3074 other = unix_peer(sk); 3075 3075 if (other && unix_peer(other) != sk && 3076 - unix_recvq_full(other) && 3076 + unix_recvq_full_lockless(other) && 3077 3077 unix_dgram_peer_wake_me(sk, other)) 3078 3078 writable = 0; 3079 3079
+5
scripts/Makefile.clang
··· 29 29 else 30 30 CLANG_FLAGS += -fintegrated-as 31 31 endif 32 + # By default, clang only warns when it encounters an unknown warning flag or 33 + # certain optimization flags it knows it has not implemented. 34 + # Make it behave more like gcc by erroring when these flags are encountered 35 + # so they can be implemented or wrapped in cc-option. 32 36 CLANG_FLAGS += -Werror=unknown-warning-option 37 + CLANG_FLAGS += -Werror=ignored-optimization-argument 33 38 KBUILD_CFLAGS += $(CLANG_FLAGS) 34 39 KBUILD_AFLAGS += $(CLANG_FLAGS) 35 40 export CLANG_FLAGS
+1 -1
scripts/Makefile.modpost
··· 13 13 # Stage 2 is handled by this file and does the following 14 14 # 1) Find all modules listed in modules.order 15 15 # 2) modpost is then used to 16 - # 3) create one <module>.mod.c file pr. module 16 + # 3) create one <module>.mod.c file per module 17 17 # 4) create one Module.symvers file with CRC for all exported symbols 18 18 19 19 # Step 3 is used to place certain information in the module's ELF
+3 -8
scripts/checkkconfigsymbols.py
··· 34 34 REGEX_KCONFIG_DEF = re.compile(DEF) 35 35 REGEX_KCONFIG_EXPR = re.compile(EXPR) 36 36 REGEX_KCONFIG_STMT = re.compile(STMT) 37 - REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$") 38 37 REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$") 39 38 REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") 40 39 REGEX_QUOTES = re.compile("(\"(.*?)\")") ··· 101 102 "continue.") 102 103 103 104 if args.commit: 105 + if args.commit.startswith('HEAD'): 106 + sys.exit("The --commit option can't use the HEAD ref") 107 + 104 108 args.find = False 105 109 106 110 if args.ignore: ··· 434 432 lines = [] 435 433 defined = [] 436 434 references = [] 437 - skip = False 438 435 439 436 if not os.path.exists(kfile): 440 437 return defined, references ··· 449 448 if REGEX_KCONFIG_DEF.match(line): 450 449 symbol_def = REGEX_KCONFIG_DEF.findall(line) 451 450 defined.append(symbol_def[0]) 452 - skip = False 453 - elif REGEX_KCONFIG_HELP.match(line): 454 - skip = True 455 - elif skip: 456 - # ignore content of help messages 457 - pass 458 451 elif REGEX_KCONFIG_STMT.match(line): 459 452 line = REGEX_QUOTES.sub("", line) 460 453 symbols = get_symbols_in_line(line)
+1
scripts/clang-tools/gen_compile_commands.py
··· 13 13 import os 14 14 import re 15 15 import subprocess 16 + import sys 16 17 17 18 _DEFAULT_OUTPUT = 'compile_commands.json' 18 19 _DEFAULT_LOG_LEVEL = 'WARNING'
+1 -7
scripts/min-tool-version.sh
··· 17 17 echo 2.23.0 18 18 ;; 19 19 gcc) 20 - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 21 - # https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk 22 - if [ "$SRCARCH" = arm64 ]; then 23 - echo 5.1.0 24 - else 25 - echo 4.9.0 26 - fi 20 + echo 5.1.0 27 21 ;; 28 22 icc) 29 23 # temporary
+1 -2
tools/bootconfig/include/linux/memblock.h
··· 4 4 5 5 #include <stdlib.h> 6 6 7 - #define __pa(addr) (addr) 8 7 #define SMP_CACHE_BYTES 0 9 8 #define memblock_alloc(size, align) malloc(size) 10 - #define memblock_free(paddr, size) free(paddr) 9 + #define memblock_free_ptr(paddr, size) free(paddr) 11 10 12 11 #endif
+2 -6
tools/include/linux/compiler-gcc.h
··· 16 16 # define __fallthrough __attribute__ ((fallthrough)) 17 17 #endif 18 18 19 - #if GCC_VERSION >= 40300 19 + #if __has_attribute(__error__) 20 20 # define __compiletime_error(message) __attribute__((error(message))) 21 - #endif /* GCC_VERSION >= 40300 */ 21 + #endif 22 22 23 23 /* &a[0] degrades to a pointer: a different type from an array */ 24 24 #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) ··· 38 38 #endif 39 39 #define __printf(a, b) __attribute__((format(printf, a, b))) 40 40 #define __scanf(a, b) __attribute__((format(scanf, a, b))) 41 - 42 - #if GCC_VERSION >= 50100 43 - #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 44 - #endif
+3 -137
tools/include/linux/overflow.h
··· 5 5 #include <linux/compiler.h> 6 6 7 7 /* 8 - * In the fallback code below, we need to compute the minimum and 9 - * maximum values representable in a given type. These macros may also 10 - * be useful elsewhere, so we provide them outside the 11 - * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. 12 - * 13 - * It would seem more obvious to do something like 8 + * We need to compute the minimum and maximum values representable in a given 9 + * type. These macros may also be useful elsewhere. It would seem more obvious 10 + * to do something like: 14 11 * 15 12 * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) 16 13 * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) ··· 33 36 #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) 34 37 #define type_min(T) ((T)((T)-type_max(T)-(T)1)) 35 38 36 - 37 - #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 38 39 /* 39 40 * For simplicity and code hygiene, the fallback code below insists on 40 41 * a, b and *d having the same type (similar to the min() and max() ··· 67 72 (void) (&__a == __d); \ 68 73 __builtin_mul_overflow(__a, __b, __d); \ 69 74 }) 70 - 71 - #else 72 - 73 - 74 - /* Checking for unsigned overflow is relatively easy without causing UB. */ 75 - #define __unsigned_add_overflow(a, b, d) ({ \ 76 - typeof(a) __a = (a); \ 77 - typeof(b) __b = (b); \ 78 - typeof(d) __d = (d); \ 79 - (void) (&__a == &__b); \ 80 - (void) (&__a == __d); \ 81 - *__d = __a + __b; \ 82 - *__d < __a; \ 83 - }) 84 - #define __unsigned_sub_overflow(a, b, d) ({ \ 85 - typeof(a) __a = (a); \ 86 - typeof(b) __b = (b); \ 87 - typeof(d) __d = (d); \ 88 - (void) (&__a == &__b); \ 89 - (void) (&__a == __d); \ 90 - *__d = __a - __b; \ 91 - __a < __b; \ 92 - }) 93 - /* 94 - * If one of a or b is a compile-time constant, this avoids a division. 
95 - */ 96 - #define __unsigned_mul_overflow(a, b, d) ({ \ 97 - typeof(a) __a = (a); \ 98 - typeof(b) __b = (b); \ 99 - typeof(d) __d = (d); \ 100 - (void) (&__a == &__b); \ 101 - (void) (&__a == __d); \ 102 - *__d = __a * __b; \ 103 - __builtin_constant_p(__b) ? \ 104 - __b > 0 && __a > type_max(typeof(__a)) / __b : \ 105 - __a > 0 && __b > type_max(typeof(__b)) / __a; \ 106 - }) 107 - 108 - /* 109 - * For signed types, detecting overflow is much harder, especially if 110 - * we want to avoid UB. But the interface of these macros is such that 111 - * we must provide a result in *d, and in fact we must produce the 112 - * result promised by gcc's builtins, which is simply the possibly 113 - * wrapped-around value. Fortunately, we can just formally do the 114 - * operations in the widest relevant unsigned type (u64) and then 115 - * truncate the result - gcc is smart enough to generate the same code 116 - * with and without the (u64) casts. 117 - */ 118 - 119 - /* 120 - * Adding two signed integers can overflow only if they have the same 121 - * sign, and overflow has happened iff the result has the opposite 122 - * sign. 123 - */ 124 - #define __signed_add_overflow(a, b, d) ({ \ 125 - typeof(a) __a = (a); \ 126 - typeof(b) __b = (b); \ 127 - typeof(d) __d = (d); \ 128 - (void) (&__a == &__b); \ 129 - (void) (&__a == __d); \ 130 - *__d = (u64)__a + (u64)__b; \ 131 - (((~(__a ^ __b)) & (*__d ^ __a)) \ 132 - & type_min(typeof(__a))) != 0; \ 133 - }) 134 - 135 - /* 136 - * Subtraction is similar, except that overflow can now happen only 137 - * when the signs are opposite. In this case, overflow has happened if 138 - * the result has the opposite sign of a. 
139 - */ 140 - #define __signed_sub_overflow(a, b, d) ({ \ 141 - typeof(a) __a = (a); \ 142 - typeof(b) __b = (b); \ 143 - typeof(d) __d = (d); \ 144 - (void) (&__a == &__b); \ 145 - (void) (&__a == __d); \ 146 - *__d = (u64)__a - (u64)__b; \ 147 - ((((__a ^ __b)) & (*__d ^ __a)) \ 148 - & type_min(typeof(__a))) != 0; \ 149 - }) 150 - 151 - /* 152 - * Signed multiplication is rather hard. gcc always follows C99, so 153 - * division is truncated towards 0. This means that we can write the 154 - * overflow check like this: 155 - * 156 - * (a > 0 && (b > MAX/a || b < MIN/a)) || 157 - * (a < -1 && (b > MIN/a || b < MAX/a) || 158 - * (a == -1 && b == MIN) 159 - * 160 - * The redundant casts of -1 are to silence an annoying -Wtype-limits 161 - * (included in -Wextra) warning: When the type is u8 or u16, the 162 - * __b_c_e in check_mul_overflow obviously selects 163 - * __unsigned_mul_overflow, but unfortunately gcc still parses this 164 - * code and warns about the limited range of __b. 165 - */ 166 - 167 - #define __signed_mul_overflow(a, b, d) ({ \ 168 - typeof(a) __a = (a); \ 169 - typeof(b) __b = (b); \ 170 - typeof(d) __d = (d); \ 171 - typeof(a) __tmax = type_max(typeof(a)); \ 172 - typeof(a) __tmin = type_min(typeof(a)); \ 173 - (void) (&__a == &__b); \ 174 - (void) (&__a == __d); \ 175 - *__d = (u64)__a * (u64)__b; \ 176 - (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ 177 - (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ 178 - (__b == (typeof(__b))-1 && __a == __tmin); \ 179 - }) 180 - 181 - 182 - #define check_add_overflow(a, b, d) \ 183 - __builtin_choose_expr(is_signed_type(typeof(a)), \ 184 - __signed_add_overflow(a, b, d), \ 185 - __unsigned_add_overflow(a, b, d)) 186 - 187 - #define check_sub_overflow(a, b, d) \ 188 - __builtin_choose_expr(is_signed_type(typeof(a)), \ 189 - __signed_sub_overflow(a, b, d), \ 190 - __unsigned_sub_overflow(a, b, d)) 191 - 192 - #define check_mul_overflow(a, b, d) \ 193 - 
__builtin_choose_expr(is_signed_type(typeof(a)), \ 194 - __signed_mul_overflow(a, b, d), \ 195 - __unsigned_mul_overflow(a, b, d)) 196 - 197 - 198 - #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ 199 75 200 76 /** 201 77 * array_size() - Calculate size of 2-dimensional array.
+41 -23
tools/lib/perf/evsel.c
··· 43 43 free(evsel); 44 44 } 45 45 46 - #define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y)) 46 + #define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y)) 47 47 #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL) 48 48 49 49 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) ··· 54 54 int cpu, thread; 55 55 for (cpu = 0; cpu < ncpus; cpu++) { 56 56 for (thread = 0; thread < nthreads; thread++) { 57 - FD(evsel, cpu, thread) = -1; 57 + int *fd = FD(evsel, cpu, thread); 58 + 59 + if (fd) 60 + *fd = -1; 58 61 } 59 62 } 60 63 } ··· 83 80 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd) 84 81 { 85 82 struct perf_evsel *leader = evsel->leader; 86 - int fd; 83 + int *fd; 87 84 88 85 if (evsel == leader) { 89 86 *group_fd = -1; ··· 98 95 return -ENOTCONN; 99 96 100 97 fd = FD(leader, cpu, thread); 101 - if (fd == -1) 98 + if (fd == NULL || *fd == -1) 102 99 return -EBADF; 103 100 104 - *group_fd = fd; 101 + *group_fd = *fd; 105 102 106 103 return 0; 107 104 } ··· 141 138 142 139 for (cpu = 0; cpu < cpus->nr; cpu++) { 143 140 for (thread = 0; thread < threads->nr; thread++) { 144 - int fd, group_fd; 141 + int fd, group_fd, *evsel_fd; 142 + 143 + evsel_fd = FD(evsel, cpu, thread); 144 + if (evsel_fd == NULL) 145 + return -EINVAL; 145 146 146 147 err = get_group_fd(evsel, cpu, thread, &group_fd); 147 148 if (err < 0) ··· 158 151 if (fd < 0) 159 152 return -errno; 160 153 161 - FD(evsel, cpu, thread) = fd; 154 + *evsel_fd = fd; 162 155 } 163 156 } 164 157 ··· 170 163 int thread; 171 164 172 165 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { 173 - if (FD(evsel, cpu, thread) >= 0) 174 - close(FD(evsel, cpu, thread)); 175 - FD(evsel, cpu, thread) = -1; 166 + int *fd = FD(evsel, cpu, thread); 167 + 168 + if (fd && *fd >= 0) { 169 + close(*fd); 170 + *fd = -1; 171 + } 176 172 } 177 173 } 178 174 ··· 219 209 220 210 for (cpu = 0; cpu < 
xyarray__max_x(evsel->fd); cpu++) { 221 211 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 222 - int fd = FD(evsel, cpu, thread); 223 - struct perf_mmap *map = MMAP(evsel, cpu, thread); 212 + int *fd = FD(evsel, cpu, thread); 224 213 225 - if (fd < 0) 214 + if (fd == NULL || *fd < 0) 226 215 continue; 227 216 228 - perf_mmap__munmap(map); 217 + perf_mmap__munmap(MMAP(evsel, cpu, thread)); 229 218 } 230 219 } 231 220 ··· 248 239 249 240 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { 250 241 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 251 - int fd = FD(evsel, cpu, thread); 252 - struct perf_mmap *map = MMAP(evsel, cpu, thread); 242 + int *fd = FD(evsel, cpu, thread); 243 + struct perf_mmap *map; 253 244 254 - if (fd < 0) 245 + if (fd == NULL || *fd < 0) 255 246 continue; 256 247 248 + map = MMAP(evsel, cpu, thread); 257 249 perf_mmap__init(map, NULL, false, NULL); 258 250 259 - ret = perf_mmap__mmap(map, &mp, fd, cpu); 251 + ret = perf_mmap__mmap(map, &mp, *fd, cpu); 260 252 if (ret) { 261 253 perf_evsel__munmap(evsel); 262 254 return ret; ··· 270 260 271 261 void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread) 272 262 { 273 - if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL) 263 + int *fd = FD(evsel, cpu, thread); 264 + 265 + if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL) 274 266 return NULL; 275 267 276 268 return MMAP(evsel, cpu, thread)->base; ··· 307 295 struct perf_counts_values *count) 308 296 { 309 297 size_t size = perf_evsel__read_size(evsel); 298 + int *fd = FD(evsel, cpu, thread); 310 299 311 300 memset(count, 0, sizeof(*count)); 312 301 313 - if (FD(evsel, cpu, thread) < 0) 302 + if (fd == NULL || *fd < 0) 314 303 return -EINVAL; 315 304 316 305 if (MMAP(evsel, cpu, thread) && 317 306 !perf_mmap__read_self(MMAP(evsel, cpu, thread), count)) 318 307 return 0; 319 308 320 - if (readn(FD(evsel, cpu, thread), count->values, size) <= 0) 309 + if (readn(*fd, 
count->values, size) <= 0) 321 310 return -errno; 322 311 323 312 return 0; ··· 331 318 int thread; 332 319 333 320 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 334 - int fd = FD(evsel, cpu, thread), 335 - err = ioctl(fd, ioc, arg); 321 + int err; 322 + int *fd = FD(evsel, cpu, thread); 323 + 324 + if (fd == NULL || *fd < 0) 325 + return -1; 326 + 327 + err = ioctl(*fd, ioc, arg); 336 328 337 329 if (err) 338 330 return err;
+13 -11
tools/perf/builtin-script.c
··· 368 368 return OUTPUT_TYPE_OTHER; 369 369 } 370 370 371 - static inline unsigned int attr_type(unsigned int type) 372 - { 373 - switch (type) { 374 - case OUTPUT_TYPE_SYNTH: 375 - return PERF_TYPE_SYNTH; 376 - default: 377 - return type; 378 - } 379 - } 380 - 381 371 static bool output_set_by_user(void) 382 372 { 383 373 int j; ··· 546 556 output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE; 547 557 } 548 558 559 + static struct evsel *find_first_output_type(struct evlist *evlist, 560 + unsigned int type) 561 + { 562 + struct evsel *evsel; 563 + 564 + evlist__for_each_entry(evlist, evsel) { 565 + if (output_type(evsel->core.attr.type) == (int)type) 566 + return evsel; 567 + } 568 + return NULL; 569 + } 570 + 549 571 /* 550 572 * verify all user requested events exist and the samples 551 573 * have the expected data ··· 569 567 struct evsel *evsel; 570 568 571 569 for (j = 0; j < OUTPUT_TYPE_MAX; ++j) { 572 - evsel = perf_session__find_first_evtype(session, attr_type(j)); 570 + evsel = find_first_output_type(session->evlist, j); 573 571 574 572 /* 575 573 * even if fields is set to 0 (ie., show nothing) event must
+24 -9
tools/perf/ui/browser.c
··· 757 757 } 758 758 759 759 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column, 760 - unsigned int row, bool arrow_down) 760 + unsigned int row, int diff, bool arrow_down) 761 761 { 762 - unsigned int end_row; 762 + int end_row; 763 763 764 - if (row >= browser->top_idx) 765 - end_row = row - browser->top_idx; 766 - else 764 + if (diff <= 0) 767 765 return; 768 766 769 767 SLsmg_set_char_set(1); 770 768 771 769 if (arrow_down) { 770 + if (row + diff <= browser->top_idx) 771 + return; 772 + 773 + end_row = row + diff - browser->top_idx; 772 774 ui_browser__gotorc(browser, end_row, column - 1); 773 - SLsmg_write_char(SLSMG_ULCORN_CHAR); 774 - ui_browser__gotorc(browser, end_row, column); 775 - SLsmg_draw_hline(2); 776 - ui_browser__gotorc(browser, end_row + 1, column - 1); 777 775 SLsmg_write_char(SLSMG_LTEE_CHAR); 776 + 777 + while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) { 778 + ui_browser__gotorc(browser, end_row, column - 1); 779 + SLsmg_draw_vline(1); 780 + } 781 + 782 + end_row = (int)(row - browser->top_idx); 783 + if (end_row >= 0) { 784 + ui_browser__gotorc(browser, end_row, column - 1); 785 + SLsmg_write_char(SLSMG_ULCORN_CHAR); 786 + ui_browser__gotorc(browser, end_row, column); 787 + SLsmg_draw_hline(2); 788 + } 778 789 } else { 790 + if (row < browser->top_idx) 791 + return; 792 + 793 + end_row = row - browser->top_idx; 779 794 ui_browser__gotorc(browser, end_row, column - 1); 780 795 SLsmg_write_char(SLSMG_LTEE_CHAR); 781 796 ui_browser__gotorc(browser, end_row, column);
+1 -1
tools/perf/ui/browser.h
··· 51 51 void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column, 52 52 u64 start, u64 end); 53 53 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column, 54 - unsigned int row, bool arrow_down); 54 + unsigned int row, int diff, bool arrow_down); 55 55 void __ui_browser__show_title(struct ui_browser *browser, const char *title); 56 56 void ui_browser__show_title(struct ui_browser *browser, const char *title); 57 57 int ui_browser__show(struct ui_browser *browser, const char *title,
+17 -7
tools/perf/ui/browsers/annotate.c
··· 125 125 ab->selection = al; 126 126 } 127 127 128 - static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor) 128 + static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor) 129 129 { 130 130 struct disasm_line *pos = list_prev_entry(cursor, al.node); 131 131 const char *name; 132 + int diff = 1; 133 + 134 + while (pos && pos->al.offset == -1) { 135 + pos = list_prev_entry(pos, al.node); 136 + if (!ab->opts->hide_src_code) 137 + diff++; 138 + } 132 139 133 140 if (!pos) 134 - return false; 141 + return 0; 135 142 136 143 if (ins__is_lock(&pos->ins)) 137 144 name = pos->ops.locked.ins.name; ··· 146 139 name = pos->ins.name; 147 140 148 141 if (!name || !cursor->ins.name) 149 - return false; 142 + return 0; 150 143 151 - return ins__is_fused(ab->arch, name, cursor->ins.name); 144 + if (ins__is_fused(ab->arch, name, cursor->ins.name)) 145 + return diff; 146 + return 0; 152 147 } 153 148 154 149 static void annotate_browser__draw_current_jump(struct ui_browser *browser) ··· 164 155 struct annotation *notes = symbol__annotation(sym); 165 156 u8 pcnt_width = annotation__pcnt_width(notes); 166 157 int width; 158 + int diff = 0; 167 159 168 160 /* PLT symbols contain external offsets */ 169 161 if (strstr(sym->name, "@plt")) ··· 215 205 pcnt_width + 2 + notes->widths.addr + width, 216 206 from, to); 217 207 218 - if (is_fused(ab, cursor)) { 208 + diff = is_fused(ab, cursor); 209 + if (diff > 0) { 219 210 ui_browser__mark_fused(browser, 220 211 pcnt_width + 3 + notes->widths.addr + width, 221 - from - 1, 222 - to > from); 212 + from - diff, diff, to > from); 223 213 } 224 214 } 225 215
+3
tools/perf/util/bpf-event.c
··· 24 24 struct btf * __weak btf__load_from_kernel_by_id(__u32 id) 25 25 { 26 26 struct btf *btf; 27 + #pragma GCC diagnostic push 28 + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" 27 29 int err = btf__get_from_id(id, &btf); 30 + #pragma GCC diagnostic pop 28 31 29 32 return err ? ERR_PTR(err) : btf; 30 33 }
+1
tools/perf/util/machine.c
··· 2149 2149 2150 2150 al.filtered = 0; 2151 2151 al.sym = NULL; 2152 + al.srcline = NULL; 2152 2153 if (!cpumode) { 2153 2154 thread__find_cpumode_addr_location(thread, ip, &al); 2154 2155 } else {
+128 -9
tools/testing/selftests/bpf/cgroup_helpers.c
··· 12 12 #include <unistd.h> 13 13 #include <ftw.h> 14 14 15 - 16 15 #include "cgroup_helpers.h" 17 16 18 17 /* 19 18 * To avoid relying on the system setup, when setup_cgroup_env is called 20 - * we create a new mount namespace, and cgroup namespace. The cgroup2 21 - * root is mounted at CGROUP_MOUNT_PATH 19 + * we create a new mount namespace, and cgroup namespace. The cgroupv2 20 + * root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't 21 + * have cgroupv2 enabled at this point in time. It's easier to create our 22 + * own mount namespace and manage it ourselves. We assume /mnt exists. 22 23 * 23 - * Unfortunately, most people don't have cgroupv2 enabled at this point in time. 24 - * It's easier to create our own mount namespace and manage it ourselves. 25 - * 26 - * We assume /mnt exists. 24 + * Related cgroupv1 helpers are named *classid*(), since we only use the 25 + * net_cls controller for tagging net_cls.classid. We assume the default 26 + * mount under /sys/fs/cgroup/net_cls, which should be the case for the 27 + * vast majority of users. 
27 28 */ 28 29 29 30 #define WALK_FD_LIMIT 16 31 + 30 32 #define CGROUP_MOUNT_PATH "/mnt" 33 + #define CGROUP_MOUNT_DFLT "/sys/fs/cgroup" 34 + #define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls" 31 35 #define CGROUP_WORK_DIR "/cgroup-test-work-dir" 36 + 32 37 #define format_cgroup_path(buf, path) \ 33 38 snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \ 34 39 CGROUP_WORK_DIR, path) 40 + 41 + #define format_classid_path(buf) \ 42 + snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \ 43 + CGROUP_WORK_DIR) 35 44 36 45 /** 37 46 * enable_all_controllers() - Enable all available cgroup v2 controllers ··· 148 139 return 0; 149 140 } 150 141 151 - 152 - static int join_cgroup_from_top(char *cgroup_path) 142 + static int join_cgroup_from_top(const char *cgroup_path) 153 143 { 154 144 char cgroup_procs_path[PATH_MAX + 1]; 155 145 pid_t pid = getpid(); ··· 320 312 return -EINVAL; 321 313 } 322 314 return cg_fd; 315 + } 316 + 317 + /** 318 + * setup_classid_environment() - Setup the cgroupv1 net_cls environment 319 + * 320 + * After calling this function, cleanup_classid_environment should be called 321 + * once testing is complete. 322 + * 323 + * This function will print an error to stderr and return 1 if it is unable 324 + * to setup the cgroup environment. If setup is successful, 0 is returned. 
325 + */ 326 + int setup_classid_environment(void) 327 + { 328 + char cgroup_workdir[PATH_MAX + 1]; 329 + 330 + format_classid_path(cgroup_workdir); 331 + 332 + if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) && 333 + errno != EBUSY) { 334 + log_err("mount cgroup base"); 335 + return 1; 336 + } 337 + 338 + if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) { 339 + log_err("mkdir cgroup net_cls"); 340 + return 1; 341 + } 342 + 343 + if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") && 344 + errno != EBUSY) { 345 + log_err("mount cgroup net_cls"); 346 + return 1; 347 + } 348 + 349 + cleanup_classid_environment(); 350 + 351 + if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) { 352 + log_err("mkdir cgroup work dir"); 353 + return 1; 354 + } 355 + 356 + return 0; 357 + } 358 + 359 + /** 360 + * set_classid() - Set a cgroupv1 net_cls classid 361 + * @id: the numeric classid 362 + * 363 + * Writes the passed classid into the cgroup work dir's net_cls.classid 364 + * file in order to later on trigger socket tagging. 365 + * 366 + * On success, it returns 0, otherwise on failure it returns 1. If there 367 + * is a failure, it prints the error to stderr. 
368 + */ 369 + int set_classid(unsigned int id) 370 + { 371 + char cgroup_workdir[PATH_MAX - 42]; 372 + char cgroup_classid_path[PATH_MAX + 1]; 373 + int fd, rc = 0; 374 + 375 + format_classid_path(cgroup_workdir); 376 + snprintf(cgroup_classid_path, sizeof(cgroup_classid_path), 377 + "%s/net_cls.classid", cgroup_workdir); 378 + 379 + fd = open(cgroup_classid_path, O_WRONLY); 380 + if (fd < 0) { 381 + log_err("Opening cgroup classid: %s", cgroup_classid_path); 382 + return 1; 383 + } 384 + 385 + if (dprintf(fd, "%u\n", id) < 0) { 386 + log_err("Setting cgroup classid"); 387 + rc = 1; 388 + } 389 + 390 + close(fd); 391 + return rc; 392 + } 393 + 394 + /** 395 + * join_classid() - Join a cgroupv1 net_cls classid 396 + * 397 + * This function expects the cgroup work dir to be already created, as we 398 + * join it here. This causes the process sockets to be tagged with the given 399 + * net_cls classid. 400 + * 401 + * On success, it returns 0, otherwise on failure it returns 1. 402 + */ 403 + int join_classid(void) 404 + { 405 + char cgroup_workdir[PATH_MAX + 1]; 406 + 407 + format_classid_path(cgroup_workdir); 408 + return join_cgroup_from_top(cgroup_workdir); 409 + } 410 + 411 + /** 412 + * cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment 413 + * 414 + * At call time, it moves the calling process to the root cgroup, and then 415 + * runs the deletion process. 416 + * 417 + * On failure, it will print an error to stderr, and try to continue. 418 + */ 419 + void cleanup_classid_environment(void) 420 + { 421 + char cgroup_workdir[PATH_MAX + 1]; 422 + 423 + format_classid_path(cgroup_workdir); 424 + join_cgroup_from_top(NETCLS_MOUNT_PATH); 425 + nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT); 323 426 }
+15 -5
tools/testing/selftests/bpf/cgroup_helpers.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #ifndef __CGROUP_HELPERS_H 3 3 #define __CGROUP_HELPERS_H 4 + 4 5 #include <errno.h> 5 6 #include <string.h> 6 7 ··· 9 8 #define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ 10 9 __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__) 11 10 12 - 11 + /* cgroupv2 related */ 13 12 int cgroup_setup_and_join(const char *path); 14 13 int create_and_get_cgroup(const char *path); 15 - int join_cgroup(const char *path); 16 - int setup_cgroup_environment(void); 17 - void cleanup_cgroup_environment(void); 18 14 unsigned long long get_cgroup_id(const char *path); 19 15 20 - #endif 16 + int join_cgroup(const char *path); 17 + 18 + int setup_cgroup_environment(void); 19 + void cleanup_cgroup_environment(void); 20 + 21 + /* cgroupv1 related */ 22 + int set_classid(unsigned int id); 23 + int join_classid(void); 24 + 25 + int setup_classid_environment(void); 26 + void cleanup_classid_environment(void); 27 + 28 + #endif /* __CGROUP_HELPERS_H */
+21 -6
tools/testing/selftests/bpf/network_helpers.c
··· 208 208 209 209 static int connect_fd_to_addr(int fd, 210 210 const struct sockaddr_storage *addr, 211 - socklen_t addrlen) 211 + socklen_t addrlen, const bool must_fail) 212 212 { 213 - if (connect(fd, (const struct sockaddr *)addr, addrlen)) { 214 - log_err("Failed to connect to server"); 215 - return -1; 213 + int ret; 214 + 215 + errno = 0; 216 + ret = connect(fd, (const struct sockaddr *)addr, addrlen); 217 + if (must_fail) { 218 + if (!ret) { 219 + log_err("Unexpected success to connect to server"); 220 + return -1; 221 + } 222 + if (errno != EPERM) { 223 + log_err("Unexpected error from connect to server"); 224 + return -1; 225 + } 226 + } else { 227 + if (ret) { 228 + log_err("Failed to connect to server"); 229 + return -1; 230 + } 216 231 } 217 232 218 233 return 0; ··· 272 257 strlen(opts->cc) + 1)) 273 258 goto error_close; 274 259 275 - if (connect_fd_to_addr(fd, &addr, addrlen)) 260 + if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail)) 276 261 goto error_close; 277 262 278 263 return fd; ··· 304 289 return -1; 305 290 } 306 291 307 - if (connect_fd_to_addr(client_fd, &addr, len)) 292 + if (connect_fd_to_addr(client_fd, &addr, len, false)) 308 293 return -1; 309 294 310 295 return 0;
+1
tools/testing/selftests/bpf/network_helpers.h
··· 20 20 struct network_helper_opts { 21 21 const char *cc; 22 22 int timeout_ms; 23 + bool must_fail; 23 24 }; 24 25 25 26 /* ipv4 test vector */
+79
tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <test_progs.h> 4 + 5 + #include "connect4_dropper.skel.h" 6 + 7 + #include "cgroup_helpers.h" 8 + #include "network_helpers.h" 9 + 10 + static int run_test(int cgroup_fd, int server_fd, bool classid) 11 + { 12 + struct network_helper_opts opts = { 13 + .must_fail = true, 14 + }; 15 + struct connect4_dropper *skel; 16 + int fd, err = 0; 17 + 18 + skel = connect4_dropper__open_and_load(); 19 + if (!ASSERT_OK_PTR(skel, "skel_open")) 20 + return -1; 21 + 22 + skel->links.connect_v4_dropper = 23 + bpf_program__attach_cgroup(skel->progs.connect_v4_dropper, 24 + cgroup_fd); 25 + if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) { 26 + err = -1; 27 + goto out; 28 + } 29 + 30 + if (classid && !ASSERT_OK(join_classid(), "join_classid")) { 31 + err = -1; 32 + goto out; 33 + } 34 + 35 + fd = connect_to_fd_opts(server_fd, &opts); 36 + if (fd < 0) 37 + err = -1; 38 + else 39 + close(fd); 40 + out: 41 + connect4_dropper__destroy(skel); 42 + return err; 43 + } 44 + 45 + void test_cgroup_v1v2(void) 46 + { 47 + struct network_helper_opts opts = {}; 48 + int server_fd, client_fd, cgroup_fd; 49 + static const int port = 60123; 50 + 51 + /* Step 1: Check base connectivity works without any BPF. */ 52 + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); 53 + if (!ASSERT_GE(server_fd, 0, "server_fd")) 54 + return; 55 + client_fd = connect_to_fd_opts(server_fd, &opts); 56 + if (!ASSERT_GE(client_fd, 0, "client_fd")) { 57 + close(server_fd); 58 + return; 59 + } 60 + close(client_fd); 61 + close(server_fd); 62 + 63 + /* Step 2: Check BPF policy prog attached to cgroups drops connectivity. 
*/ 64 + cgroup_fd = test__join_cgroup("/connect_dropper"); 65 + if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd")) 66 + return; 67 + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); 68 + if (!ASSERT_GE(server_fd, 0, "server_fd")) { 69 + close(cgroup_fd); 70 + return; 71 + } 72 + ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only"); 73 + setup_classid_environment(); 74 + set_classid(42); 75 + ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2"); 76 + cleanup_classid_environment(); 77 + close(server_fd); 78 + close(cgroup_fd); 79 + }
-1
tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #define _GNU_SOURCE 3 3 #include <test_progs.h> 4 - #include <linux/ptrace.h> 5 4 #include "test_task_pt_regs.skel.h" 6 5 7 6 void test_task_pt_regs(void)
+26
tools/testing/selftests/bpf/progs/connect4_dropper.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <string.h> 4 + 5 + #include <linux/stddef.h> 6 + #include <linux/bpf.h> 7 + 8 + #include <sys/socket.h> 9 + 10 + #include <bpf/bpf_helpers.h> 11 + #include <bpf/bpf_endian.h> 12 + 13 + #define VERDICT_REJECT 0 14 + #define VERDICT_PROCEED 1 15 + 16 + SEC("cgroup/connect4") 17 + int connect_v4_dropper(struct bpf_sock_addr *ctx) 18 + { 19 + if (ctx->type != SOCK_STREAM) 20 + return VERDICT_PROCEED; 21 + if (ctx->user_port == bpf_htons(60123)) 22 + return VERDICT_REJECT; 23 + return VERDICT_PROCEED; 24 + } 25 + 26 + char _license[] SEC("license") = "GPL";
+13 -6
tools/testing/selftests/bpf/progs/test_task_pt_regs.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 - #include <linux/ptrace.h> 4 - #include <linux/bpf.h> 3 + #include "vmlinux.h" 5 4 #include <bpf/bpf_helpers.h> 6 5 #include <bpf/bpf_tracing.h> 7 6 8 - struct pt_regs current_regs = {}; 9 - struct pt_regs ctx_regs = {}; 7 + #define PT_REGS_SIZE sizeof(struct pt_regs) 8 + 9 + /* 10 + * The kernel struct pt_regs isn't exported in its entirety to userspace. 11 + * Pass it as an array to task_pt_regs.c 12 + */ 13 + char current_regs[PT_REGS_SIZE] = {}; 14 + char ctx_regs[PT_REGS_SIZE] = {}; 10 15 int uprobe_res = 0; 11 16 12 17 SEC("uprobe/trigger_func") ··· 22 17 23 18 current = bpf_get_current_task_btf(); 24 19 regs = (struct pt_regs *) bpf_task_pt_regs(current); 25 - __builtin_memcpy(&current_regs, regs, sizeof(*regs)); 26 - __builtin_memcpy(&ctx_regs, ctx, sizeof(*ctx)); 20 + if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs)) 21 + return 0; 22 + if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx)) 23 + return 0; 27 24 28 25 /* Prove that uprobe was run */ 29 26 uprobe_res = 1;
+1 -1
tools/testing/selftests/nci/nci_dev.c
··· 746 746 const __u8 *rsp, __u32 rsp_len) 747 747 { 748 748 char buf[256]; 749 - unsigned int len; 749 + int len; 750 750 751 751 send(nfc_sock, &cmd[3], cmd_len - 3, 0); 752 752 len = read(virtual_fd, buf, cmd_len);
+1 -1
tools/testing/selftests/net/altnames.sh
··· 45 45 check_err $? "Got unexpected long alternative name from link show JSON" 46 46 47 47 ip link property del $DUMMY_DEV altname $SHORT_NAME 48 - check_err $? "Failed to add short alternative name" 48 + check_err $? "Failed to delete short alternative name" 49 49 50 50 ip -j -p link show $SHORT_NAME &>/dev/null 51 51 check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
+36 -1
tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #include <ppc-asm.h> 2 + #include <basic_asm.h> 3 3 #include <asm/unistd.h> 4 4 5 5 .text ··· 25 25 blr 26 26 1: 27 27 li r3, -1 28 + blr 29 + 30 + 31 + .macro scv level 32 + .long (0x44000001 | (\level) << 5) 33 + .endm 34 + 35 + FUNC_START(getppid_scv_tm_active) 36 + PUSH_BASIC_STACK(0) 37 + tbegin. 38 + beq 1f 39 + li r0, __NR_getppid 40 + scv 0 41 + tend. 42 + POP_BASIC_STACK(0) 43 + blr 44 + 1: 45 + li r3, -1 46 + POP_BASIC_STACK(0) 47 + blr 48 + 49 + FUNC_START(getppid_scv_tm_suspended) 50 + PUSH_BASIC_STACK(0) 51 + tbegin. 52 + beq 1f 53 + li r0, __NR_getppid 54 + tsuspend. 55 + scv 0 56 + tresume. 57 + tend. 58 + POP_BASIC_STACK(0) 59 + blr 60 + 1: 61 + li r3, -1 62 + POP_BASIC_STACK(0) 28 63 blr
+29 -7
tools/testing/selftests/powerpc/tm/tm-syscall.c
··· 19 19 #include "utils.h" 20 20 #include "tm.h" 21 21 22 + #ifndef PPC_FEATURE2_SCV 23 + #define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */ 24 + #endif 25 + 22 26 extern int getppid_tm_active(void); 23 27 extern int getppid_tm_suspended(void); 28 + extern int getppid_scv_tm_active(void); 29 + extern int getppid_scv_tm_suspended(void); 24 30 25 31 unsigned retries = 0; 26 32 27 33 #define TEST_DURATION 10 /* seconds */ 28 34 29 - pid_t getppid_tm(bool suspend) 35 + pid_t getppid_tm(bool scv, bool suspend) 30 36 { 31 37 int i; 32 38 pid_t pid; 33 39 34 40 for (i = 0; i < TM_RETRIES; i++) { 35 - if (suspend) 36 - pid = getppid_tm_suspended(); 37 - else 38 - pid = getppid_tm_active(); 41 + if (suspend) { 42 + if (scv) 43 + pid = getppid_scv_tm_suspended(); 44 + else 45 + pid = getppid_tm_suspended(); 46 + } else { 47 + if (scv) 48 + pid = getppid_scv_tm_active(); 49 + else 50 + pid = getppid_tm_active(); 51 + } 39 52 40 53 if (pid >= 0) 41 54 return pid; ··· 95 82 * Test a syscall within a suspended transaction and verify 96 83 * that it succeeds. 97 84 */ 98 - FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */ 85 + FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */ 99 86 100 87 /* 101 88 * Test a syscall within an active transaction and verify that 102 89 * it fails with the correct failure code. 103 90 */ 104 - FAIL_IF(getppid_tm(false) != -1); /* Should fail... */ 91 + FAIL_IF(getppid_tm(false, false) != -1); /* Should fail... */ 105 92 FAIL_IF(!failure_is_persistent()); /* ...persistently... */ 106 93 FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */ 94 + 95 + /* Now do it all again with scv if it is available. */ 96 + if (have_hwcap2(PPC_FEATURE2_SCV)) { 97 + FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */ 98 + FAIL_IF(getppid_tm(true, false) != -1); /* Should fail... */ 99 + FAIL_IF(!failure_is_persistent()); /* ...persistently... */ 100 + FAIL_IF(!failure_is_syscall()); /* ...with code syscall. 
*/ 101 + } 102 + 107 103 gettimeofday(&now, 0); 108 104 } 109 105