Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Filling in the padding slot in the bpf structure as a bug fix in 'net'
overlapped with actually using that padding area for something in
'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1057 -568
+1
Documentation/devicetree/bindings/net/dsa/b53.txt
··· 10 10 "brcm,bcm53128" 11 11 "brcm,bcm5365" 12 12 "brcm,bcm5395" 13 + "brcm,bcm5389" 13 14 "brcm,bcm5397" 14 15 "brcm,bcm5398" 15 16
+1 -1
Documentation/i2c/busses/i2c-ocores
··· 2 2 3 3 Supported adapters: 4 4 * OpenCores.org I2C controller by Richard Herveille (see datasheet link) 5 - Datasheet: http://www.opencores.org/projects.cgi/web/i2c/overview 5 + https://opencores.org/project/i2c/overview 6 6 7 7 Author: Peter Korsgaard <jacmet@sunsite.dk> 8 8
+8
MAINTAINERS
··· 15554 15554 S: Supported 15555 15555 F: drivers/char/xillybus/ 15556 15556 15557 + XLP9XX I2C DRIVER 15558 + M: George Cherian <george.cherian@cavium.com> 15559 + M: Jan Glauber <jglauber@cavium.com> 15560 + L: linux-i2c@vger.kernel.org 15561 + W: http://www.cavium.com 15562 + S: Supported 15563 + F: drivers/i2c/busses/i2c-xlp9xx.c 15564 + 15557 15565 XRA1403 GPIO EXPANDER 15558 15566 M: Nandor Han <nandor.han@ge.com> 15559 15567 M: Semi Malinen <semi.malinen@ge.com>
+7 -4
Makefile
··· 2 2 VERSION = 4 3 3 PATCHLEVEL = 17 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc6 5 + EXTRAVERSION = -rc7 6 6 NAME = Merciless Moray 7 7 8 8 # *DOCUMENTATION* ··· 500 500 RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) 501 501 export RETPOLINE_CFLAGS 502 502 503 + KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 504 + KBUILD_AFLAGS += $(call cc-option,-fno-PIE) 505 + 503 506 # check for 'asm goto' 504 507 ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) 505 508 CC_HAVE_ASM_GOTO := 1 ··· 624 621 # Defaults to vmlinux, but the arch makefile usually adds further targets 625 622 all: vmlinux 626 623 627 - KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 628 - KBUILD_AFLAGS += $(call cc-option,-fno-PIE) 629 - CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) 624 + CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ 625 + $(call cc-option,-fno-tree-loop-im) \ 626 + $(call cc-disable-warning,maybe-uninitialized,) 630 627 export CFLAGS_GCOV CFLAGS_KCOV 631 628 632 629 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
+3 -3
arch/arm/boot/dts/sun4i-a10.dtsi
··· 76 76 allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi"; 77 77 clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>, 78 78 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, 79 - <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, 79 + <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>, 80 80 <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>, 81 81 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; 82 82 status = "disabled"; ··· 88 88 allwinner,pipeline = "de_fe0-de_be0-lcd0"; 89 89 clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>, 90 90 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>, 91 - <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_TCON0_CH0>, 91 + <&ccu CLK_DE_FE0>, <&ccu CLK_TCON0_CH0>, 92 92 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; 93 93 status = "disabled"; 94 94 }; ··· 99 99 allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0"; 100 100 clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>, 101 101 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, 102 - <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, 102 + <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>, 103 103 <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>, 104 104 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; 105 105 status = "disabled";
+1
arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
··· 117 117 phy-handle = <&int_mii_phy>; 118 118 phy-mode = "mii"; 119 119 allwinner,leds-active-low; 120 + status = "okay"; 120 121 }; 121 122 122 123 &hdmi {
+1 -1
arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
··· 51 51 52 52 leds { 53 53 /* The LEDs use PG0~2 pins, which conflict with MMC1 */ 54 - status = "disbaled"; 54 + status = "disabled"; 55 55 }; 56 56 }; 57 57
+1 -1
arch/arm/mach-ep93xx/core.c
··· 323 323 324 324 /* All EP93xx devices use the same two GPIO pins for I2C bit-banging */ 325 325 static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = { 326 - .dev_id = "i2c-gpio", 326 + .dev_id = "i2c-gpio.0", 327 327 .table = { 328 328 /* Use local offsets on gpiochip/port "G" */ 329 329 GPIO_LOOKUP_IDX("G", 1, NULL, 0,
+1 -1
arch/arm/mach-ixp4xx/avila-setup.c
··· 51 51 }; 52 52 53 53 static struct gpiod_lookup_table avila_i2c_gpiod_table = { 54 - .dev_id = "i2c-gpio", 54 + .dev_id = "i2c-gpio.0", 55 55 .table = { 56 56 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN, 57 57 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+1 -1
arch/arm/mach-ixp4xx/dsmg600-setup.c
··· 70 70 }; 71 71 72 72 static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = { 73 - .dev_id = "i2c-gpio", 73 + .dev_id = "i2c-gpio.0", 74 74 .table = { 75 75 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN, 76 76 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+1 -1
arch/arm/mach-ixp4xx/fsg-setup.c
··· 56 56 }; 57 57 58 58 static struct gpiod_lookup_table fsg_i2c_gpiod_table = { 59 - .dev_id = "i2c-gpio", 59 + .dev_id = "i2c-gpio.0", 60 60 .table = { 61 61 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN, 62 62 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+1 -1
arch/arm/mach-ixp4xx/ixdp425-setup.c
··· 124 124 #endif /* CONFIG_MTD_NAND_PLATFORM */ 125 125 126 126 static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = { 127 - .dev_id = "i2c-gpio", 127 + .dev_id = "i2c-gpio.0", 128 128 .table = { 129 129 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN, 130 130 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+1 -1
arch/arm/mach-ixp4xx/nas100d-setup.c
··· 102 102 }; 103 103 104 104 static struct gpiod_lookup_table nas100d_i2c_gpiod_table = { 105 - .dev_id = "i2c-gpio", 105 + .dev_id = "i2c-gpio.0", 106 106 .table = { 107 107 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN, 108 108 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+1 -1
arch/arm/mach-ixp4xx/nslu2-setup.c
··· 70 70 }; 71 71 72 72 static struct gpiod_lookup_table nslu2_i2c_gpiod_table = { 73 - .dev_id = "i2c-gpio", 73 + .dev_id = "i2c-gpio.0", 74 74 .table = { 75 75 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN, 76 76 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+1 -1
arch/arm/mach-pxa/palmz72.c
··· 322 322 }; 323 323 324 324 static struct gpiod_lookup_table palmz72_i2c_gpiod_table = { 325 - .dev_id = "i2c-gpio", 325 + .dev_id = "i2c-gpio.0", 326 326 .table = { 327 327 GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0, 328 328 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+2 -2
arch/arm/mach-pxa/viper.c
··· 460 460 461 461 /* i2c */ 462 462 static struct gpiod_lookup_table viper_i2c_gpiod_table = { 463 - .dev_id = "i2c-gpio", 463 + .dev_id = "i2c-gpio.1", 464 464 .table = { 465 465 GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO, 466 466 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), ··· 789 789 __setup("tpm=", viper_tpm_setup); 790 790 791 791 struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = { 792 - .dev_id = "i2c-gpio", 792 + .dev_id = "i2c-gpio.2", 793 793 .table = { 794 794 GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO, 795 795 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+1 -1
arch/arm/mach-sa1100/simpad.c
··· 327 327 * i2c 328 328 */ 329 329 static struct gpiod_lookup_table simpad_i2c_gpiod_table = { 330 - .dev_id = "i2c-gpio", 330 + .dev_id = "i2c-gpio.0", 331 331 .table = { 332 332 GPIO_LOOKUP_IDX("gpio", 21, NULL, 0, 333 333 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
-1
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
··· 299 299 /* GPIO blocks 16 thru 19 do not appear to be routed to pins */ 300 300 301 301 dwmmc_0: dwmmc0@f723d000 { 302 - max-frequency = <150000000>; 303 302 cap-mmc-highspeed; 304 303 mmc-hs200-1_8v; 305 304 non-removable;
+4
arch/mips/kernel/process.c
··· 721 721 if (value & ~known_bits) 722 722 return -EOPNOTSUPP; 723 723 724 + /* Setting FRE without FR is not supported. */ 725 + if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE) 726 + return -EOPNOTSUPP; 727 + 724 728 /* Avoid inadvertently triggering emulation */ 725 729 if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && 726 730 !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+1 -1
arch/mips/kernel/ptrace.c
··· 818 818 break; 819 819 } 820 820 #endif 821 - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); 821 + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); 822 822 break; 823 823 case PC: 824 824 tmp = regs->cp0_epc;
+1 -1
arch/mips/kernel/ptrace32.c
··· 109 109 addr & 1); 110 110 break; 111 111 } 112 - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); 112 + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); 113 113 break; 114 114 case PC: 115 115 tmp = regs->cp0_epc;
+7
arch/nds32/Kconfig
··· 9 9 select CLKSRC_MMIO 10 10 select CLONE_BACKWARDS 11 11 select COMMON_CLK 12 + select GENERIC_ASHLDI3 13 + select GENERIC_ASHRDI3 14 + select GENERIC_LSHRDI3 15 + select GENERIC_CMPDI2 16 + select GENERIC_MULDI3 17 + select GENERIC_UCMPDI2 12 18 select GENERIC_ATOMIC64 13 19 select GENERIC_CPU_DEVICES 14 20 select GENERIC_CLOCKEVENTS ··· 88 82 89 83 menu "Kernel Features" 90 84 source "kernel/Kconfig.preempt" 85 + source "kernel/Kconfig.freezer" 91 86 source "mm/Kconfig" 92 87 source "kernel/Kconfig.hz" 93 88 endmenu
+3 -2
arch/nds32/Kconfig.cpu
··· 1 1 comment "Processor Features" 2 2 3 3 config CPU_BIG_ENDIAN 4 - bool "Big endian" 4 + def_bool !CPU_LITTLE_ENDIAN 5 5 6 6 config CPU_LITTLE_ENDIAN 7 - def_bool !CPU_BIG_ENDIAN 7 + bool "Little endian" 8 + default y 8 9 9 10 config HWZOL 10 11 bool "hardware zero overhead loop support"
+4 -3
arch/nds32/Makefile
··· 23 23 # If we have a machine-specific directory, then include it in the build. 24 24 core-y += arch/nds32/kernel/ arch/nds32/mm/ 25 25 libs-y += arch/nds32/lib/ 26 - LIBGCC_PATH := \ 27 - $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) 28 - libs-y += $(LIBGCC_PATH) 29 26 30 27 ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""' 31 28 BUILTIN_DTB := y ··· 32 35 33 36 ifdef CONFIG_CPU_LITTLE_ENDIAN 34 37 KBUILD_CFLAGS += $(call cc-option, -EL) 38 + KBUILD_AFLAGS += $(call cc-option, -EL) 39 + LDFLAGS += $(call cc-option, -EL) 35 40 else 36 41 KBUILD_CFLAGS += $(call cc-option, -EB) 42 + KBUILD_AFLAGS += $(call cc-option, -EB) 43 + LDFLAGS += $(call cc-option, -EB) 37 44 endif 38 45 39 46 boot := arch/nds32/boot
+2
arch/nds32/include/asm/Kbuild
··· 16 16 generic-y += emergency-restart.h 17 17 generic-y += errno.h 18 18 generic-y += exec.h 19 + generic-y += export.h 19 20 generic-y += fb.h 20 21 generic-y += fcntl.h 21 22 generic-y += ftrace.h ··· 50 49 generic-y += timex.h 51 50 generic-y += topology.h 52 51 generic-y += trace_clock.h 52 + generic-y += xor.h 53 53 generic-y += unaligned.h 54 54 generic-y += user.h 55 55 generic-y += vga.h
+2 -1
arch/nds32/include/asm/bitfield.h
··· 336 336 #define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE ) 337 337 #define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM ) 338 338 339 - #define INT_MASK_INITAIAL_VAL 0x10003 339 + #define INT_MASK_INITAIAL_VAL (INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE) 340 340 341 341 /****************************************************************************** 342 342 * ir15: INT_PEND (Interrupt Pending Register) ··· 396 396 #define MMU_CTL_D8KB 1 397 397 #define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA ) 398 398 399 + #define MMU_CTL_CACHEABLE_NON 0 399 400 #define MMU_CTL_CACHEABLE_WB 2 400 401 #define MMU_CTL_CACHEABLE_WT 3 401 402
+2
arch/nds32/include/asm/cacheflush.h
··· 32 32 33 33 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 34 34 void flush_kernel_dcache_page(struct page *page); 35 + void flush_kernel_vmap_range(void *addr, int size); 36 + void invalidate_kernel_vmap_range(void *addr, int size); 35 37 void flush_icache_range(unsigned long start, unsigned long end); 36 38 void flush_icache_page(struct vm_area_struct *vma, struct page *page); 37 39 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
+2
arch/nds32/include/asm/io.h
··· 4 4 #ifndef __ASM_NDS32_IO_H 5 5 #define __ASM_NDS32_IO_H 6 6 7 + #include <linux/types.h> 8 + 7 9 extern void iounmap(volatile void __iomem *addr); 8 10 #define __raw_writeb __raw_writeb 9 11 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+3
arch/nds32/include/asm/page.h
··· 27 27 unsigned long vaddr, struct vm_area_struct *vma); 28 28 extern void clear_user_highpage(struct page *page, unsigned long vaddr); 29 29 30 + void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 31 + struct page *to); 32 + void clear_user_page(void *addr, unsigned long vaddr, struct page *page); 30 33 #define __HAVE_ARCH_COPY_USER_HIGHPAGE 31 34 #define clear_user_highpage clear_user_highpage 32 35 #else
+1
arch/nds32/include/asm/pgtable.h
··· 152 152 #define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE) 153 153 #define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) 154 154 #define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) 155 + #define PAGE_SHARED __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD) 155 156 #define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) 156 157 #endif /* __ASSEMBLY__ */ 157 158
+1 -1
arch/nds32/kernel/ex-entry.S
··· 118 118 /* interrupt */ 119 119 2: 120 120 #ifdef CONFIG_TRACE_IRQFLAGS 121 - jal arch_trace_hardirqs_off 121 + jal trace_hardirqs_off 122 122 #endif 123 123 move $r0, $sp 124 124 sethi $lp, hi20(ret_from_intr)
+25 -7
arch/nds32/kernel/head.S
··· 57 57 isb 58 58 mtsr $r4, $L1_PPTB ! load page table pointer\n" 59 59 60 - /* set NTC0 cacheable/writeback, mutliple page size in use */ 61 - mfsr $r3, $MMU_CTL 62 - li $r0, #~MMU_CTL_mskNTC0 63 - and $r3, $r3, $r0 64 - #ifdef CONFIG_ANDES_PAGE_SIZE_4KB 65 - ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)) 60 + #ifdef CONFIG_CPU_DCACHE_DISABLE 61 + #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON 66 62 #else 67 - ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB) 63 + #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH 64 + #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT 65 + #else 66 + #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB 67 + #endif 68 + #endif 69 + 70 + /* set NTC cacheability, mutliple page size in use */ 71 + mfsr $r3, $MMU_CTL 72 + #if CONFIG_MEMORY_START >= 0xc0000000 73 + ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3) 74 + #elif CONFIG_MEMORY_START >= 0x80000000 75 + ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2) 76 + #elif CONFIG_MEMORY_START >= 0x40000000 77 + ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1) 78 + #else 79 + ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0) 80 + #endif 81 + 82 + #ifdef CONFIG_ANDES_PAGE_SIZE_4KB 83 + ori $r3, $r3, #(MMU_CTL_mskMPZIU) 84 + #else 85 + ori $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB) 68 86 #endif 69 87 #ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS 70 88 li $r0, #MMU_CTL_UNA
+3
arch/nds32/kernel/setup.c
··· 293 293 /* paging_init() sets up the MMU and marks all pages as reserved */ 294 294 paging_init(); 295 295 296 + /* invalidate all TLB entries because the new mapping is created */ 297 + __nds32__tlbop_flua(); 298 + 296 299 /* use generic way to parse */ 297 300 parse_early_param(); 298 301
+2
arch/nds32/kernel/stacktrace.c
··· 9 9 { 10 10 save_stack_trace_tsk(current, trace); 11 11 } 12 + EXPORT_SYMBOL_GPL(save_stack_trace); 12 13 13 14 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 14 15 { ··· 46 45 fpn = (unsigned long *)fpp; 47 46 } 48 47 } 48 + EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+5 -5
arch/nds32/kernel/vdso.c
··· 23 23 #include <asm/vdso_timer_info.h> 24 24 #include <asm/cache_info.h> 25 25 extern struct cache_info L1_cache_info[2]; 26 - extern char vdso_start, vdso_end; 26 + extern char vdso_start[], vdso_end[]; 27 27 static unsigned long vdso_pages __ro_after_init; 28 28 static unsigned long timer_mapping_base; 29 29 ··· 66 66 int i; 67 67 struct page **vdso_pagelist; 68 68 69 - if (memcmp(&vdso_start, "\177ELF", 4)) { 69 + if (memcmp(vdso_start, "\177ELF", 4)) { 70 70 pr_err("vDSO is not a valid ELF object!\n"); 71 71 return -EINVAL; 72 72 } 73 73 /* Creat a timer io mapping to get clock cycles counter */ 74 74 get_timer_node_info(); 75 75 76 - vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; 76 + vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; 77 77 pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", 78 - vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); 78 + vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data); 79 79 80 80 /* Allocate the vDSO pagelist */ 81 81 vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL); ··· 83 83 return -ENOMEM; 84 84 85 85 for (i = 0; i < vdso_pages; i++) 86 - vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE); 86 + vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE); 87 87 vdso_spec[1].pages = &vdso_pagelist[0]; 88 88 89 89 return 0;
+3
arch/nds32/lib/copy_page.S
··· 2 2 // Copyright (C) 2005-2017 Andes Technology Corporation 3 3 4 4 #include <linux/linkage.h> 5 + #include <asm/export.h> 5 6 #include <asm/page.h> 6 7 7 8 .text ··· 17 16 popm $r2, $r10 18 17 ret 19 18 ENDPROC(copy_page) 19 + EXPORT_SYMBOL(copy_page) 20 20 21 21 ENTRY(clear_page) 22 22 pushm $r1, $r9 ··· 37 35 popm $r1, $r9 38 36 ret 39 37 ENDPROC(clear_page) 38 + EXPORT_SYMBOL(clear_page)
+6 -3
arch/nds32/mm/alignment.c
··· 19 19 #define RA(inst) (((inst) >> 15) & 0x1FUL) 20 20 #define RB(inst) (((inst) >> 10) & 0x1FUL) 21 21 #define SV(inst) (((inst) >> 8) & 0x3UL) 22 - #define IMM(inst) (((inst) >> 0) & 0x3FFFUL) 22 + #define IMM(inst) (((inst) >> 0) & 0x7FFFUL) 23 23 24 24 #define RA3(inst) (((inst) >> 3) & 0x7UL) 25 25 #define RT3(inst) (((inst) >> 6) & 0x7UL) ··· 27 27 28 28 #define RA5(inst) (((inst) >> 0) & 0x1FUL) 29 29 #define RT4(inst) (((inst) >> 5) & 0xFUL) 30 + 31 + #define GET_IMMSVAL(imm_value) \ 32 + (((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value) 30 33 31 34 #define __get8_data(val,addr,err) \ 32 35 __asm__( \ ··· 470 467 } 471 468 472 469 if (imm) 473 - shift = IMM(inst) * len; 470 + shift = GET_IMMSVAL(IMM(inst)) * len; 474 471 else 475 472 shift = *idx_to_addr(regs, RB(inst)) << SV(inst); 476 473 ··· 555 552 556 553 static struct ctl_table nds32_sysctl_table[2] = { 557 554 { 558 - .procname = "unaligned_acess", 555 + .procname = "unaligned_access", 559 556 .mode = 0555, 560 557 .child = alignment_tbl}, 561 558 {}
+60 -14
arch/nds32/mm/cacheflush.c
··· 147 147 cpu_icache_inval_all(); 148 148 } 149 149 150 + void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 151 + struct page *to) 152 + { 153 + cpu_dcache_wbinval_page((unsigned long)vaddr); 154 + cpu_icache_inval_page((unsigned long)vaddr); 155 + copy_page(vto, vfrom); 156 + cpu_dcache_wbinval_page((unsigned long)vto); 157 + cpu_icache_inval_page((unsigned long)vto); 158 + } 159 + 160 + void clear_user_page(void *addr, unsigned long vaddr, struct page *page) 161 + { 162 + cpu_dcache_wbinval_page((unsigned long)vaddr); 163 + cpu_icache_inval_page((unsigned long)vaddr); 164 + clear_page(addr); 165 + cpu_dcache_wbinval_page((unsigned long)addr); 166 + cpu_icache_inval_page((unsigned long)addr); 167 + } 168 + 150 169 void copy_user_highpage(struct page *to, struct page *from, 151 170 unsigned long vaddr, struct vm_area_struct *vma) 152 171 { ··· 175 156 pto = page_to_phys(to); 176 157 pfrom = page_to_phys(from); 177 158 159 + local_irq_save(flags); 178 160 if (aliasing(vaddr, (unsigned long)kfrom)) 179 161 cpu_dcache_wb_page((unsigned long)kfrom); 180 - if (aliasing(vaddr, (unsigned long)kto)) 181 - cpu_dcache_inval_page((unsigned long)kto); 182 - local_irq_save(flags); 183 162 vto = kremap0(vaddr, pto); 184 163 vfrom = kremap1(vaddr, pfrom); 185 164 copy_page((void *)vto, (void *)vfrom); ··· 215 198 if (mapping && !mapping_mapped(mapping)) 216 199 set_bit(PG_dcache_dirty, &page->flags); 217 200 else { 218 - int i, pc; 219 - unsigned long vto, kaddr, flags; 201 + unsigned long kaddr, flags; 202 + 220 203 kaddr = (unsigned long)page_address(page); 221 - cpu_dcache_wbinval_page(kaddr); 222 - pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE; 223 204 local_irq_save(flags); 224 - for (i = 0; i < pc; i++) { 225 - vto = 226 - kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page)); 227 - cpu_dcache_wbinval_page(vto); 228 - kunmap01(vto); 205 + cpu_dcache_wbinval_page(kaddr); 206 + if (mapping) { 207 + unsigned long vaddr, kto; 208 + 209 + vaddr = 
page->index << PAGE_SHIFT; 210 + if (aliasing(vaddr, kaddr)) { 211 + kto = kremap0(vaddr, page_to_phys(page)); 212 + cpu_dcache_wbinval_page(kto); 213 + kunmap01(kto); 214 + } 229 215 } 230 216 local_irq_restore(flags); 231 217 } 232 218 } 219 + EXPORT_SYMBOL(flush_dcache_page); 233 220 234 221 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 235 222 unsigned long vaddr, void *dst, void *src, int len) ··· 272 251 void flush_anon_page(struct vm_area_struct *vma, 273 252 struct page *page, unsigned long vaddr) 274 253 { 275 - unsigned long flags; 254 + unsigned long kaddr, flags, ktmp; 276 255 if (!PageAnon(page)) 277 256 return; 278 257 ··· 282 261 local_irq_save(flags); 283 262 if (vma->vm_flags & VM_EXEC) 284 263 cpu_icache_inval_page(vaddr & PAGE_MASK); 285 - cpu_dcache_wbinval_page((unsigned long)page_address(page)); 264 + kaddr = (unsigned long)page_address(page); 265 + if (aliasing(vaddr, kaddr)) { 266 + ktmp = kremap0(vaddr, page_to_phys(page)); 267 + cpu_dcache_wbinval_page(ktmp); 268 + kunmap01(ktmp); 269 + } 286 270 local_irq_restore(flags); 287 271 } 288 272 ··· 298 272 cpu_dcache_wbinval_page((unsigned long)page_address(page)); 299 273 local_irq_restore(flags); 300 274 } 275 + EXPORT_SYMBOL(flush_kernel_dcache_page); 276 + 277 + void flush_kernel_vmap_range(void *addr, int size) 278 + { 279 + unsigned long flags; 280 + local_irq_save(flags); 281 + cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size); 282 + local_irq_restore(flags); 283 + } 284 + EXPORT_SYMBOL(flush_kernel_vmap_range); 285 + 286 + void invalidate_kernel_vmap_range(void *addr, int size) 287 + { 288 + unsigned long flags; 289 + local_irq_save(flags); 290 + cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size); 291 + local_irq_restore(flags); 292 + } 293 + EXPORT_SYMBOL(invalidate_kernel_vmap_range); 301 294 302 295 void flush_icache_range(unsigned long start, unsigned long end) 303 296 { ··· 328 283 cpu_cache_wbinval_range(start, end, 
1); 329 284 local_irq_restore(flags); 330 285 } 286 + EXPORT_SYMBOL(flush_icache_range); 331 287 332 288 void flush_icache_page(struct vm_area_struct *vma, struct page *page) 333 289 {
+1
arch/nds32/mm/init.c
··· 30 30 * zero-initialized data and COW. 31 31 */ 32 32 struct page *empty_zero_page; 33 + EXPORT_SYMBOL(empty_zero_page); 33 34 34 35 static void __init zone_sizes_init(void) 35 36 {
+1
arch/powerpc/include/asm/kvm_book3s.h
··· 96 96 struct kvm_vcpu *runner; 97 97 struct kvm *kvm; 98 98 u64 tb_offset; /* guest timebase - host timebase */ 99 + u64 tb_offset_applied; /* timebase offset currently in force */ 99 100 ulong lpcr; 100 101 u32 arch_compat; 101 102 ulong pcr;
+1
arch/powerpc/kernel/asm-offsets.c
··· 562 562 OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); 563 563 OFFSET(VCORE_KVM, kvmppc_vcore, kvm); 564 564 OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); 565 + OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied); 565 566 OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); 566 567 OFFSET(VCORE_PCR, kvmppc_vcore, pcr); 567 568 OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
+3 -3
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 162 162 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) 163 163 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) 164 164 : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); 165 - asm volatile("ptesync": : :"memory"); 165 + asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); 166 166 } 167 167 168 168 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) ··· 173 173 /* RIC=1 PRS=0 R=1 IS=2 */ 174 174 asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1) 175 175 : : "r" (rb), "r" (kvm->arch.lpid) : "memory"); 176 - asm volatile("ptesync": : :"memory"); 176 + asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); 177 177 } 178 178 179 179 unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, ··· 584 584 585 585 ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); 586 586 if (ptep && pte_present(*ptep)) { 587 - old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0, 587 + old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0, 588 588 gpa, shift); 589 589 kvmppc_radix_tlbie_page(kvm, gpa, shift); 590 590 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
+1
arch/powerpc/kvm/book3s_hv.c
··· 2441 2441 vc->in_guest = 0; 2442 2442 vc->napping_threads = 0; 2443 2443 vc->conferring_threads = 0; 2444 + vc->tb_offset_applied = 0; 2444 2445 } 2445 2446 2446 2447 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
+52 -45
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 692 692 22: ld r8,VCORE_TB_OFFSET(r5) 693 693 cmpdi r8,0 694 694 beq 37f 695 + std r8, VCORE_TB_OFFSET_APPL(r5) 695 696 mftb r6 /* current host timebase */ 696 697 add r8,r8,r6 697 698 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ ··· 941 940 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 942 941 8: 943 942 944 - /* 945 - * Set the decrementer to the guest decrementer. 946 - */ 947 - ld r8,VCPU_DEC_EXPIRES(r4) 948 - /* r8 is a host timebase value here, convert to guest TB */ 949 - ld r5,HSTATE_KVM_VCORE(r13) 950 - ld r6,VCORE_TB_OFFSET(r5) 951 - add r8,r8,r6 952 - mftb r7 953 - subf r3,r7,r8 954 - mtspr SPRN_DEC,r3 955 - 956 943 ld r5, VCPU_SPRG0(r4) 957 944 ld r6, VCPU_SPRG1(r4) 958 945 ld r7, VCPU_SPRG2(r4) ··· 993 1004 ld r8,VCORE_LPCR(r5) 994 1005 mtspr SPRN_LPCR,r8 995 1006 isync 1007 + 1008 + /* 1009 + * Set the decrementer to the guest decrementer. 1010 + */ 1011 + ld r8,VCPU_DEC_EXPIRES(r4) 1012 + /* r8 is a host timebase value here, convert to guest TB */ 1013 + ld r5,HSTATE_KVM_VCORE(r13) 1014 + ld r6,VCORE_TB_OFFSET_APPL(r5) 1015 + add r8,r8,r6 1016 + mftb r7 1017 + subf r3,r7,r8 1018 + mtspr SPRN_DEC,r3 996 1019 997 1020 /* Check if HDEC expires soon */ 998 1021 mfspr r3, SPRN_HDEC ··· 1598 1597 1599 1598 guest_bypass: 1600 1599 stw r12, STACK_SLOT_TRAP(r1) 1601 - mr r3, r12 1600 + 1601 + /* Save DEC */ 1602 + /* Do this before kvmhv_commence_exit so we know TB is guest TB */ 1603 + ld r3, HSTATE_KVM_VCORE(r13) 1604 + mfspr r5,SPRN_DEC 1605 + mftb r6 1606 + /* On P9, if the guest has large decr enabled, don't sign extend */ 1607 + BEGIN_FTR_SECTION 1608 + ld r4, VCORE_LPCR(r3) 1609 + andis. 
r4, r4, LPCR_LD@h 1610 + bne 16f 1611 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1612 + extsw r5,r5 1613 + 16: add r5,r5,r6 1614 + /* r5 is a guest timebase value here, convert to host TB */ 1615 + ld r4,VCORE_TB_OFFSET_APPL(r3) 1616 + subf r5,r4,r5 1617 + std r5,VCPU_DEC_EXPIRES(r9) 1618 + 1602 1619 /* Increment exit count, poke other threads to exit */ 1620 + mr r3, r12 1603 1621 bl kvmhv_commence_exit 1604 1622 nop 1605 1623 ld r9, HSTATE_KVM_VCPU(r13) ··· 1658 1638 add r4,r4,r6 1659 1639 mtspr SPRN_PURR,r3 1660 1640 mtspr SPRN_SPURR,r4 1661 - 1662 - /* Save DEC */ 1663 - ld r3, HSTATE_KVM_VCORE(r13) 1664 - mfspr r5,SPRN_DEC 1665 - mftb r6 1666 - /* On P9, if the guest has large decr enabled, don't sign extend */ 1667 - BEGIN_FTR_SECTION 1668 - ld r4, VCORE_LPCR(r3) 1669 - andis. r4, r4, LPCR_LD@h 1670 - bne 16f 1671 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1672 - extsw r5,r5 1673 - 16: add r5,r5,r6 1674 - /* r5 is a guest timebase value here, convert to host TB */ 1675 - ld r4,VCORE_TB_OFFSET(r3) 1676 - subf r5,r4,r5 1677 - std r5,VCPU_DEC_EXPIRES(r9) 1678 1641 1679 1642 BEGIN_FTR_SECTION 1680 1643 b 8f ··· 1908 1905 cmpwi cr2, r0, 0 1909 1906 beq cr2, 4f 1910 1907 1908 + /* 1909 + * Radix: do eieio; tlbsync; ptesync sequence in case we 1910 + * interrupted the guest between a tlbie and a ptesync. 
1911 + */ 1912 + eieio 1913 + tlbsync 1914 + ptesync 1915 + 1911 1916 /* Radix: Handle the case where the guest used an illegal PID */ 1912 1917 LOAD_REG_ADDR(r4, mmu_base_pid) 1913 1918 lwz r3, VCPU_GUEST_PID(r9) ··· 2028 2017 2029 2018 27: 2030 2019 /* Subtract timebase offset from timebase */ 2031 - ld r8,VCORE_TB_OFFSET(r5) 2020 + ld r8, VCORE_TB_OFFSET_APPL(r5) 2032 2021 cmpdi r8,0 2033 2022 beq 17f 2023 + li r0, 0 2024 + std r0, VCORE_TB_OFFSET_APPL(r5) 2034 2025 mftb r6 /* current guest timebase */ 2035 2026 subf r8,r8,r6 2036 2027 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ ··· 2713 2700 add r3, r3, r5 2714 2701 ld r4, HSTATE_KVM_VCPU(r13) 2715 2702 ld r5, HSTATE_KVM_VCORE(r13) 2716 - ld r6, VCORE_TB_OFFSET(r5) 2703 + ld r6, VCORE_TB_OFFSET_APPL(r5) 2717 2704 subf r3, r6, r3 /* convert to host TB value */ 2718 2705 std r3, VCPU_DEC_EXPIRES(r4) 2719 2706 ··· 2812 2799 /* Restore guest decrementer */ 2813 2800 ld r3, VCPU_DEC_EXPIRES(r4) 2814 2801 ld r5, HSTATE_KVM_VCORE(r13) 2815 - ld r6, VCORE_TB_OFFSET(r5) 2802 + ld r6, VCORE_TB_OFFSET_APPL(r5) 2816 2803 add r3, r3, r6 /* convert host TB to guest TB value */ 2817 2804 mftb r7 2818 2805 subf r3, r7, r3 ··· 3619 3606 */ 3620 3607 kvmhv_start_timing: 3621 3608 ld r5, HSTATE_KVM_VCORE(r13) 3622 - lbz r6, VCORE_IN_GUEST(r5) 3623 - cmpwi r6, 0 3624 - beq 5f /* if in guest, need to */ 3625 - ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ 3626 - 5: mftb r5 3627 - subf r5, r6, r5 3609 + ld r6, VCORE_TB_OFFSET_APPL(r5) 3610 + mftb r5 3611 + subf r5, r6, r5 /* subtract current timebase offset */ 3628 3612 std r3, VCPU_CUR_ACTIVITY(r4) 3629 3613 std r5, VCPU_ACTIVITY_START(r4) 3630 3614 blr ··· 3632 3622 */ 3633 3623 kvmhv_accumulate_time: 3634 3624 ld r5, HSTATE_KVM_VCORE(r13) 3635 - lbz r8, VCORE_IN_GUEST(r5) 3636 - cmpwi r8, 0 3637 - beq 4f /* if in guest, need to */ 3638 - ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ 3639 - 4: ld r5, VCPU_CUR_ACTIVITY(r4) 3625 + ld r8, 
VCORE_TB_OFFSET_APPL(r5) 3626 + ld r5, VCPU_CUR_ACTIVITY(r4) 3640 3627 ld r6, VCPU_ACTIVITY_START(r4) 3641 3628 std r3, VCPU_CUR_ACTIVITY(r4) 3642 3629 mftb r7 3643 - subf r7, r8, r7 3630 + subf r7, r8, r7 /* subtract current timebase offset */ 3644 3631 std r7, VCPU_ACTIVITY_START(r4) 3645 3632 cmpdi r5, 0 3646 3633 beqlr
+101 -7
arch/powerpc/kvm/book3s_xive_template.c
··· 11 11 #define XGLUE(a,b) a##b 12 12 #define GLUE(a,b) XGLUE(a,b) 13 13 14 + /* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */ 15 + #define XICS_DUMMY 1 16 + 14 17 static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) 15 18 { 16 19 u8 cppr; ··· 208 205 goto skip_ipi; 209 206 } 210 207 208 + /* If it's the dummy interrupt, continue searching */ 209 + if (hirq == XICS_DUMMY) 210 + goto skip_ipi; 211 + 211 212 /* If fetching, update queue pointers */ 212 213 if (scan_type == scan_fetch) { 213 214 q->idx = idx; ··· 392 385 __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); 393 386 } 394 387 388 + static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive, 389 + struct kvmppc_xive_vcpu *xc) 390 + { 391 + unsigned int prio; 392 + 393 + /* For each priority that is now masked */ 394 + for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { 395 + struct xive_q *q = &xc->queues[prio]; 396 + struct kvmppc_xive_irq_state *state; 397 + struct kvmppc_xive_src_block *sb; 398 + u32 idx, toggle, entry, irq, hw_num; 399 + struct xive_irq_data *xd; 400 + __be32 *qpage; 401 + u16 src; 402 + 403 + idx = q->idx; 404 + toggle = q->toggle; 405 + qpage = READ_ONCE(q->qpage); 406 + if (!qpage) 407 + continue; 408 + 409 + /* For each interrupt in the queue */ 410 + for (;;) { 411 + entry = be32_to_cpup(qpage + idx); 412 + 413 + /* No more ? */ 414 + if ((entry >> 31) == toggle) 415 + break; 416 + irq = entry & 0x7fffffff; 417 + 418 + /* Skip dummies and IPIs */ 419 + if (irq == XICS_DUMMY || irq == XICS_IPI) 420 + goto next; 421 + sb = kvmppc_xive_find_source(xive, irq, &src); 422 + if (!sb) 423 + goto next; 424 + state = &sb->irq_state[src]; 425 + 426 + /* Has it been rerouted ? */ 427 + if (xc->server_num == state->act_server) 428 + goto next; 429 + 430 + /* 431 + * Allright, it *has* been re-routed, kill it from 432 + * the queue. 
433 + */ 434 + qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); 435 + 436 + /* Find the HW interrupt */ 437 + kvmppc_xive_select_irq(state, &hw_num, &xd); 438 + 439 + /* If it's not an LSI, set PQ to 11 the EOI will force a resend */ 440 + if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) 441 + GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11); 442 + 443 + /* EOI the source */ 444 + GLUE(X_PFX,source_eoi)(hw_num, xd); 445 + 446 + next: 447 + idx = (idx + 1) & q->msk; 448 + if (idx == 0) 449 + toggle ^= 1; 450 + } 451 + } 452 + } 453 + 395 454 X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) 396 455 { 397 456 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 457 + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; 398 458 u8 old_cppr; 399 459 400 460 pr_devel("H_CPPR(cppr=%ld)\n", cppr); ··· 481 407 */ 482 408 smp_mb(); 483 409 484 - /* 485 - * We are masking less, we need to look for pending things 486 - * to deliver and set VP pending bits accordingly to trigger 487 - * a new interrupt otherwise we might miss MFRR changes for 488 - * which we have optimized out sending an IPI signal. 489 - */ 490 - if (cppr > old_cppr) 410 + if (cppr > old_cppr) { 411 + /* 412 + * We are masking less, we need to look for pending things 413 + * to deliver and set VP pending bits accordingly to trigger 414 + * a new interrupt otherwise we might miss MFRR changes for 415 + * which we have optimized out sending an IPI signal. 416 + */ 491 417 GLUE(X_PFX,push_pending_to_hw)(xc); 418 + } else { 419 + /* 420 + * We are masking more, we need to check the queue for any 421 + * interrupt that has been routed to another CPU, take 422 + * it out (replace it with the dummy) and retrigger it. 423 + * 424 + * This is necessary since those interrupts may otherwise 425 + * never be processed, at least not until this CPU restores 426 + * its CPPR. 427 + * 428 + * This is in theory racy vs. HW adding new interrupts to 429 + * the queue. 
In practice this works because the interesting 430 + * cases are when the guest has done a set_xive() to move the 431 + * interrupt away, which flushes the xive, followed by the 432 + * target CPU doing a H_CPPR. So any new interrupt coming into 433 + * the queue must still be routed to us and isn't a source 434 + * of concern. 435 + */ 436 + GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc); 437 + } 492 438 493 439 /* Apply new CPPR */ 494 440 xc->hw_cppr = cppr;
+1 -1
arch/s390/kvm/vsie.c
··· 578 578 579 579 gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; 580 580 if (gpa && (scb_s->ecb & ECB_TE)) { 581 - if (!(gpa & ~0x1fffU)) { 581 + if (!(gpa & ~0x1fffUL)) { 582 582 rc = set_validity_icpt(scb_s, 0x0080U); 583 583 goto unpin; 584 584 }
+1 -1
arch/s390/purgatory/Makefile
··· 21 21 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes 22 22 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare 23 23 KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding 24 - KBUILD_CFLAGS += -c -MD -Os -m64 24 + KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float 25 25 KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 26 26 27 27 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+7 -15
arch/x86/kernel/cpu/common.c
··· 942 942 {} 943 943 }; 944 944 945 + /* Only list CPUs which speculate but are non susceptible to SSB */ 945 946 static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { 946 - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW }, 947 - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT }, 948 - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL }, 949 - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW }, 950 - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW }, 951 947 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, 952 948 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, 953 949 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, ··· 951 955 { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, 952 956 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, 953 957 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, 954 - { X86_VENDOR_CENTAUR, 5, }, 955 - { X86_VENDOR_INTEL, 5, }, 956 - { X86_VENDOR_NSC, 5, }, 957 958 { X86_VENDOR_AMD, 0x12, }, 958 959 { X86_VENDOR_AMD, 0x11, }, 959 960 { X86_VENDOR_AMD, 0x10, }, 960 961 { X86_VENDOR_AMD, 0xf, }, 961 - { X86_VENDOR_ANY, 4, }, 962 962 {} 963 963 }; 964 964 ··· 962 970 { 963 971 u64 ia32_cap = 0; 964 972 973 + if (x86_match_cpu(cpu_no_speculation)) 974 + return; 975 + 976 + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); 977 + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); 978 + 965 979 if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) 966 980 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); 967 981 968 982 if (!x86_match_cpu(cpu_no_spec_store_bypass) && 969 983 !(ia32_cap & ARCH_CAP_SSB_NO)) 970 984 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); 971 - 972 - if (x86_match_cpu(cpu_no_speculation)) 973 - return; 974 - 975 - setup_force_cpu_bug(X86_BUG_SPECTRE_V1); 976 - setup_force_cpu_bug(X86_BUG_SPECTRE_V2); 977 985 978 986 if (x86_match_cpu(cpu_no_meltdown)) 979 987 return;
+7 -2
arch/x86/kvm/cpuid.c
··· 407 407 408 408 /* cpuid 7.0.edx*/ 409 409 const u32 kvm_cpuid_7_0_edx_x86_features = 410 - F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) | 411 - F(ARCH_CAPABILITIES); 410 + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | 411 + F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); 412 412 413 413 /* all calls to cpuid_count() should be made on the same cpu */ 414 414 get_cpu(); ··· 495 495 entry->ecx &= ~F(PKU); 496 496 entry->edx &= kvm_cpuid_7_0_edx_x86_features; 497 497 cpuid_mask(&entry->edx, CPUID_7_EDX); 498 + /* 499 + * We emulate ARCH_CAPABILITIES in software even 500 + * if the host doesn't support it. 501 + */ 502 + entry->edx |= F(ARCH_CAPABILITIES); 498 503 } else { 499 504 entry->ebx = 0; 500 505 entry->ecx = 0;
+11 -8
arch/x86/kvm/hyperv.c
··· 1260 1260 } 1261 1261 } 1262 1262 1263 + static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) 1264 + { 1265 + kvm_hv_hypercall_set_result(vcpu, result); 1266 + ++vcpu->stat.hypercalls; 1267 + return kvm_skip_emulated_instruction(vcpu); 1268 + } 1269 + 1263 1270 static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) 1264 1271 { 1265 - struct kvm_run *run = vcpu->run; 1266 - 1267 - kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result); 1268 - return kvm_skip_emulated_instruction(vcpu); 1272 + return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); 1269 1273 } 1270 1274 1271 1275 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) ··· 1354 1350 /* Hypercall continuation is not supported yet */ 1355 1351 if (rep_cnt || rep_idx) { 1356 1352 ret = HV_STATUS_INVALID_HYPERCALL_CODE; 1357 - goto set_result; 1353 + goto out; 1358 1354 } 1359 1355 1360 1356 switch (code) { ··· 1385 1381 break; 1386 1382 } 1387 1383 1388 - set_result: 1389 - kvm_hv_hypercall_set_result(vcpu, ret); 1390 - return 1; 1384 + out: 1385 + return kvm_hv_hypercall_complete(vcpu, ret); 1391 1386 } 1392 1387 1393 1388 void kvm_hv_init_vm(struct kvm *kvm)
+14 -2
arch/x86/kvm/lapic.c
··· 1522 1522 1523 1523 static void advance_periodic_target_expiration(struct kvm_lapic *apic) 1524 1524 { 1525 - apic->lapic_timer.tscdeadline += 1526 - nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); 1525 + ktime_t now = ktime_get(); 1526 + u64 tscl = rdtsc(); 1527 + ktime_t delta; 1528 + 1529 + /* 1530 + * Synchronize both deadlines to the same time source or 1531 + * differences in the periods (caused by differences in the 1532 + * underlying clocks or numerical approximation errors) will 1533 + * cause the two to drift apart over time as the errors 1534 + * accumulate. 1535 + */ 1527 1536 apic->lapic_timer.target_expiration = 1528 1537 ktime_add_ns(apic->lapic_timer.target_expiration, 1529 1538 apic->lapic_timer.period); 1539 + delta = ktime_sub(apic->lapic_timer.target_expiration, now); 1540 + apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + 1541 + nsec_to_cycles(apic->vcpu, delta); 1530 1542 } 1531 1543 1532 1544 static void start_sw_period(struct kvm_lapic *apic)
+8 -9
arch/x86/kvm/x86.c
··· 6671 6671 unsigned long nr, a0, a1, a2, a3, ret; 6672 6672 int op_64_bit; 6673 6673 6674 - if (kvm_hv_hypercall_enabled(vcpu->kvm)) { 6675 - if (!kvm_hv_hypercall(vcpu)) 6676 - return 0; 6677 - goto out; 6678 - } 6674 + if (kvm_hv_hypercall_enabled(vcpu->kvm)) 6675 + return kvm_hv_hypercall(vcpu); 6679 6676 6680 6677 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 6681 6678 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); ··· 6693 6696 6694 6697 if (kvm_x86_ops->get_cpl(vcpu) != 0) { 6695 6698 ret = -KVM_EPERM; 6696 - goto out_error; 6699 + goto out; 6697 6700 } 6698 6701 6699 6702 switch (nr) { ··· 6713 6716 ret = -KVM_ENOSYS; 6714 6717 break; 6715 6718 } 6716 - out_error: 6719 + out: 6717 6720 if (!op_64_bit) 6718 6721 ret = (u32)ret; 6719 6722 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 6720 6723 6721 - out: 6722 6724 ++vcpu->stat.hypercalls; 6723 6725 return kvm_skip_emulated_instruction(vcpu); 6724 6726 } ··· 7976 7980 { 7977 7981 struct msr_data apic_base_msr; 7978 7982 int mmu_reset_needed = 0; 7983 + int cpuid_update_needed = 0; 7979 7984 int pending_vec, max_bits, idx; 7980 7985 struct desc_ptr dt; 7981 7986 int ret = -EINVAL; ··· 8015 8018 vcpu->arch.cr0 = sregs->cr0; 8016 8019 8017 8020 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 8021 + cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & 8022 + (X86_CR4_OSXSAVE | X86_CR4_PKE)); 8018 8023 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 8019 - if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 8024 + if (cpuid_update_needed) 8020 8025 kvm_update_cpuid(vcpu); 8021 8026 8022 8027 idx = srcu_read_lock(&vcpu->kvm->srcu);
+2 -2
drivers/atm/zatm.c
··· 1151 1151 } 1152 1152 1153 1153 1154 - static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd, 1155 - int offset, int swap) 1154 + static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset, 1155 + int swap) 1156 1156 { 1157 1157 unsigned char buf[ZEPROM_SIZE]; 1158 1158 struct zatm_dev *zatm_dev;
+2 -2
drivers/crypto/inside-secure/safexcel.c
··· 152 152 EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; 153 153 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); 154 154 155 - memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0, 156 - EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); 155 + memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0, 156 + EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); 157 157 158 158 eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, 159 159 EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+4 -4
drivers/firmware/qcom_scm-32.c
··· 147 147 "smc #0 @ switch to secure world\n" 148 148 : "=r" (r0) 149 149 : "r" (r0), "r" (r1), "r" (r2) 150 - : "r3"); 150 + : "r3", "r12"); 151 151 } while (r0 == QCOM_SCM_INTERRUPTED); 152 152 153 153 return r0; ··· 263 263 "smc #0 @ switch to secure world\n" 264 264 : "=r" (r0) 265 265 : "r" (r0), "r" (r1), "r" (r2) 266 - : "r3"); 266 + : "r3", "r12"); 267 267 return r0; 268 268 } 269 269 ··· 298 298 "smc #0 @ switch to secure world\n" 299 299 : "=r" (r0) 300 300 : "r" (r0), "r" (r1), "r" (r2), "r" (r3) 301 - ); 301 + : "r12"); 302 302 return r0; 303 303 } 304 304 ··· 328 328 "smc #0 @ switch to secure world\n" 329 329 : "=r" (r0), "=r" (r1) 330 330 : "r" (r0), "r" (r1) 331 - : "r2", "r3"); 331 + : "r2", "r3", "r12"); 332 332 } while (r0 == QCOM_SCM_INTERRUPTED); 333 333 334 334 version = r1;
+29 -15
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 4555 4555 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 4556 4556 struct amdgpu_crtc *acrtc = NULL; 4557 4557 struct amdgpu_dm_connector *aconnector = NULL; 4558 - struct drm_connector_state *new_con_state = NULL; 4559 - struct dm_connector_state *dm_conn_state = NULL; 4558 + struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 4559 + struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 4560 4560 struct drm_plane_state *new_plane_state = NULL; 4561 4561 4562 4562 new_stream = NULL; ··· 4577 4577 /* TODO This hack should go away */ 4578 4578 if (aconnector && enable) { 4579 4579 // Make sure fake sink is created in plug-in scenario 4580 - new_con_state = drm_atomic_get_connector_state(state, 4580 + drm_new_conn_state = drm_atomic_get_new_connector_state(state, 4581 4581 &aconnector->base); 4582 + drm_old_conn_state = drm_atomic_get_old_connector_state(state, 4583 + &aconnector->base); 4582 4584 4583 - if (IS_ERR(new_con_state)) { 4584 - ret = PTR_ERR_OR_ZERO(new_con_state); 4585 + 4586 + if (IS_ERR(drm_new_conn_state)) { 4587 + ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 4585 4588 break; 4586 4589 } 4587 4590 4588 - dm_conn_state = to_dm_connector_state(new_con_state); 4591 + dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 4592 + dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 4589 4593 4590 4594 new_stream = create_stream_for_sink(aconnector, 4591 4595 &new_crtc_state->mode, 4592 - dm_conn_state); 4596 + dm_new_conn_state); 4593 4597 4594 4598 /* 4595 4599 * we can have no stream on ACTION_SET if a display ··· 4699 4695 * We want to do dc stream updates that do not require a 4700 4696 * full modeset below. 
4701 4697 */ 4702 - if (!enable || !aconnector || modereset_required(new_crtc_state)) 4698 + if (!(enable && aconnector && new_crtc_state->enable && 4699 + new_crtc_state->active)) 4703 4700 continue; 4704 4701 /* 4705 4702 * Given above conditions, the dc state cannot be NULL because: 4706 - * 1. We're attempting to enable a CRTC. Which has a... 4707 - * 2. Valid connector attached, and 4708 - * 3. User does not want to reset it (disable or mark inactive, 4709 - * which can happen on a CRTC that's already disabled). 4710 - * => It currently exists. 4703 + * 1. We're in the process of enabling CRTCs (just been added 4704 + * to the dc context, or already is on the context) 4705 + * 2. Has a valid connector attached, and 4706 + * 3. Is currently active and enabled. 4707 + * => The dc stream state currently exists. 4711 4708 */ 4712 4709 BUG_ON(dm_new_crtc_state->stream == NULL); 4713 4710 4714 - /* Color managment settings */ 4715 - if (dm_new_crtc_state->base.color_mgmt_changed) { 4711 + /* Scaling or underscan settings */ 4712 + if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state)) 4713 + update_stream_scaling_settings( 4714 + &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 4715 + 4716 + /* 4717 + * Color management settings. We also update color properties 4718 + * when a modeset is needed, to ensure it gets reprogrammed. 4719 + */ 4720 + if (dm_new_crtc_state->base.color_mgmt_changed || 4721 + drm_atomic_crtc_needs_modeset(new_crtc_state)) { 4716 4722 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state); 4717 4723 if (ret) 4718 4724 goto fail;
+4 -11
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
··· 2077 2077 return ret; 2078 2078 } 2079 2079 2080 - void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense) 2080 + void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense) 2081 2081 { 2082 2082 mutex_lock(&hdmi->mutex); 2083 2083 ··· 2102 2102 dw_hdmi_update_phy_mask(hdmi); 2103 2103 } 2104 2104 mutex_unlock(&hdmi->mutex); 2105 - } 2106 - 2107 - void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense) 2108 - { 2109 - struct dw_hdmi *hdmi = dev_get_drvdata(dev); 2110 - 2111 - __dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense); 2112 2105 } 2113 2106 EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense); 2114 2107 ··· 2138 2145 */ 2139 2146 if (intr_stat & 2140 2147 (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) { 2141 - __dw_hdmi_setup_rx_sense(hdmi, 2142 - phy_stat & HDMI_PHY_HPD, 2143 - phy_stat & HDMI_PHY_RX_SENSE); 2148 + dw_hdmi_setup_rx_sense(hdmi, 2149 + phy_stat & HDMI_PHY_HPD, 2150 + phy_stat & HDMI_PHY_RX_SENSE); 2144 2151 2145 2152 if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0) 2146 2153 cec_notifier_set_phys_addr(hdmi->cec_notifier,
+1
drivers/gpu/drm/drm_dp_helper.c
··· 1145 1145 static const u16 psr_setup_time_us[] = { 1146 1146 PSR_SETUP_TIME(330), 1147 1147 PSR_SETUP_TIME(275), 1148 + PSR_SETUP_TIME(220), 1148 1149 PSR_SETUP_TIME(165), 1149 1150 PSR_SETUP_TIME(110), 1150 1151 PSR_SETUP_TIME(55),
+11 -4
drivers/gpu/drm/i915/i915_query.c
··· 4 4 * Copyright © 2018 Intel Corporation 5 5 */ 6 6 7 + #include <linux/nospec.h> 8 + 7 9 #include "i915_drv.h" 8 10 #include "i915_query.h" 9 11 #include <uapi/drm/i915_drm.h> ··· 102 100 103 101 for (i = 0; i < args->num_items; i++, user_item_ptr++) { 104 102 struct drm_i915_query_item item; 105 - u64 func_idx; 103 + unsigned long func_idx; 106 104 int ret; 107 105 108 106 if (copy_from_user(&item, user_item_ptr, sizeof(item))) ··· 111 109 if (item.query_id == 0) 112 110 return -EINVAL; 113 111 112 + if (overflows_type(item.query_id - 1, unsigned long)) 113 + return -EINVAL; 114 + 114 115 func_idx = item.query_id - 1; 115 116 116 - if (func_idx < ARRAY_SIZE(i915_query_funcs)) 117 + ret = -EINVAL; 118 + if (func_idx < ARRAY_SIZE(i915_query_funcs)) { 119 + func_idx = array_index_nospec(func_idx, 120 + ARRAY_SIZE(i915_query_funcs)); 117 121 ret = i915_query_funcs[func_idx](dev_priv, &item); 118 - else 119 - ret = -EINVAL; 122 + } 120 123 121 124 /* Only write the length back to userspace if they differ. */ 122 125 if (ret != item.length && put_user(ret, &user_item_ptr->length))
+40 -11
drivers/gpu/drm/i915/intel_lvds.c
··· 574 574 return NOTIFY_OK; 575 575 } 576 576 577 + static int 578 + intel_lvds_connector_register(struct drm_connector *connector) 579 + { 580 + struct intel_lvds_connector *lvds = to_lvds_connector(connector); 581 + int ret; 582 + 583 + ret = intel_connector_register(connector); 584 + if (ret) 585 + return ret; 586 + 587 + lvds->lid_notifier.notifier_call = intel_lid_notify; 588 + if (acpi_lid_notifier_register(&lvds->lid_notifier)) { 589 + DRM_DEBUG_KMS("lid notifier registration failed\n"); 590 + lvds->lid_notifier.notifier_call = NULL; 591 + } 592 + 593 + return 0; 594 + } 595 + 596 + static void 597 + intel_lvds_connector_unregister(struct drm_connector *connector) 598 + { 599 + struct intel_lvds_connector *lvds = to_lvds_connector(connector); 600 + 601 + if (lvds->lid_notifier.notifier_call) 602 + acpi_lid_notifier_unregister(&lvds->lid_notifier); 603 + 604 + intel_connector_unregister(connector); 605 + } 606 + 577 607 /** 578 608 * intel_lvds_destroy - unregister and free LVDS structures 579 609 * @connector: connector to free ··· 615 585 { 616 586 struct intel_lvds_connector *lvds_connector = 617 587 to_lvds_connector(connector); 618 - 619 - if (lvds_connector->lid_notifier.notifier_call) 620 - acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); 621 588 622 589 if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) 623 590 kfree(lvds_connector->base.edid); ··· 636 609 .fill_modes = drm_helper_probe_single_connector_modes, 637 610 .atomic_get_property = intel_digital_connector_atomic_get_property, 638 611 .atomic_set_property = intel_digital_connector_atomic_set_property, 639 - .late_register = intel_connector_register, 640 - .early_unregister = intel_connector_unregister, 612 + .late_register = intel_lvds_connector_register, 613 + .early_unregister = intel_lvds_connector_unregister, 641 614 .destroy = intel_lvds_destroy, 642 615 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 643 616 .atomic_duplicate_state = 
intel_digital_connector_duplicate_state, ··· 852 825 .matches = { 853 826 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), 854 827 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), 828 + }, 829 + }, 830 + { 831 + .callback = intel_no_lvds_dmi_callback, 832 + .ident = "Radiant P845", 833 + .matches = { 834 + DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"), 835 + DMI_MATCH(DMI_PRODUCT_NAME, "P845"), 855 836 }, 856 837 }, 857 838 ··· 1184 1149 lvds_encoder->is_dual_link ? "dual" : "single"); 1185 1150 1186 1151 lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; 1187 - 1188 - lvds_connector->lid_notifier.notifier_call = intel_lid_notify; 1189 - if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { 1190 - DRM_DEBUG_KMS("lid notifier registration failed\n"); 1191 - lvds_connector->lid_notifier.notifier_call = NULL; 1192 - } 1193 1152 1194 1153 return; 1195 1154
+1 -1
drivers/gpu/drm/meson/meson_dw_hdmi.c
··· 529 529 if (stat & HDMITX_TOP_INTR_HPD_RISE) 530 530 hpd_connected = true; 531 531 532 - dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected, 532 + dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected, 533 533 hpd_connected); 534 534 535 535 drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
+4 -1
drivers/gpu/drm/omapdrm/dss/sdi.c
··· 82 82 struct dispc_clock_info *dispc_cinfo) 83 83 { 84 84 int i; 85 - struct sdi_clk_calc_ctx ctx = { .sdi = sdi }; 85 + struct sdi_clk_calc_ctx ctx; 86 86 87 87 /* 88 88 * DSS fclk gives us very few possibilities, so finding a good pixel ··· 95 95 bool ok; 96 96 97 97 memset(&ctx, 0, sizeof(ctx)); 98 + 99 + ctx.sdi = sdi; 100 + 98 101 if (pclk > 1000 * i * i * i) 99 102 ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu); 100 103 else
+3 -3
drivers/hwtracing/intel_th/msu.c
··· 733 733 /* Reset the page to write-back before releasing */ 734 734 set_memory_wb((unsigned long)win->block[i].bdesc, 1); 735 735 #endif 736 - dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc, 737 - win->block[i].addr); 736 + dma_free_coherent(msc_dev(msc)->parent->parent, size, 737 + win->block[i].bdesc, win->block[i].addr); 738 738 } 739 739 kfree(win); 740 740 ··· 769 769 /* Reset the page to write-back before releasing */ 770 770 set_memory_wb((unsigned long)win->block[i].bdesc, 1); 771 771 #endif 772 - dma_free_coherent(msc_dev(win->msc), PAGE_SIZE, 772 + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 773 773 win->block[i].bdesc, win->block[i].addr); 774 774 } 775 775
+4 -3
drivers/hwtracing/stm/core.c
··· 19 19 #include <linux/stm.h> 20 20 #include <linux/fs.h> 21 21 #include <linux/mm.h> 22 + #include <linux/vmalloc.h> 22 23 #include "stm.h" 23 24 24 25 #include <uapi/linux/stm.h> ··· 675 674 { 676 675 struct stm_device *stm = to_stm_device(dev); 677 676 678 - kfree(stm); 677 + vfree(stm); 679 678 } 680 679 681 680 int stm_register_device(struct device *parent, struct stm_data *stm_data, ··· 692 691 return -EINVAL; 693 692 694 693 nmasters = stm_data->sw_end - stm_data->sw_start + 1; 695 - stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); 694 + stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *)); 696 695 if (!stm) 697 696 return -ENOMEM; 698 697 ··· 745 744 /* matches device_initialize() above */ 746 745 put_device(&stm->dev); 747 746 err_free: 748 - kfree(stm); 747 + vfree(stm); 749 748 750 749 return err; 751 750 }
+1 -1
drivers/i2c/busses/i2c-ocores.c
··· 1 1 /* 2 2 * i2c-ocores.c: I2C bus driver for OpenCores I2C controller 3 - * (http://www.opencores.org/projects.cgi/web/i2c/overview). 3 + * (https://opencores.org/project/i2c/overview) 4 4 * 5 5 * Peter Korsgaard <jacmet@sunsite.dk> 6 6 *
+1
drivers/iio/adc/Kconfig
··· 158 158 depends on ARCH_AT91 || COMPILE_TEST 159 159 depends on HAS_IOMEM 160 160 depends on HAS_DMA 161 + select IIO_BUFFER 161 162 select IIO_TRIGGERED_BUFFER 162 163 help 163 164 Say yes here to build support for Atmel SAMA5D2 ADC which is
+24 -51
drivers/iio/adc/ad7793.c
··· 348 348 static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0, 349 349 33, 0, 17, 16, 12, 10, 8, 6, 4}; 350 350 351 - static ssize_t ad7793_read_frequency(struct device *dev, 352 - struct device_attribute *attr, 353 - char *buf) 354 - { 355 - struct iio_dev *indio_dev = dev_to_iio_dev(dev); 356 - struct ad7793_state *st = iio_priv(indio_dev); 357 - 358 - return sprintf(buf, "%d\n", 359 - st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]); 360 - } 361 - 362 - static ssize_t ad7793_write_frequency(struct device *dev, 363 - struct device_attribute *attr, 364 - const char *buf, 365 - size_t len) 366 - { 367 - struct iio_dev *indio_dev = dev_to_iio_dev(dev); 368 - struct ad7793_state *st = iio_priv(indio_dev); 369 - long lval; 370 - int i, ret; 371 - 372 - ret = kstrtol(buf, 10, &lval); 373 - if (ret) 374 - return ret; 375 - 376 - if (lval == 0) 377 - return -EINVAL; 378 - 379 - for (i = 0; i < 16; i++) 380 - if (lval == st->chip_info->sample_freq_avail[i]) 381 - break; 382 - if (i == 16) 383 - return -EINVAL; 384 - 385 - ret = iio_device_claim_direct_mode(indio_dev); 386 - if (ret) 387 - return ret; 388 - st->mode &= ~AD7793_MODE_RATE(-1); 389 - st->mode |= AD7793_MODE_RATE(i); 390 - ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode); 391 - iio_device_release_direct_mode(indio_dev); 392 - 393 - return len; 394 - } 395 - 396 - static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, 397 - ad7793_read_frequency, 398 - ad7793_write_frequency); 399 - 400 351 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( 401 352 "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4"); 402 353 ··· 375 424 ad7793_show_scale_available, NULL, 0); 376 425 377 426 static struct attribute *ad7793_attributes[] = { 378 - &iio_dev_attr_sampling_frequency.dev_attr.attr, 379 427 &iio_const_attr_sampling_frequency_available.dev_attr.attr, 380 428 &iio_dev_attr_in_m_in_scale_available.dev_attr.attr, 381 429 NULL ··· 385 435 }; 386 436 387 437 static struct attribute 
*ad7797_attributes[] = { 388 - &iio_dev_attr_sampling_frequency.dev_attr.attr, 389 438 &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr, 390 439 NULL 391 440 }; ··· 454 505 *val -= offset; 455 506 } 456 507 return IIO_VAL_INT; 508 + case IIO_CHAN_INFO_SAMP_FREQ: 509 + *val = st->chip_info 510 + ->sample_freq_avail[AD7793_MODE_RATE(st->mode)]; 511 + return IIO_VAL_INT; 457 512 } 458 513 return -EINVAL; 459 514 } ··· 494 541 ad7793_calibrate_all(st); 495 542 break; 496 543 } 544 + break; 545 + case IIO_CHAN_INFO_SAMP_FREQ: 546 + if (!val) { 547 + ret = -EINVAL; 548 + break; 549 + } 550 + 551 + for (i = 0; i < 16; i++) 552 + if (val == st->chip_info->sample_freq_avail[i]) 553 + break; 554 + 555 + if (i == 16) { 556 + ret = -EINVAL; 557 + break; 558 + } 559 + 560 + st->mode &= ~AD7793_MODE_RATE(-1); 561 + st->mode |= AD7793_MODE_RATE(i); 562 + ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), 563 + st->mode); 497 564 break; 498 565 default: 499 566 ret = -EINVAL;
+37 -4
drivers/iio/adc/at91-sama5d2_adc.c
··· 333 333 + AT91_SAMA5D2_DIFF_CHAN_CNT + 1), 334 334 }; 335 335 336 + static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan) 337 + { 338 + int i; 339 + 340 + for (i = 0; i < indio_dev->num_channels; i++) { 341 + if (indio_dev->channels[i].scan_index == chan) 342 + return i; 343 + } 344 + return -EINVAL; 345 + } 346 + 347 + static inline struct iio_chan_spec const * 348 + at91_adc_chan_get(struct iio_dev *indio_dev, int chan) 349 + { 350 + int index = at91_adc_chan_xlate(indio_dev, chan); 351 + 352 + if (index < 0) 353 + return NULL; 354 + return indio_dev->channels + index; 355 + } 356 + 336 357 static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) 337 358 { 338 359 struct iio_dev *indio = iio_trigger_get_drvdata(trig); ··· 371 350 at91_adc_writel(st, AT91_SAMA5D2_TRGR, status); 372 351 373 352 for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) { 374 - struct iio_chan_spec const *chan = indio->channels + bit; 353 + struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit); 375 354 355 + if (!chan) 356 + continue; 376 357 if (state) { 377 358 at91_adc_writel(st, AT91_SAMA5D2_CHER, 378 359 BIT(chan->channel)); ··· 471 448 472 449 for_each_set_bit(bit, indio_dev->active_scan_mask, 473 450 indio_dev->num_channels) { 474 - struct iio_chan_spec const *chan = indio_dev->channels + bit; 451 + struct iio_chan_spec const *chan = 452 + at91_adc_chan_get(indio_dev, bit); 453 + 454 + if (!chan) 455 + continue; 475 456 476 457 st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8; 477 458 } ··· 553 526 */ 554 527 for_each_set_bit(bit, indio_dev->active_scan_mask, 555 528 indio_dev->num_channels) { 556 - struct iio_chan_spec const *chan = indio_dev->channels + bit; 529 + struct iio_chan_spec const *chan = 530 + at91_adc_chan_get(indio_dev, bit); 557 531 532 + if (!chan) 533 + continue; 558 534 if (st->dma_st.dma_chan) 559 535 at91_adc_readl(st, chan->address); 560 536 } ··· 617 587 618 588 for_each_set_bit(bit, 
indio_dev->active_scan_mask, 619 589 indio_dev->num_channels) { 620 - struct iio_chan_spec const *chan = indio_dev->channels + bit; 590 + struct iio_chan_spec const *chan = 591 + at91_adc_chan_get(indio_dev, bit); 621 592 593 + if (!chan) 594 + continue; 622 595 st->buffer[i] = at91_adc_readl(st, chan->address); 623 596 i++; 624 597 }
+14 -3
drivers/iio/adc/stm32-dfsdm-adc.c
··· 144 144 * Leave as soon as if exact resolution if reached. 145 145 * Otherwise the higher resolution below 32 bits is kept. 146 146 */ 147 + fl->res = 0; 147 148 for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) { 148 149 for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) { 149 150 if (fast) ··· 194 193 } 195 194 } 196 195 197 - if (!fl->fosr) 196 + if (!fl->res) 198 197 return -EINVAL; 199 198 200 199 return 0; ··· 771 770 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 772 771 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; 773 772 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel]; 774 - unsigned int spi_freq = adc->spi_freq; 773 + unsigned int spi_freq; 775 774 int ret = -EINVAL; 776 775 777 776 switch (mask) { ··· 785 784 case IIO_CHAN_INFO_SAMP_FREQ: 786 785 if (!val) 787 786 return -EINVAL; 788 - if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL) 787 + 788 + switch (ch->src) { 789 + case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL: 789 790 spi_freq = adc->dfsdm->spi_master_freq; 791 + break; 792 + case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING: 793 + case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING: 794 + spi_freq = adc->dfsdm->spi_master_freq / 2; 795 + break; 796 + default: 797 + spi_freq = adc->spi_freq; 798 + } 790 799 791 800 if (spi_freq % val) 792 801 dev_warn(&indio_dev->dev,
+1 -1
drivers/iio/buffer/industrialio-buffer-dma.c
··· 587 587 * Should be used as the set_length callback for iio_buffer_access_ops 588 588 * struct for DMA buffers. 589 589 */ 590 - int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length) 590 + int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length) 591 591 { 592 592 /* Avoid an invalid state */ 593 593 if (length < 2)
+9 -2
drivers/iio/buffer/kfifo_buf.c
··· 22 22 #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) 23 23 24 24 static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, 25 - int bytes_per_datum, int length) 25 + size_t bytes_per_datum, unsigned int length) 26 26 { 27 27 if ((length == 0) || (bytes_per_datum == 0)) 28 + return -EINVAL; 29 + 30 + /* 31 + * Make sure we don't overflow an unsigned int after kfifo rounds up to 32 + * the next power of 2. 33 + */ 34 + if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum) 28 35 return -EINVAL; 29 36 30 37 return __kfifo_alloc((struct __kfifo *)&buf->kf, length, ··· 74 67 return 0; 75 68 } 76 69 77 - static int iio_set_length_kfifo(struct iio_buffer *r, int length) 70 + static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length) 78 71 { 79 72 /* Avoid an invalid state */ 80 73 if (length < 2)
+4 -4
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
··· 178 178 #ifdef CONFIG_PM 179 179 int ret; 180 180 181 - atomic_set(&st->user_requested_state, state); 182 - 183 181 if (atomic_add_unless(&st->runtime_pm_enable, 1, 1)) 184 182 pm_runtime_enable(&st->pdev->dev); 185 183 186 - if (state) 184 + if (state) { 185 + atomic_inc(&st->user_requested_state); 187 186 ret = pm_runtime_get_sync(&st->pdev->dev); 188 - else { 187 + } else { 188 + atomic_dec(&st->user_requested_state); 189 189 pm_runtime_mark_last_busy(&st->pdev->dev); 190 190 pm_runtime_use_autosuspend(&st->pdev->dev); 191 191 ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+1 -1
drivers/infiniband/core/cache.c
··· 502 502 return -EINVAL; 503 503 504 504 if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) 505 - return -EAGAIN; 505 + return -EINVAL; 506 506 507 507 memcpy(gid, &table->data_vec[index].gid, sizeof(*gid)); 508 508 if (attr) {
+54 -1
drivers/infiniband/hw/bnxt_re/main.c
··· 185 185 bnxt_re_ib_unreg(rdev, false); 186 186 } 187 187 188 + static void bnxt_re_stop_irq(void *handle) 189 + { 190 + struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; 191 + struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw; 192 + struct bnxt_qplib_nq *nq; 193 + int indx; 194 + 195 + for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) { 196 + nq = &rdev->nq[indx - 1]; 197 + bnxt_qplib_nq_stop_irq(nq, false); 198 + } 199 + 200 + bnxt_qplib_rcfw_stop_irq(rcfw, false); 201 + } 202 + 203 + static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) 204 + { 205 + struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; 206 + struct bnxt_msix_entry *msix_ent = rdev->msix_entries; 207 + struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw; 208 + struct bnxt_qplib_nq *nq; 209 + int indx, rc; 210 + 211 + if (!ent) { 212 + /* Not setting the f/w timeout bit in rcfw. 213 + * During the driver unload the first command 214 + * to f/w will timeout and that will set the 215 + * timeout bit. 216 + */ 217 + dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n"); 218 + return; 219 + } 220 + 221 + /* Vectors may change after restart, so update with new vectors 222 + * in device sctructure. 
223 + */ 224 + for (indx = 0; indx < rdev->num_msix; indx++) 225 + rdev->msix_entries[indx].vector = ent[indx].vector; 226 + 227 + bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector, 228 + false); 229 + for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) { 230 + nq = &rdev->nq[indx - 1]; 231 + rc = bnxt_qplib_nq_start_irq(nq, indx - 1, 232 + msix_ent[indx].vector, false); 233 + if (rc) 234 + dev_warn(rdev_to_dev(rdev), 235 + "Failed to reinit NQ index %d\n", indx - 1); 236 + } 237 + } 238 + 188 239 static struct bnxt_ulp_ops bnxt_re_ulp_ops = { 189 240 .ulp_async_notifier = NULL, 190 241 .ulp_stop = bnxt_re_stop, 191 242 .ulp_start = bnxt_re_start, 192 243 .ulp_sriov_config = bnxt_re_sriov_config, 193 - .ulp_shutdown = bnxt_re_shutdown 244 + .ulp_shutdown = bnxt_re_shutdown, 245 + .ulp_irq_stop = bnxt_re_stop_irq, 246 + .ulp_irq_restart = bnxt_re_start_irq 194 247 }; 195 248 196 249 /* RoCE -> Net driver */
+61 -35
drivers/infiniband/hw/bnxt_re/qplib_fp.c
··· 336 336 return IRQ_HANDLED; 337 337 } 338 338 339 + void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill) 340 + { 341 + tasklet_disable(&nq->worker); 342 + /* Mask h/w interrupt */ 343 + NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements); 344 + /* Sync with last running IRQ handler */ 345 + synchronize_irq(nq->vector); 346 + if (kill) 347 + tasklet_kill(&nq->worker); 348 + if (nq->requested) { 349 + irq_set_affinity_hint(nq->vector, NULL); 350 + free_irq(nq->vector, nq); 351 + nq->requested = false; 352 + } 353 + } 354 + 339 355 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) 340 356 { 341 357 if (nq->cqn_wq) { 342 358 destroy_workqueue(nq->cqn_wq); 343 359 nq->cqn_wq = NULL; 344 360 } 345 - /* Make sure the HW is stopped! */ 346 - synchronize_irq(nq->vector); 347 - tasklet_disable(&nq->worker); 348 - tasklet_kill(&nq->worker); 349 361 350 - if (nq->requested) { 351 - irq_set_affinity_hint(nq->vector, NULL); 352 - free_irq(nq->vector, nq); 353 - nq->requested = false; 354 - } 362 + /* Make sure the HW is stopped! 
*/ 363 + bnxt_qplib_nq_stop_irq(nq, true); 364 + 355 365 if (nq->bar_reg_iomem) 356 366 iounmap(nq->bar_reg_iomem); 357 367 nq->bar_reg_iomem = NULL; ··· 369 359 nq->cqn_handler = NULL; 370 360 nq->srqn_handler = NULL; 371 361 nq->vector = 0; 362 + } 363 + 364 + int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, 365 + int msix_vector, bool need_init) 366 + { 367 + int rc; 368 + 369 + if (nq->requested) 370 + return -EFAULT; 371 + 372 + nq->vector = msix_vector; 373 + if (need_init) 374 + tasklet_init(&nq->worker, bnxt_qplib_service_nq, 375 + (unsigned long)nq); 376 + else 377 + tasklet_enable(&nq->worker); 378 + 379 + snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx); 380 + rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq); 381 + if (rc) 382 + return rc; 383 + 384 + cpumask_clear(&nq->mask); 385 + cpumask_set_cpu(nq_indx, &nq->mask); 386 + rc = irq_set_affinity_hint(nq->vector, &nq->mask); 387 + if (rc) { 388 + dev_warn(&nq->pdev->dev, 389 + "QPLIB: set affinity failed; vector: %d nq_idx: %d\n", 390 + nq->vector, nq_indx); 391 + } 392 + nq->requested = true; 393 + NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements); 394 + 395 + return rc; 372 396 } 373 397 374 398 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, ··· 416 372 resource_size_t nq_base; 417 373 int rc = -1; 418 374 419 - nq->pdev = pdev; 420 - nq->vector = msix_vector; 421 375 if (cqn_handler) 422 376 nq->cqn_handler = cqn_handler; 423 377 424 378 if (srqn_handler) 425 379 nq->srqn_handler = srqn_handler; 426 380 427 - tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq); 428 - 429 381 /* Have a task to schedule CQ notifiers in post send case */ 430 382 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq"); 431 383 if (!nq->cqn_wq) 432 - goto fail; 384 + return -ENOMEM; 433 385 434 - nq->requested = false; 435 - memset(nq->name, 0, 32); 436 - sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx); 437 - rc 
= request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq); 438 - if (rc) { 439 - dev_err(&nq->pdev->dev, 440 - "Failed to request IRQ for NQ: %#x", rc); 441 - goto fail; 442 - } 443 - 444 - cpumask_clear(&nq->mask); 445 - cpumask_set_cpu(nq_idx, &nq->mask); 446 - rc = irq_set_affinity_hint(nq->vector, &nq->mask); 447 - if (rc) { 448 - dev_warn(&nq->pdev->dev, 449 - "QPLIB: set affinity failed; vector: %d nq_idx: %d\n", 450 - nq->vector, nq_idx); 451 - } 452 - 453 - nq->requested = true; 454 386 nq->bar_reg = NQ_CONS_PCI_BAR_REGION; 455 387 nq->bar_reg_off = bar_reg_offset; 456 388 nq_base = pci_resource_start(pdev, nq->bar_reg); ··· 439 419 rc = -ENOMEM; 440 420 goto fail; 441 421 } 442 - NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements); 422 + 423 + rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true); 424 + if (rc) { 425 + dev_err(&nq->pdev->dev, 426 + "QPLIB: Failed to request irq for nq-idx %d", nq_idx); 427 + goto fail; 428 + } 443 429 444 430 return 0; 445 431 fail:
+3
drivers/infiniband/hw/bnxt_re/qplib_fp.h
··· 467 467 struct bnxt_qplib_cq *cq; 468 468 }; 469 469 470 + void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill); 470 471 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq); 472 + int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, 473 + int msix_vector, bool need_init); 471 474 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, 472 475 int nq_idx, int msix_vector, int bar_reg_offset, 473 476 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
+43 -18
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 582 582 return -ENOMEM; 583 583 } 584 584 585 - void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 585 + void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill) 586 586 { 587 - unsigned long indx; 588 - 589 - /* Make sure the HW channel is stopped! */ 590 - synchronize_irq(rcfw->vector); 591 587 tasklet_disable(&rcfw->worker); 592 - tasklet_kill(&rcfw->worker); 588 + /* Mask h/w interrupts */ 589 + CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons, 590 + rcfw->creq.max_elements); 591 + /* Sync with last running IRQ-handler */ 592 + synchronize_irq(rcfw->vector); 593 + if (kill) 594 + tasklet_kill(&rcfw->worker); 593 595 594 596 if (rcfw->requested) { 595 597 free_irq(rcfw->vector, rcfw); 596 598 rcfw->requested = false; 597 599 } 600 + } 601 + 602 + void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 603 + { 604 + unsigned long indx; 605 + 606 + bnxt_qplib_rcfw_stop_irq(rcfw, true); 607 + 598 608 if (rcfw->cmdq_bar_reg_iomem) 599 609 iounmap(rcfw->cmdq_bar_reg_iomem); 600 610 rcfw->cmdq_bar_reg_iomem = NULL; ··· 622 612 623 613 rcfw->aeq_handler = NULL; 624 614 rcfw->vector = 0; 615 + } 616 + 617 + int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, 618 + bool need_init) 619 + { 620 + int rc; 621 + 622 + if (rcfw->requested) 623 + return -EFAULT; 624 + 625 + rcfw->vector = msix_vector; 626 + if (need_init) 627 + tasklet_init(&rcfw->worker, 628 + bnxt_qplib_service_creq, (unsigned long)rcfw); 629 + else 630 + tasklet_enable(&rcfw->worker); 631 + rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0, 632 + "bnxt_qplib_creq", rcfw); 633 + if (rc) 634 + return rc; 635 + rcfw->requested = true; 636 + CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons, 637 + rcfw->creq.max_elements); 638 + 639 + return 0; 625 640 } 626 641 627 642 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, ··· 710 675 rcfw->creq_qp_event_processed = 0; 711 676 rcfw->creq_func_event_processed = 0; 712 677 713 - 
rcfw->vector = msix_vector; 714 678 if (aeq_handler) 715 679 rcfw->aeq_handler = aeq_handler; 680 + init_waitqueue_head(&rcfw->waitq); 716 681 717 - tasklet_init(&rcfw->worker, bnxt_qplib_service_creq, 718 - (unsigned long)rcfw); 719 - 720 - rcfw->requested = false; 721 - rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0, 722 - "bnxt_qplib_creq", rcfw); 682 + rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true); 723 683 if (rc) { 724 684 dev_err(&rcfw->pdev->dev, 725 685 "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc); 726 686 bnxt_qplib_disable_rcfw_channel(rcfw); 727 687 return rc; 728 688 } 729 - rcfw->requested = true; 730 - 731 - init_waitqueue_head(&rcfw->waitq); 732 - 733 - CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements); 734 689 735 690 init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]); 736 691 init.cmdq_size_cmdq_lvl = cpu_to_le16(
+3
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
··· 195 195 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); 196 196 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, 197 197 struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz); 198 + void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill); 198 199 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); 200 + int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, 201 + bool need_init); 199 202 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, 200 203 struct bnxt_qplib_rcfw *rcfw, 201 204 int msix_vector,
+1 -1
drivers/infiniband/ulp/srpt/Kconfig
··· 1 1 config INFINIBAND_SRPT 2 2 tristate "InfiniBand SCSI RDMA Protocol target support" 3 - depends on INFINIBAND_ADDR_TRANS && TARGET_CORE 3 + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE 4 4 ---help--- 5 5 6 6 Support for the SCSI RDMA Protocol (SRP) Target driver. The
+11 -11
drivers/input/mouse/elan_i2c_smbus.c
··· 130 130 bool max_baseline, u8 *value) 131 131 { 132 132 int error; 133 - u8 val[3]; 133 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 134 134 135 135 error = i2c_smbus_read_block_data(client, 136 136 max_baseline ? ··· 149 149 bool iap, u8 *version) 150 150 { 151 151 int error; 152 - u8 val[3]; 152 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 153 153 154 154 error = i2c_smbus_read_block_data(client, 155 155 iap ? ETP_SMBUS_IAP_VERSION_CMD : ··· 170 170 u8 *clickpad) 171 171 { 172 172 int error; 173 - u8 val[3]; 173 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 174 174 175 175 error = i2c_smbus_read_block_data(client, 176 176 ETP_SMBUS_SM_VERSION_CMD, val); ··· 188 188 static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) 189 189 { 190 190 int error; 191 - u8 val[3]; 191 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 192 192 193 193 error = i2c_smbus_read_block_data(client, 194 194 ETP_SMBUS_UNIQUEID_CMD, val); ··· 205 205 bool iap, u16 *csum) 206 206 { 207 207 int error; 208 - u8 val[3]; 208 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 209 209 210 210 error = i2c_smbus_read_block_data(client, 211 211 iap ? 
ETP_SMBUS_FW_CHECKSUM_CMD : ··· 226 226 { 227 227 int ret; 228 228 int error; 229 - u8 val[3]; 229 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 230 230 231 231 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val); 232 232 if (ret != 3) { ··· 246 246 { 247 247 int ret; 248 248 int error; 249 - u8 val[3]; 249 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 250 250 251 251 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val); 252 252 if (ret != 3) { ··· 267 267 { 268 268 int ret; 269 269 int error; 270 - u8 val[3]; 270 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 271 271 272 272 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val); 273 273 if (ret != 3) { ··· 294 294 { 295 295 int error; 296 296 u16 constant; 297 - u8 val[3]; 297 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 298 298 299 299 error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val); 300 300 if (error < 0) { ··· 345 345 int len; 346 346 int error; 347 347 enum tp_mode mode; 348 - u8 val[3]; 348 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 349 349 u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06}; 350 350 u16 password; 351 351 ··· 419 419 struct device *dev = &client->dev; 420 420 int error; 421 421 u16 result; 422 - u8 val[3]; 422 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; 423 423 424 424 /* 425 425 * Due to the limitation of smbus protocol limiting
+6
drivers/input/mouse/synaptics.c
··· 172 172 "LEN0048", /* X1 Carbon 3 */ 173 173 "LEN0046", /* X250 */ 174 174 "LEN004a", /* W541 */ 175 + "LEN0071", /* T480 */ 176 + "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ 177 + "LEN0073", /* X1 Carbon G5 (Elantech) */ 178 + "LEN0092", /* X1 Carbon 6 */ 179 + "LEN0096", /* X280 */ 180 + "LEN0097", /* X280 -> ALPS trackpoint */ 175 181 "LEN200f", /* T450s */ 176 182 NULL 177 183 };
+13
drivers/net/dsa/b53/b53_common.c
··· 1768 1768 .duplex_reg = B53_DUPLEX_STAT_FE, 1769 1769 }, 1770 1770 { 1771 + .chip_id = BCM5389_DEVICE_ID, 1772 + .dev_name = "BCM5389", 1773 + .vlans = 4096, 1774 + .enabled_ports = 0x1f, 1775 + .arl_entries = 4, 1776 + .cpu_port = B53_CPU_PORT, 1777 + .vta_regs = B53_VTA_REGS, 1778 + .duplex_reg = B53_DUPLEX_STAT_GE, 1779 + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1780 + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1781 + }, 1782 + { 1771 1783 .chip_id = BCM5395_DEVICE_ID, 1772 1784 .dev_name = "BCM5395", 1773 1785 .vlans = 4096, ··· 2111 2099 else 2112 2100 dev->chip_id = BCM5365_DEVICE_ID; 2113 2101 break; 2102 + case BCM5389_DEVICE_ID: 2114 2103 case BCM5395_DEVICE_ID: 2115 2104 case BCM5397_DEVICE_ID: 2116 2105 case BCM5398_DEVICE_ID:
+4 -1
drivers/net/dsa/b53/b53_mdio.c
··· 285 285 #define B53_BRCM_OUI_1 0x0143bc00 286 286 #define B53_BRCM_OUI_2 0x03625c00 287 287 #define B53_BRCM_OUI_3 0x00406000 288 + #define B53_BRCM_OUI_4 0x01410c00 288 289 289 290 static int b53_mdio_probe(struct mdio_device *mdiodev) 290 291 { ··· 312 311 */ 313 312 if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 && 314 313 (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 && 315 - (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) { 314 + (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 && 315 + (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) { 316 316 dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id); 317 317 return -ENODEV; 318 318 } ··· 362 360 { .compatible = "brcm,bcm53125" }, 363 361 { .compatible = "brcm,bcm53128" }, 364 362 { .compatible = "brcm,bcm5365" }, 363 + { .compatible = "brcm,bcm5389" }, 365 364 { .compatible = "brcm,bcm5395" }, 366 365 { .compatible = "brcm,bcm5397" }, 367 366 { .compatible = "brcm,bcm5398" },
+1
drivers/net/dsa/b53/b53_priv.h
··· 48 48 enum { 49 49 BCM5325_DEVICE_ID = 0x25, 50 50 BCM5365_DEVICE_ID = 0x65, 51 + BCM5389_DEVICE_ID = 0x89, 51 52 BCM5395_DEVICE_ID = 0x95, 52 53 BCM5397_DEVICE_ID = 0x97, 53 54 BCM5398_DEVICE_ID = 0x98,
+3 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 3309 3309 if ((val & POST_STAGE_FAT_LOG_START) 3310 3310 != POST_STAGE_FAT_LOG_START && 3311 3311 (val & POST_STAGE_ARMFW_UE) 3312 - != POST_STAGE_ARMFW_UE) 3312 + != POST_STAGE_ARMFW_UE && 3313 + (val & POST_STAGE_RECOVERABLE_ERR) 3314 + != POST_STAGE_RECOVERABLE_ERR) 3313 3315 return; 3314 3316 } 3315 3317
+4 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 9049 9049 { 9050 9050 const struct tc_action *a; 9051 9051 LIST_HEAD(actions); 9052 - int err; 9053 9052 9054 9053 if (!tcf_exts_has_actions(exts)) 9055 9054 return -EINVAL; ··· 9069 9070 9070 9071 if (!dev) 9071 9072 return -EINVAL; 9072 - err = handle_redirect_action(adapter, dev->ifindex, queue, 9073 - action); 9074 - if (err == 0) 9075 - return err; 9073 + return handle_redirect_action(adapter, dev->ifindex, 9074 + queue, action); 9076 9075 } 9076 + 9077 + return -EINVAL; 9077 9078 } 9078 9079 9079 9080 return -EINVAL;
+5
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 4422 4422 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4423 4423 return -EINVAL; 4424 4424 } 4425 + if (is_vlan_dev(upper_dev) && 4426 + vlan_dev_vlan_id(upper_dev) == 1) { 4427 + NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic"); 4428 + return -EINVAL; 4429 + } 4425 4430 break; 4426 4431 case NETDEV_CHANGEUPPER: 4427 4432 upper_dev = info->upper_dev;
+1 -1
drivers/net/ethernet/natsemi/sonic.c
··· 84 84 for (i = 0; i < SONIC_NUM_RRS; i++) { 85 85 dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), 86 86 SONIC_RBSIZE, DMA_FROM_DEVICE); 87 - if (!laddr) { 87 + if (dma_mapping_error(lp->device, laddr)) { 88 88 while(i > 0) { /* free any that were mapped successfully */ 89 89 i--; 90 90 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
+2 -2
drivers/net/ethernet/socionext/netsec.c
··· 1681 1681 if (ret) 1682 1682 goto unreg_napi; 1683 1683 1684 - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) 1685 - dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n"); 1684 + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) 1685 + dev_warn(&pdev->dev, "Failed to set DMA mask\n"); 1686 1686 1687 1687 ret = register_netdev(ndev); 1688 1688 if (ret) {
+12 -10
drivers/net/ethernet/ti/davinci_emac.c
··· 1873 1873 if (IS_ERR(priv->txchan)) { 1874 1874 dev_err(&pdev->dev, "error initializing tx dma channel\n"); 1875 1875 rc = PTR_ERR(priv->txchan); 1876 - goto no_cpdma_chan; 1876 + goto err_free_dma; 1877 1877 } 1878 1878 1879 1879 priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, ··· 1881 1881 if (IS_ERR(priv->rxchan)) { 1882 1882 dev_err(&pdev->dev, "error initializing rx dma channel\n"); 1883 1883 rc = PTR_ERR(priv->rxchan); 1884 - goto no_cpdma_chan; 1884 + goto err_free_txchan; 1885 1885 } 1886 1886 1887 1887 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1888 1888 if (!res) { 1889 1889 dev_err(&pdev->dev, "error getting irq res\n"); 1890 1890 rc = -ENOENT; 1891 - goto no_cpdma_chan; 1891 + goto err_free_rxchan; 1892 1892 } 1893 1893 ndev->irq = res->start; 1894 1894 ··· 1914 1914 pm_runtime_put_noidle(&pdev->dev); 1915 1915 dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", 1916 1916 __func__, rc); 1917 - goto no_cpdma_chan; 1917 + goto err_napi_del; 1918 1918 } 1919 1919 1920 1920 /* register the network device */ ··· 1924 1924 dev_err(&pdev->dev, "error in register_netdev\n"); 1925 1925 rc = -ENODEV; 1926 1926 pm_runtime_put(&pdev->dev); 1927 - goto no_cpdma_chan; 1927 + goto err_napi_del; 1928 1928 } 1929 1929 1930 1930 ··· 1937 1937 1938 1938 return 0; 1939 1939 1940 - no_cpdma_chan: 1941 - if (priv->txchan) 1942 - cpdma_chan_destroy(priv->txchan); 1943 - if (priv->rxchan) 1944 - cpdma_chan_destroy(priv->rxchan); 1940 + err_napi_del: 1941 + netif_napi_del(&priv->napi); 1942 + err_free_rxchan: 1943 + cpdma_chan_destroy(priv->rxchan); 1944 + err_free_txchan: 1945 + cpdma_chan_destroy(priv->txchan); 1946 + err_free_dma: 1945 1947 cpdma_ctlr_destroy(priv->dma); 1946 1948 no_pdata: 1947 1949 if (of_phy_is_fixed_link(np))
+9 -6
drivers/net/tun.c
··· 1663 1663 else 1664 1664 *skb_xdp = 0; 1665 1665 1666 - preempt_disable(); 1666 + local_bh_disable(); 1667 1667 rcu_read_lock(); 1668 1668 xdp_prog = rcu_dereference(tun->xdp_prog); 1669 1669 if (xdp_prog && !*skb_xdp) { ··· 1688 1688 if (err) 1689 1689 goto err_redirect; 1690 1690 rcu_read_unlock(); 1691 - preempt_enable(); 1691 + local_bh_enable(); 1692 1692 return NULL; 1693 1693 case XDP_TX: 1694 1694 get_page(alloc_frag->page); ··· 1697 1697 goto err_redirect; 1698 1698 tun_xdp_flush(tun->dev); 1699 1699 rcu_read_unlock(); 1700 - preempt_enable(); 1700 + local_bh_enable(); 1701 1701 return NULL; 1702 1702 case XDP_PASS: 1703 1703 delta = orig_data - xdp.data; ··· 1717 1717 skb = build_skb(buf, buflen); 1718 1718 if (!skb) { 1719 1719 rcu_read_unlock(); 1720 - preempt_enable(); 1720 + local_bh_enable(); 1721 1721 return ERR_PTR(-ENOMEM); 1722 1722 } 1723 1723 ··· 1727 1727 alloc_frag->offset += buflen; 1728 1728 1729 1729 rcu_read_unlock(); 1730 - preempt_enable(); 1730 + local_bh_enable(); 1731 1731 1732 1732 return skb; 1733 1733 ··· 1735 1735 put_page(alloc_frag->page); 1736 1736 err_xdp: 1737 1737 rcu_read_unlock(); 1738 - preempt_enable(); 1738 + local_bh_enable(); 1739 1739 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1740 1740 return NULL; 1741 1741 } ··· 1931 1931 struct bpf_prog *xdp_prog; 1932 1932 int ret; 1933 1933 1934 + local_bh_disable(); 1934 1935 rcu_read_lock(); 1935 1936 xdp_prog = rcu_dereference(tun->xdp_prog); 1936 1937 if (xdp_prog) { 1937 1938 ret = do_xdp_generic(xdp_prog, skb); 1938 1939 if (ret != XDP_PASS) { 1939 1940 rcu_read_unlock(); 1941 + local_bh_enable(); 1940 1942 return total_len; 1941 1943 } 1942 1944 } 1943 1945 rcu_read_unlock(); 1946 + local_bh_enable(); 1944 1947 } 1945 1948 1946 1949 /* Compute the costly rx hash only if needed for flow updates.
+1 -1
drivers/net/usb/cdc_mbim.c
··· 609 609 */ 610 610 static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = { 611 611 .description = "CDC MBIM", 612 - .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, 612 + .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP, 613 613 .bind = cdc_mbim_bind, 614 614 .unbind = cdc_mbim_unbind, 615 615 .manage_power = cdc_mbim_manage_power,
+1
drivers/net/usb/qmi_wwan.c
··· 1103 1103 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 1104 1104 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ 1105 1105 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, 1106 + {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */ 1106 1107 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 1107 1108 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 1108 1109 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
+5 -5
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 1600 1600 struct iwl_trans *trans) 1601 1601 { 1602 1602 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1603 - int max_irqs, num_irqs, i, ret, nr_online_cpus; 1603 + int max_irqs, num_irqs, i, ret; 1604 1604 u16 pci_cmd; 1605 1605 1606 1606 if (!trans->cfg->mq_rx_supported) 1607 1607 goto enable_msi; 1608 1608 1609 - nr_online_cpus = num_online_cpus(); 1610 - max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES); 1609 + max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES); 1611 1610 for (i = 0; i < max_irqs; i++) 1612 1611 trans_pcie->msix_entries[i].entry = i; 1613 1612 ··· 1632 1633 * Two interrupts less: non rx causes shared with FBQ and RSS. 1633 1634 * More than two interrupts: we will use fewer RSS queues. 1634 1635 */ 1635 - if (num_irqs <= nr_online_cpus) { 1636 + if (num_irqs <= max_irqs - 2) { 1636 1637 trans_pcie->trans->num_rx_queues = num_irqs + 1; 1637 1638 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | 1638 1639 IWL_SHARED_IRQ_FIRST_RSS; 1639 - } else if (num_irqs == nr_online_cpus + 1) { 1640 + } else if (num_irqs == max_irqs - 1) { 1640 1641 trans_pcie->trans->num_rx_queues = num_irqs; 1641 1642 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; 1642 1643 } else { 1643 1644 trans_pcie->trans->num_rx_queues = num_irqs - 1; 1644 1645 } 1646 + WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); 1645 1647 1646 1648 trans_pcie->alloc_vecs = num_irqs; 1647 1649 trans_pcie->msix_enabled = true;
+3 -4
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
··· 372 372 373 373 /* 374 374 * Determine IFS values 375 - * - Use TXOP_BACKOFF for probe and management frames except beacons 375 + * - Use TXOP_BACKOFF for management frames except beacons 376 376 * - Use TXOP_SIFS for fragment bursts 377 377 * - Use TXOP_HTTXOP for everything else 378 378 * 379 379 * Note: rt2800 devices won't use CTS protection (if used) 380 380 * for frames not transmitted with TXOP_HTTXOP 381 381 */ 382 - if ((ieee80211_is_mgmt(hdr->frame_control) && 383 - !ieee80211_is_beacon(hdr->frame_control)) || 384 - (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) 382 + if (ieee80211_is_mgmt(hdr->frame_control) && 383 + !ieee80211_is_beacon(hdr->frame_control)) 385 384 txdesc->u.ht.txop = TXOP_BACKOFF; 386 385 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) 387 386 txdesc->u.ht.txop = TXOP_SIFS;
+1 -1
drivers/nvme/host/Kconfig
··· 27 27 28 28 config NVME_RDMA 29 29 tristate "NVM Express over Fabrics RDMA host driver" 30 - depends on INFINIBAND_ADDR_TRANS && BLOCK 30 + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK 31 31 select NVME_CORE 32 32 select NVME_FABRICS 33 33 select SG_POOL
+1 -1
drivers/nvme/host/core.c
··· 1447 1447 if (ns->lba_shift == 0) 1448 1448 ns->lba_shift = 9; 1449 1449 ns->noiob = le16_to_cpu(id->noiob); 1450 - ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); 1451 1450 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); 1451 + ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); 1452 1452 /* the PI implementation requires metadata equal t10 pi tuple size */ 1453 1453 if (ns->ms == sizeof(struct t10_pi_tuple)) 1454 1454 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+1 -1
drivers/nvme/target/Kconfig
··· 27 27 28 28 config NVME_TARGET_RDMA 29 29 tristate "NVMe over Fabrics RDMA target support" 30 - depends on INFINIBAND_ADDR_TRANS 30 + depends on INFINIBAND && INFINIBAND_ADDR_TRANS 31 31 depends on NVME_TARGET 32 32 select SGL_ALLOC 33 33 help
+13 -10
drivers/platform/x86/asus-wmi.c
··· 163 163 164 164 static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; 165 165 166 + static bool ashs_present(void) 167 + { 168 + int i = 0; 169 + while (ashs_ids[i]) { 170 + if (acpi_dev_found(ashs_ids[i++])) 171 + return true; 172 + } 173 + return false; 174 + } 175 + 166 176 struct bios_args { 167 177 u32 arg0; 168 178 u32 arg1; ··· 1035 1025 1036 1026 static void asus_wmi_rfkill_exit(struct asus_wmi *asus) 1037 1027 { 1028 + if (asus->driver->wlan_ctrl_by_user && ashs_present()) 1029 + return; 1030 + 1038 1031 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); 1039 1032 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); 1040 1033 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); ··· 2132 2119 2133 2120 pr_info("Number of fans: %d\n", asus->asus_hwmon_num_fans); 2134 2121 return 0; 2135 - } 2136 - 2137 - static bool ashs_present(void) 2138 - { 2139 - int i = 0; 2140 - while (ashs_ids[i]) { 2141 - if (acpi_dev_found(ashs_ids[i++])) 2142 - return true; 2143 - } 2144 - return false; 2145 2122 } 2146 2123 2147 2124 /*
+5 -2
drivers/s390/block/dasd.c
··· 3034 3034 cqr->callback_data = req; 3035 3035 cqr->status = DASD_CQR_FILLED; 3036 3036 cqr->dq = dq; 3037 - req->completion_data = cqr; 3037 + *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr; 3038 + 3038 3039 blk_mq_start_request(req); 3039 3040 spin_lock(&block->queue_lock); 3040 3041 list_add_tail(&cqr->blocklist, &block->ccw_queue); ··· 3059 3058 */ 3060 3059 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3061 3060 { 3062 - struct dasd_ccw_req *cqr = req->completion_data; 3063 3061 struct dasd_block *block = req->q->queuedata; 3064 3062 struct dasd_device *device; 3063 + struct dasd_ccw_req *cqr; 3065 3064 unsigned long flags; 3066 3065 int rc = 0; 3067 3066 3067 + cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)); 3068 3068 if (!cqr) 3069 3069 return BLK_EH_NOT_HANDLED; 3070 3070 ··· 3171 3169 int rc; 3172 3170 3173 3171 block->tag_set.ops = &dasd_mq_ops; 3172 + block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *); 3174 3173 block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; 3175 3174 block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; 3176 3175 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+20 -2
drivers/scsi/scsi_transport_srp.c
··· 51 51 struct transport_container rport_attr_cont; 52 52 }; 53 53 54 + static int scsi_is_srp_rport(const struct device *dev); 55 + 54 56 #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) 55 57 56 58 #define dev_to_rport(d) container_of(d, struct srp_rport, dev) ··· 62 60 return dev_to_shost(r->dev.parent); 63 61 } 64 62 63 + static int find_child_rport(struct device *dev, void *data) 64 + { 65 + struct device **child = data; 66 + 67 + if (scsi_is_srp_rport(dev)) { 68 + WARN_ON_ONCE(*child); 69 + *child = dev; 70 + } 71 + return 0; 72 + } 73 + 65 74 static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) 66 75 { 67 - return transport_class_to_srp_rport(&shost->shost_gendev); 76 + struct device *child = NULL; 77 + 78 + WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child, 79 + find_child_rport) < 0); 80 + return child ? dev_to_rport(child) : NULL; 68 81 } 69 82 70 83 /** ··· 617 600 struct srp_rport *rport = shost_to_rport(shost); 618 601 619 602 pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); 620 - return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && 603 + return rport && rport->fast_io_fail_tmo < 0 && 604 + rport->dev_loss_tmo < 0 && 621 605 i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 622 606 BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 623 607 }
-36
drivers/soc/lantiq/gphy.c
··· 30 30 struct clk *gphy_clk_gate; 31 31 struct reset_control *gphy_reset; 32 32 struct reset_control *gphy_reset2; 33 - struct notifier_block gphy_reboot_nb; 34 33 void __iomem *membase; 35 34 char *fw_name; 36 35 }; ··· 62 63 {}, 63 64 }; 64 65 MODULE_DEVICE_TABLE(of, xway_gphy_match); 65 - 66 - static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb) 67 - { 68 - return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb); 69 - } 70 - 71 - static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb, 72 - unsigned long code, void *unused) 73 - { 74 - struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb); 75 - 76 - if (priv) { 77 - reset_control_assert(priv->gphy_reset); 78 - reset_control_assert(priv->gphy_reset2); 79 - } 80 - 81 - return NOTIFY_DONE; 82 - } 83 66 84 67 static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, 85 68 dma_addr_t *dev_addr) ··· 186 205 reset_control_deassert(priv->gphy_reset); 187 206 reset_control_deassert(priv->gphy_reset2); 188 207 189 - /* assert the gphy reset because it can hang after a reboot: */ 190 - priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify; 191 - priv->gphy_reboot_nb.priority = -1; 192 - 193 - ret = register_reboot_notifier(&priv->gphy_reboot_nb); 194 - if (ret) 195 - dev_warn(dev, "Failed to register reboot notifier\n"); 196 - 197 208 platform_set_drvdata(pdev, priv); 198 209 199 210 return ret; ··· 193 220 194 221 static int xway_gphy_remove(struct platform_device *pdev) 195 222 { 196 - struct device *dev = &pdev->dev; 197 223 struct xway_gphy_priv *priv = platform_get_drvdata(pdev); 198 - int ret; 199 - 200 - reset_control_assert(priv->gphy_reset); 201 - reset_control_assert(priv->gphy_reset2); 202 224 203 225 iowrite32be(0, priv->membase); 204 226 205 227 clk_disable_unprepare(priv->gphy_clk_gate); 206 - 207 - ret = unregister_reboot_notifier(&priv->gphy_reboot_nb); 208 - if (ret) 209 - dev_warn(dev, "Failed to unregister reboot notifier\n"); 210 
228 211 229 return 0; 212 230 }
+1 -1
drivers/staging/lustre/lnet/Kconfig
··· 34 34 35 35 config LNET_XPRT_IB 36 36 tristate "LNET infiniband support" 37 - depends on LNET && PCI && INFINIBAND_ADDR_TRANS 37 + depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS 38 38 default LNET && INFINIBAND 39 39 help 40 40 This option allows the LNET users to use infiniband as an
+1 -1
drivers/thunderbolt/icm.c
··· 1255 1255 /* Map empty entries to null UUID */ 1256 1256 uuid[0] = 0; 1257 1257 uuid[1] = 0; 1258 - } else { 1258 + } else if (uuid[0] != 0 || uuid[1] != 0) { 1259 1259 /* Upper two DWs are always one's */ 1260 1260 uuid[2] = 0xffffffff; 1261 1261 uuid[3] = 0xffffffff;
+10 -15
drivers/vfio/vfio_iommu_type1.c
··· 404 404 { 405 405 unsigned long pfn = 0; 406 406 long ret, pinned = 0, lock_acct = 0; 407 + bool rsvd; 407 408 dma_addr_t iova = vaddr - dma->vaddr + dma->iova; 408 409 409 410 /* This code path is only user initiated */ ··· 415 414 if (ret) 416 415 return ret; 417 416 418 - if (is_invalid_reserved_pfn(*pfn_base)) { 419 - struct vm_area_struct *vma; 420 - 421 - down_read(&current->mm->mmap_sem); 422 - vma = find_vma_intersection(current->mm, vaddr, vaddr + 1); 423 - pinned = min_t(long, npage, vma_pages(vma)); 424 - up_read(&current->mm->mmap_sem); 425 - return pinned; 426 - } 427 - 428 417 pinned++; 418 + rsvd = is_invalid_reserved_pfn(*pfn_base); 429 419 430 420 /* 431 421 * Reserved pages aren't counted against the user, externally pinned 432 422 * pages are already counted against the user. 433 423 */ 434 - if (!vfio_find_vpfn(dma, iova)) { 424 + if (!rsvd && !vfio_find_vpfn(dma, iova)) { 435 425 if (!lock_cap && current->mm->locked_vm + 1 > limit) { 436 426 put_pfn(*pfn_base, dma->prot); 437 427 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, ··· 442 450 if (ret) 443 451 break; 444 452 445 - if (pfn != *pfn_base + pinned) { 453 + if (pfn != *pfn_base + pinned || 454 + rsvd != is_invalid_reserved_pfn(pfn)) { 446 455 put_pfn(pfn, dma->prot); 447 456 break; 448 457 } 449 458 450 - if (!vfio_find_vpfn(dma, iova)) { 459 + if (!rsvd && !vfio_find_vpfn(dma, iova)) { 451 460 if (!lock_cap && 452 461 current->mm->locked_vm + lock_acct + 1 > limit) { 453 462 put_pfn(pfn, dma->prot); ··· 466 473 467 474 unpin_out: 468 475 if (ret) { 469 - for (pfn = *pfn_base ; pinned ; pfn++, pinned--) 470 - put_pfn(pfn, dma->prot); 476 + if (!rsvd) { 477 + for (pfn = *pfn_base ; pinned ; pfn++, pinned--) 478 + put_pfn(pfn, dma->prot); 479 + } 471 480 472 481 return ret; 473 482 }
+24 -13
drivers/vhost/net.c
··· 108 108 /* vhost zerocopy support fields below: */ 109 109 /* last used idx for outstanding DMA zerocopy buffers */ 110 110 int upend_idx; 111 - /* first used idx for DMA done zerocopy buffers */ 111 + /* For TX, first used idx for DMA done zerocopy buffers 112 + * For RX, number of batched heads 113 + */ 112 114 int done_idx; 113 115 /* an array of userspace buffers info */ 114 116 struct ubuf_info *ubuf_info; ··· 631 629 return skb_queue_empty(&sk->sk_receive_queue); 632 630 } 633 631 632 + static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq) 633 + { 634 + struct vhost_virtqueue *vq = &nvq->vq; 635 + struct vhost_dev *dev = vq->dev; 636 + 637 + if (!nvq->done_idx) 638 + return; 639 + 640 + vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); 641 + nvq->done_idx = 0; 642 + } 643 + 634 644 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) 635 645 { 636 646 struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; ··· 652 638 int len = peek_head_len(rvq, sk); 653 639 654 640 if (!len && vq->busyloop_timeout) { 641 + /* Flush batched heads first */ 642 + vhost_rx_signal_used(rvq); 655 643 /* Both tx vq and rx socket were polled here */ 656 644 mutex_lock_nested(&vq->mutex, 1); 657 645 vhost_disable_notify(&net->dev, vq); ··· 781 765 }; 782 766 size_t total_len = 0; 783 767 int err, mergeable; 784 - s16 headcount, nheads = 0; 768 + s16 headcount; 785 769 size_t vhost_hlen, sock_hlen; 786 770 size_t vhost_len, sock_len; 787 771 struct socket *sock; ··· 810 794 while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { 811 795 sock_len += sock_hlen; 812 796 vhost_len = sock_len + vhost_hlen; 813 - headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len, 814 - &in, vq_log, &log, 797 + headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, 798 + vhost_len, &in, vq_log, &log, 815 799 likely(mergeable) ? UIO_MAXIOV : 1); 816 800 /* On error, stop handling until the next kick. 
*/ 817 801 if (unlikely(headcount < 0)) ··· 882 866 vhost_discard_vq_desc(vq, headcount); 883 867 goto out; 884 868 } 885 - nheads += headcount; 886 - if (nheads > VHOST_RX_BATCH) { 887 - vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, 888 - nheads); 889 - nheads = 0; 890 - } 869 + nvq->done_idx += headcount; 870 + if (nvq->done_idx > VHOST_RX_BATCH) 871 + vhost_rx_signal_used(nvq); 891 872 if (unlikely(vq_log)) 892 873 vhost_log_write(vq, vq_log, log, vhost_len); 893 874 total_len += vhost_len; ··· 896 883 } 897 884 vhost_net_enable_vq(net, vq); 898 885 out: 899 - if (nheads) 900 - vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, 901 - nheads); 886 + vhost_rx_signal_used(nvq); 902 887 mutex_unlock(&vq->mutex); 903 888 } 904 889
+3 -7
fs/afs/security.c
··· 372 372 mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file"); 373 373 374 374 if (S_ISDIR(inode->i_mode)) { 375 - if (mask & MAY_EXEC) { 375 + if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) { 376 376 if (!(access & AFS_ACE_LOOKUP)) 377 377 goto permission_denied; 378 - } else if (mask & MAY_READ) { 379 - if (!(access & AFS_ACE_LOOKUP)) 380 - goto permission_denied; 381 - } else if (mask & MAY_WRITE) { 378 + } 379 + if (mask & MAY_WRITE) { 382 380 if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */ 383 381 AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */ 384 382 goto permission_denied; 385 - } else { 386 - BUG(); 387 383 } 388 384 } else { 389 385 if (!(access & AFS_ACE_LOOKUP))
+10 -9
fs/afs/vlclient.c
··· 23 23 struct afs_uvldbentry__xdr *uvldb; 24 24 struct afs_vldb_entry *entry; 25 25 bool new_only = false; 26 - u32 tmp, nr_servers; 26 + u32 tmp, nr_servers, vlflags; 27 27 int i, ret; 28 28 29 29 _enter(""); ··· 55 55 new_only = true; 56 56 } 57 57 58 + vlflags = ntohl(uvldb->flags); 58 59 for (i = 0; i < nr_servers; i++) { 59 60 struct afs_uuid__xdr *xdr; 60 61 struct afs_uuid *uuid; ··· 65 64 if (tmp & AFS_VLSF_DONTUSE || 66 65 (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) 67 66 continue; 68 - if (tmp & AFS_VLSF_RWVOL) 67 + if (tmp & AFS_VLSF_RWVOL) { 69 68 entry->fs_mask[i] |= AFS_VOL_VTM_RW; 69 + if (vlflags & AFS_VLF_BACKEXISTS) 70 + entry->fs_mask[i] |= AFS_VOL_VTM_BAK; 71 + } 70 72 if (tmp & AFS_VLSF_ROVOL) 71 73 entry->fs_mask[i] |= AFS_VOL_VTM_RO; 72 - if (tmp & AFS_VLSF_BACKVOL) 73 - entry->fs_mask[i] |= AFS_VOL_VTM_BAK; 74 74 if (!entry->fs_mask[i]) 75 75 continue; 76 76 ··· 91 89 for (i = 0; i < AFS_MAXTYPES; i++) 92 90 entry->vid[i] = ntohl(uvldb->volumeId[i]); 93 91 94 - tmp = ntohl(uvldb->flags); 95 - if (tmp & AFS_VLF_RWEXISTS) 92 + if (vlflags & AFS_VLF_RWEXISTS) 96 93 __set_bit(AFS_VLDB_HAS_RW, &entry->flags); 97 - if (tmp & AFS_VLF_ROEXISTS) 94 + if (vlflags & AFS_VLF_ROEXISTS) 98 95 __set_bit(AFS_VLDB_HAS_RO, &entry->flags); 99 - if (tmp & AFS_VLF_BACKEXISTS) 96 + if (vlflags & AFS_VLF_BACKEXISTS) 100 97 __set_bit(AFS_VLDB_HAS_BAK, &entry->flags); 101 98 102 - if (!(tmp & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) { 99 + if (!(vlflags & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) { 103 100 entry->error = -ENOMEDIUM; 104 101 __set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags); 105 102 }
+1 -1
fs/cifs/Kconfig
··· 197 197 198 198 config CIFS_SMB_DIRECT 199 199 bool "SMB Direct support (Experimental)" 200 - depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y 200 + depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y 201 201 help 202 202 Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1. 203 203 SMB Direct allows transferring SMB packets over RDMA. If unsure,
+1
fs/inode.c
··· 178 178 mapping->a_ops = &empty_aops; 179 179 mapping->host = inode; 180 180 mapping->flags = 0; 181 + mapping->wb_err = 0; 181 182 atomic_set(&mapping->i_mmap_writable, 0); 182 183 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); 183 184 mapping->private_data = NULL;
+1 -1
include/drm/bridge/dw_hdmi.h
··· 151 151 struct drm_encoder *encoder, 152 152 const struct dw_hdmi_plat_data *plat_data); 153 153 154 - void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense); 154 + void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); 155 155 156 156 void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); 157 157 void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
+3 -3
include/linux/iio/buffer_impl.h
··· 53 53 int (*request_update)(struct iio_buffer *buffer); 54 54 55 55 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); 56 - int (*set_length)(struct iio_buffer *buffer, int length); 56 + int (*set_length)(struct iio_buffer *buffer, unsigned int length); 57 57 58 58 int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); 59 59 int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); ··· 72 72 */ 73 73 struct iio_buffer { 74 74 /** @length: Number of datums in buffer. */ 75 - int length; 75 + unsigned int length; 76 76 77 77 /** @bytes_per_datum: Size of individual datum including timestamp. */ 78 - int bytes_per_datum; 78 + size_t bytes_per_datum; 79 79 80 80 /** 81 81 * @access: Buffer access functions associated with the
+3 -1
include/trace/events/sched.h
··· 435 435 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); 436 436 __entry->pid = tsk->pid; 437 437 __entry->oldprio = tsk->prio; 438 - __entry->newprio = pi_task ? pi_task->prio : tsk->prio; 438 + __entry->newprio = pi_task ? 439 + min(tsk->normal_prio, pi_task->prio) : 440 + tsk->normal_prio; 439 441 /* XXX SCHED_DEADLINE bits missing */ 440 442 ), 441 443
+1
include/uapi/linux/bpf.h
··· 2332 2332 __u32 map_flags; 2333 2333 char name[BPF_OBJ_NAME_LEN]; 2334 2334 __u32 ifindex; 2335 + __u32 :32; 2335 2336 __u64 netns_dev; 2336 2337 __u64 netns_ino; 2337 2338 __u32 btf_id;
+2 -4
kernel/kthread.c
··· 193 193 194 194 void kthread_park_complete(struct task_struct *k) 195 195 { 196 - complete(&to_kthread(k)->parked); 196 + complete_all(&to_kthread(k)->parked); 197 197 } 198 198 199 199 static int kthread(void *_create) ··· 459 459 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) 460 460 __kthread_bind(k, kthread->cpu, TASK_PARKED); 461 461 462 + reinit_completion(&kthread->parked); 462 463 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 463 464 wake_up_state(k, TASK_PARKED); 464 465 } ··· 483 482 484 483 if (WARN_ON(k->flags & PF_EXITING)) 485 484 return -ENOSYS; 486 - 487 - if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))) 488 - return -EBUSY; 489 485 490 486 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 491 487 if (k != current) {
+1 -1
kernel/sched/topology.c
··· 1708 1708 rcu_read_unlock(); 1709 1709 1710 1710 if (rq && sched_debug_enabled) { 1711 - pr_info("span: %*pbl (max cpu_capacity = %lu)\n", 1711 + pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n", 1712 1712 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); 1713 1713 } 1714 1714
+6 -6
kernel/trace/trace.c
··· 893 893 EXPORT_SYMBOL_GPL(__trace_bputs); 894 894 895 895 #ifdef CONFIG_TRACER_SNAPSHOT 896 - static void tracing_snapshot_instance(struct trace_array *tr) 896 + void tracing_snapshot_instance(struct trace_array *tr) 897 897 { 898 898 struct tracer *tracer = tr->current_trace; 899 899 unsigned long flags; ··· 949 949 struct trace_buffer *size_buf, int cpu_id); 950 950 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); 951 951 952 - static int alloc_snapshot(struct trace_array *tr) 952 + int tracing_alloc_snapshot_instance(struct trace_array *tr) 953 953 { 954 954 int ret; 955 955 ··· 995 995 struct trace_array *tr = &global_trace; 996 996 int ret; 997 997 998 - ret = alloc_snapshot(tr); 998 + ret = tracing_alloc_snapshot_instance(tr); 999 999 WARN_ON(ret < 0); 1000 1000 1001 1001 return ret; ··· 5408 5408 5409 5409 #ifdef CONFIG_TRACER_MAX_TRACE 5410 5410 if (t->use_max_tr && !had_max_tr) { 5411 - ret = alloc_snapshot(tr); 5411 + ret = tracing_alloc_snapshot_instance(tr); 5412 5412 if (ret < 0) 5413 5413 goto out; 5414 5414 } ··· 6451 6451 } 6452 6452 #endif 6453 6453 if (!tr->allocated_snapshot) { 6454 - ret = alloc_snapshot(tr); 6454 + ret = tracing_alloc_snapshot_instance(tr); 6455 6455 if (ret < 0) 6456 6456 break; 6457 6457 } ··· 7179 7179 return ret; 7180 7180 7181 7181 out_reg: 7182 - ret = alloc_snapshot(tr); 7182 + ret = tracing_alloc_snapshot_instance(tr); 7183 7183 if (ret < 0) 7184 7184 goto out; 7185 7185
+11
kernel/trace/trace.h
··· 1817 1817 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } 1818 1818 #endif 1819 1819 1820 + #ifdef CONFIG_TRACER_SNAPSHOT 1821 + void tracing_snapshot_instance(struct trace_array *tr); 1822 + int tracing_alloc_snapshot_instance(struct trace_array *tr); 1823 + #else 1824 + static inline void tracing_snapshot_instance(struct trace_array *tr) { } 1825 + static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) 1826 + { 1827 + return 0; 1828 + } 1829 + #endif 1830 + 1820 1831 extern struct trace_iterator *tracepoint_print_iter; 1821 1832 1822 1833 #endif /* _LINUX_KERNEL_TRACE_H */
+11 -4
kernel/trace/trace_events_trigger.c
··· 483 483 struct trace_event_file *file; 484 484 485 485 list_for_each_entry(file, &tr->events, list) { 486 - struct event_trigger_data *data; 487 - list_for_each_entry_rcu(data, &file->triggers, list) { 486 + struct event_trigger_data *data, *n; 487 + list_for_each_entry_safe(data, n, &file->triggers, list) { 488 488 trace_event_trigger_enable_disable(file, 0); 489 + list_del_rcu(&data->list); 489 490 if (data->ops->free) 490 491 data->ops->free(data->ops, data); 491 492 } ··· 643 642 trigger_data->count = -1; 644 643 trigger_data->ops = trigger_ops; 645 644 trigger_data->cmd_ops = cmd_ops; 645 + trigger_data->private_data = file; 646 646 INIT_LIST_HEAD(&trigger_data->list); 647 647 INIT_LIST_HEAD(&trigger_data->named_list); 648 648 ··· 1055 1053 snapshot_trigger(struct event_trigger_data *data, void *rec, 1056 1054 struct ring_buffer_event *event) 1057 1055 { 1058 - tracing_snapshot(); 1056 + struct trace_event_file *file = data->private_data; 1057 + 1058 + if (file) 1059 + tracing_snapshot_instance(file->tr); 1060 + else 1061 + tracing_snapshot(); 1059 1062 } 1060 1063 1061 1064 static void ··· 1083 1076 { 1084 1077 int ret = register_trigger(glob, ops, data, file); 1085 1078 1086 - if (ret > 0 && tracing_alloc_snapshot() != 0) { 1079 + if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) { 1087 1080 unregister_trigger(glob, ops, data, file); 1088 1081 ret = 0; 1089 1082 }
+1 -1
mm/huge_memory.c
··· 2431 2431 __split_huge_page_tail(head, i, lruvec, list); 2432 2432 /* Some pages can be beyond i_size: drop them from page cache */ 2433 2433 if (head[i].index >= end) { 2434 - __ClearPageDirty(head + i); 2434 + ClearPageDirty(head + i); 2435 2435 __delete_from_page_cache(head + i, NULL); 2436 2436 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2437 2437 shmem_uncharge(head->mapping->host, 1);
+1 -1
mm/vmscan.c
··· 1418 1418 return ret; 1419 1419 1420 1420 mapping = page_mapping(page); 1421 - migrate_dirty = mapping && mapping->a_ops->migratepage; 1421 + migrate_dirty = !mapping || mapping->a_ops->migratepage; 1422 1422 unlock_page(page); 1423 1423 if (!migrate_dirty) 1424 1424 return ret;
+1 -1
net/9p/Kconfig
··· 32 32 33 33 34 34 config NET_9P_RDMA 35 - depends on INET && INFINIBAND_ADDR_TRANS 35 + depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS 36 36 tristate "9P RDMA Transport (Experimental)" 37 37 help 38 38 This builds support for an RDMA transport.
+2 -1
net/bridge/netfilter/ebtables.c
··· 1949 1949 int off, pad = 0; 1950 1950 unsigned int size_kern, match_size = mwt->match_size; 1951 1951 1952 - strlcpy(name, mwt->u.name, sizeof(name)); 1952 + if (strscpy(name, mwt->u.name, sizeof(name)) < 0) 1953 + return -EINVAL; 1953 1954 1954 1955 if (state->buf_kern_start) 1955 1956 dst = state->buf_kern_start + state->buf_kern_offset;
+3 -3
net/core/net-sysfs.c
··· 1214 1214 cpumask_var_t mask; 1215 1215 unsigned long index; 1216 1216 1217 - if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 1218 - return -ENOMEM; 1219 - 1220 1217 index = get_netdev_queue_index(queue); 1221 1218 1222 1219 if (dev->num_tc) { ··· 1222 1225 if (tc < 0) 1223 1226 return -EINVAL; 1224 1227 } 1228 + 1229 + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 1230 + return -ENOMEM; 1225 1231 1226 1232 rcu_read_lock(); 1227 1233 dev_maps = rcu_dereference(dev->xps_maps);
+4 -4
net/ipv4/ip_tunnel.c
··· 328 328 329 329 if (tdev) { 330 330 hlen = tdev->hard_header_len + tdev->needed_headroom; 331 - mtu = tdev->mtu; 331 + mtu = min(tdev->mtu, IP_MAX_MTU); 332 332 } 333 333 334 334 dev->needed_headroom = t_hlen + hlen; ··· 362 362 nt = netdev_priv(dev); 363 363 t_hlen = nt->hlen + sizeof(struct iphdr); 364 364 dev->min_mtu = ETH_MIN_MTU; 365 - dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; 365 + dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; 366 366 ip_tunnel_add(itn, nt); 367 367 return nt; 368 368 ··· 930 930 { 931 931 struct ip_tunnel *tunnel = netdev_priv(dev); 932 932 int t_hlen = tunnel->hlen + sizeof(struct iphdr); 933 - int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; 933 + int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; 934 934 935 935 if (new_mtu < ETH_MIN_MTU) 936 936 return -EINVAL; ··· 1107 1107 1108 1108 mtu = ip_tunnel_bind_dev(dev); 1109 1109 if (tb[IFLA_MTU]) { 1110 - unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; 1110 + unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen; 1111 1111 1112 1112 mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, 1113 1113 (unsigned int)(max - sizeof(struct iphdr)));
+8 -3
net/ipv6/ip6_tunnel.c
··· 1692 1692 if (new_mtu < ETH_MIN_MTU) 1693 1693 return -EINVAL; 1694 1694 } 1695 - if (new_mtu > 0xFFF8 - dev->hard_header_len) 1696 - return -EINVAL; 1695 + if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) { 1696 + if (new_mtu > IP6_MAX_MTU - dev->hard_header_len) 1697 + return -EINVAL; 1698 + } else { 1699 + if (new_mtu > IP_MAX_MTU - dev->hard_header_len) 1700 + return -EINVAL; 1701 + } 1697 1702 dev->mtu = new_mtu; 1698 1703 return 0; 1699 1704 } ··· 1846 1841 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1847 1842 dev->mtu -= 8; 1848 1843 dev->min_mtu = ETH_MIN_MTU; 1849 - dev->max_mtu = 0xFFF8 - dev->hard_header_len; 1844 + dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len; 1850 1845 1851 1846 return 0; 1852 1847
+2 -2
net/ipv6/seg6_iptunnel.c
··· 122 122 hdrlen = (osrh->hdrlen + 1) << 3; 123 123 tot_len = hdrlen + sizeof(*hdr); 124 124 125 - err = skb_cow_head(skb, tot_len); 125 + err = skb_cow_head(skb, tot_len + skb->mac_len); 126 126 if (unlikely(err)) 127 127 return err; 128 128 ··· 181 181 182 182 hdrlen = (osrh->hdrlen + 1) << 3; 183 183 184 - err = skb_cow_head(skb, hdrlen); 184 + err = skb_cow_head(skb, hdrlen + skb->mac_len); 185 185 if (unlikely(err)) 186 186 return err; 187 187
+3 -2
net/ipv6/sit.c
··· 1371 1371 dev->hard_header_len = LL_MAX_HEADER + t_hlen; 1372 1372 dev->mtu = ETH_DATA_LEN - t_hlen; 1373 1373 dev->min_mtu = IPV6_MIN_MTU; 1374 - dev->max_mtu = 0xFFF8 - t_hlen; 1374 + dev->max_mtu = IP6_MAX_MTU - t_hlen; 1375 1375 dev->flags = IFF_NOARP; 1376 1376 netif_keep_dst(dev); 1377 1377 dev->addr_len = 4; ··· 1583 1583 if (tb[IFLA_MTU]) { 1584 1584 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 1585 1585 1586 - if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) 1586 + if (mtu >= IPV6_MIN_MTU && 1587 + mtu <= IP6_MAX_MTU - dev->hard_header_len) 1587 1588 dev->mtu = mtu; 1588 1589 } 1589 1590
+1 -1
net/ipv6/xfrm6_policy.c
··· 124 124 struct flowi6 *fl6 = &fl->u.ip6; 125 125 int onlyproto = 0; 126 126 const struct ipv6hdr *hdr = ipv6_hdr(skb); 127 - u16 offset = sizeof(*hdr); 127 + u32 offset = sizeof(*hdr); 128 128 struct ipv6_opt_hdr *exthdr; 129 129 const unsigned char *nh = skb_network_header(skb); 130 130 u16 nhoff = IP6CB(skb)->nhoff;
+1 -1
net/kcm/kcmsock.c
··· 1671 1671 __module_get(newsock->ops->owner); 1672 1672 1673 1673 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, 1674 - &kcm_proto, true); 1674 + &kcm_proto, false); 1675 1675 if (!newsk) { 1676 1676 sock_release(newsock); 1677 1677 return ERR_PTR(-ENOMEM);
+1 -1
net/ncsi/ncsi-netlink.c
··· 208 208 static int ncsi_pkg_info_all_nl(struct sk_buff *skb, 209 209 struct netlink_callback *cb) 210 210 { 211 - struct nlattr *attrs[NCSI_ATTR_MAX]; 211 + struct nlattr *attrs[NCSI_ATTR_MAX + 1]; 212 212 struct ncsi_package *np, *package; 213 213 struct ncsi_dev_priv *ndp; 214 214 unsigned int package_id;
+15 -6
net/netfilter/ipvs/ip_vs_ctl.c
··· 2385 2385 struct ipvs_sync_daemon_cfg cfg; 2386 2386 2387 2387 memset(&cfg, 0, sizeof(cfg)); 2388 - strlcpy(cfg.mcast_ifn, dm->mcast_ifn, 2389 - sizeof(cfg.mcast_ifn)); 2388 + ret = -EINVAL; 2389 + if (strscpy(cfg.mcast_ifn, dm->mcast_ifn, 2390 + sizeof(cfg.mcast_ifn)) <= 0) 2391 + goto out_dec; 2390 2392 cfg.syncid = dm->syncid; 2391 2393 ret = start_sync_thread(ipvs, &cfg, dm->state); 2392 2394 } else { ··· 2426 2424 } 2427 2425 } 2428 2426 2427 + if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) && 2428 + strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) == 2429 + IP_VS_SCHEDNAME_MAXLEN) { 2430 + ret = -EINVAL; 2431 + goto out_unlock; 2432 + } 2433 + 2429 2434 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */ 2430 2435 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP && 2431 2436 usvc.protocol != IPPROTO_SCTP) { 2432 - pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", 2437 + pr_err("set_ctl: invalid protocol: %d %pI4:%d\n", 2433 2438 usvc.protocol, &usvc.addr.ip, 2434 - ntohs(usvc.port), usvc.sched_name); 2439 + ntohs(usvc.port)); 2435 2440 ret = -EFAULT; 2436 2441 goto out_unlock; 2437 2442 } ··· 2860 2851 static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { 2861 2852 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, 2862 2853 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, 2863 - .len = IP_VS_IFNAME_MAXLEN }, 2854 + .len = IP_VS_IFNAME_MAXLEN - 1 }, 2864 2855 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, 2865 2856 [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 }, 2866 2857 [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 }, ··· 2878 2869 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, 2879 2870 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, 2880 2871 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, 2881 - .len = IP_VS_SCHEDNAME_MAXLEN }, 2872 + .len = IP_VS_SCHEDNAME_MAXLEN - 1 }, 2882 2873 [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING, 2883 2874 .len = 
IP_VS_PENAME_MAXLEN }, 2884 2875 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
+5 -3
net/netfilter/nf_tables_api.c
··· 1336 1336 rcu_assign_pointer(chain->stats, newstats); 1337 1337 synchronize_rcu(); 1338 1338 free_percpu(oldstats); 1339 - } else 1339 + } else { 1340 1340 rcu_assign_pointer(chain->stats, newstats); 1341 + static_branch_inc(&nft_counters_enabled); 1342 + } 1341 1343 } 1342 1344 1343 1345 static void nf_tables_chain_free_chain_rules(struct nft_chain *chain) ··· 4946 4944 if (idx > s_idx) 4947 4945 memset(&cb->args[1], 0, 4948 4946 sizeof(cb->args) - sizeof(cb->args[0])); 4949 - if (filter && filter->table[0] && 4947 + if (filter && filter->table && 4950 4948 strcmp(filter->table, table->name)) 4951 4949 goto cont; 4952 4950 if (filter && ··· 5626 5624 if (idx > s_idx) 5627 5625 memset(&cb->args[1], 0, 5628 5626 sizeof(cb->args) - sizeof(cb->args[0])); 5629 - if (filter && filter->table[0] && 5627 + if (filter && filter->table && 5630 5628 strcmp(filter->table, table->name)) 5631 5629 goto cont; 5632 5630
+2 -2
net/netfilter/nf_tables_core.c
··· 104 104 if (!base_chain->stats) 105 105 return; 106 106 107 + local_bh_disable(); 107 108 stats = this_cpu_ptr(rcu_dereference(base_chain->stats)); 108 109 if (stats) { 109 - local_bh_disable(); 110 110 u64_stats_update_begin(&stats->syncp); 111 111 stats->pkts++; 112 112 stats->bytes += pkt->skb->len; 113 113 u64_stats_update_end(&stats->syncp); 114 - local_bh_enable(); 115 114 } 115 + local_bh_enable(); 116 116 } 117 117 118 118 struct nft_jumpstack {
+12 -8
net/netfilter/nft_ct.c
··· 881 881 struct nft_object *obj, bool reset) 882 882 { 883 883 const struct nft_ct_helper_obj *priv = nft_obj_data(obj); 884 - const struct nf_conntrack_helper *helper = priv->helper4; 884 + const struct nf_conntrack_helper *helper; 885 885 u16 family; 886 + 887 + if (priv->helper4 && priv->helper6) { 888 + family = NFPROTO_INET; 889 + helper = priv->helper4; 890 + } else if (priv->helper6) { 891 + family = NFPROTO_IPV6; 892 + helper = priv->helper6; 893 + } else { 894 + family = NFPROTO_IPV4; 895 + helper = priv->helper4; 896 + } 886 897 887 898 if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) 888 899 return -1; 889 900 890 901 if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) 891 902 return -1; 892 - 893 - if (priv->helper4 && priv->helper6) 894 - family = NFPROTO_INET; 895 - else if (priv->helper6) 896 - family = NFPROTO_IPV6; 897 - else 898 - family = NFPROTO_IPV4; 899 903 900 904 if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) 901 905 return -1;
+24 -14
net/netfilter/nft_limit.c
··· 51 51 return !limit->invert; 52 52 } 53 53 54 + /* Use same default as in iptables. */ 55 + #define NFT_LIMIT_PKT_BURST_DEFAULT 5 56 + 54 57 static int nft_limit_init(struct nft_limit *limit, 55 - const struct nlattr * const tb[]) 58 + const struct nlattr * const tb[], bool pkts) 56 59 { 57 - u64 unit; 60 + u64 unit, tokens; 58 61 59 62 if (tb[NFTA_LIMIT_RATE] == NULL || 60 63 tb[NFTA_LIMIT_UNIT] == NULL) ··· 71 68 72 69 if (tb[NFTA_LIMIT_BURST]) 73 70 limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); 74 - else 75 - limit->burst = 0; 71 + 72 + if (pkts && limit->burst == 0) 73 + limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT; 76 74 77 75 if (limit->rate + limit->burst < limit->rate) 78 76 return -EOVERFLOW; 79 77 80 - /* The token bucket size limits the number of tokens can be 81 - * accumulated. tokens_max specifies the bucket size. 82 - * tokens_max = unit * (rate + burst) / rate. 83 - */ 84 - limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), 85 - limit->rate); 78 + if (pkts) { 79 + tokens = div_u64(limit->nsecs, limit->rate) * limit->burst; 80 + } else { 81 + /* The token bucket size limits the number of tokens can be 82 + * accumulated. tokens_max specifies the bucket size. 83 + * tokens_max = unit * (rate + burst) / rate. 
84 + */ 85 + tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), 86 + limit->rate); 87 + } 88 + 89 + limit->tokens = tokens; 86 90 limit->tokens_max = limit->tokens; 87 91 88 92 if (tb[NFTA_LIMIT_FLAGS]) { ··· 154 144 struct nft_limit_pkts *priv = nft_expr_priv(expr); 155 145 int err; 156 146 157 - err = nft_limit_init(&priv->limit, tb); 147 + err = nft_limit_init(&priv->limit, tb, true); 158 148 if (err < 0) 159 149 return err; 160 150 ··· 195 185 { 196 186 struct nft_limit *priv = nft_expr_priv(expr); 197 187 198 - return nft_limit_init(priv, tb); 188 + return nft_limit_init(priv, tb, false); 199 189 } 200 190 201 191 static int nft_limit_bytes_dump(struct sk_buff *skb, ··· 256 246 struct nft_limit_pkts *priv = nft_obj_data(obj); 257 247 int err; 258 248 259 - err = nft_limit_init(&priv->limit, tb); 249 + err = nft_limit_init(&priv->limit, tb, true); 260 250 if (err < 0) 261 251 return err; 262 252 ··· 299 289 { 300 290 struct nft_limit *priv = nft_obj_data(obj); 301 291 302 - return nft_limit_init(priv, tb); 292 + return nft_limit_init(priv, tb, false); 303 293 } 304 294 305 295 static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
+8 -6
net/netfilter/nft_meta.c
··· 259 259 struct sk_buff *skb = pkt->skb; 260 260 u32 *sreg = &regs->data[meta->sreg]; 261 261 u32 value = *sreg; 262 - u8 pkt_type; 262 + u8 value8; 263 263 264 264 switch (meta->key) { 265 265 case NFT_META_MARK: ··· 269 269 skb->priority = value; 270 270 break; 271 271 case NFT_META_PKTTYPE: 272 - pkt_type = nft_reg_load8(sreg); 272 + value8 = nft_reg_load8(sreg); 273 273 274 - if (skb->pkt_type != pkt_type && 275 - skb_pkt_type_ok(pkt_type) && 274 + if (skb->pkt_type != value8 && 275 + skb_pkt_type_ok(value8) && 276 276 skb_pkt_type_ok(skb->pkt_type)) 277 - skb->pkt_type = pkt_type; 277 + skb->pkt_type = value8; 278 278 break; 279 279 case NFT_META_NFTRACE: 280 - skb->nf_trace = !!value; 280 + value8 = nft_reg_load8(sreg); 281 + 282 + skb->nf_trace = !!value8; 281 283 break; 282 284 default: 283 285 WARN_ON(1);
+1 -1
net/rds/Kconfig
··· 8 8 9 9 config RDS_RDMA 10 10 tristate "RDS over Infiniband" 11 - depends on RDS && INFINIBAND_ADDR_TRANS 11 + depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS 12 12 ---help--- 13 13 Allow RDS to use Infiniband as a transport. 14 14 This transport supports RDMA operations.
+1 -1
net/sched/cls_flower.c
··· 1028 1028 fl_mask_put(head, fnew->mask, false); 1029 1029 1030 1030 errout_idr: 1031 - if (fnew->handle) 1031 + if (!fold) 1032 1032 idr_remove(&head->handle_idr, fnew->handle); 1033 1033 errout: 1034 1034 tcf_exts_destroy(&fnew->exts);
+1 -1
net/sunrpc/Kconfig
··· 50 50 51 51 config SUNRPC_XPRT_RDMA 52 52 tristate "RPC-over-RDMA transport" 53 - depends on SUNRPC && INFINIBAND_ADDR_TRANS 53 + depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS 54 54 default SUNRPC && INFINIBAND 55 55 select SG_POOL 56 56 help
+2 -3
net/xfrm/xfrm_policy.c
··· 1658 1658 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; 1659 1659 } 1660 1660 1661 - out: 1662 1661 return &xdst0->u.dst; 1663 1662 1664 1663 put_states: ··· 1666 1667 free_dst: 1667 1668 if (xdst0) 1668 1669 dst_release_immediate(&xdst0->u.dst); 1669 - xdst0 = ERR_PTR(err); 1670 - goto out; 1670 + 1671 + return ERR_PTR(err); 1671 1672 } 1672 1673 1673 1674 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
+1 -1
security/selinux/ss/services.c
··· 1494 1494 scontext_len, &context, def_sid); 1495 1495 if (rc == -EINVAL && force) { 1496 1496 context.str = str; 1497 - context.len = scontext_len; 1497 + context.len = strlen(str) + 1; 1498 1498 str = NULL; 1499 1499 } else if (rc) 1500 1500 goto out_unlock;
+1
tools/include/uapi/linux/bpf.h
··· 2332 2332 __u32 map_flags; 2333 2333 char name[BPF_OBJ_NAME_LEN]; 2334 2334 __u32 ifindex; 2335 + __u32 :32; 2335 2336 __u64 netns_dev; 2336 2337 __u64 netns_ino; 2337 2338 __u32 btf_id;