Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

xdp_umem.c had overlapping changes between the 64-bit math fix
for the calculation of npgs and the removal of the zerocopy
memory type which got rid of the chunk_size_nohdr member.

The mlx5 Kconfig conflict is a case where we just take the
net-next copy of the Kconfig entry dependency as it takes on
the ESWITCH dependency by one level of indirection which is
what the 'net' conflicting change is trying to ensure.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1495 -834
+13 -8
Documentation/process/coding-style.rst
··· 84 84 Coding style is all about readability and maintainability using commonly 85 85 available tools. 86 86 87 - The limit on the length of lines is 80 columns and this is a strongly 88 - preferred limit. 87 + The preferred limit on the length of a single line is 80 columns. 89 88 90 - Statements longer than 80 columns will be broken into sensible chunks, unless 91 - exceeding 80 columns significantly increases readability and does not hide 92 - information. Descendants are always substantially shorter than the parent and 93 - are placed substantially to the right. The same applies to function headers 94 - with a long argument list. However, never break user-visible strings such as 95 - printk messages, because that breaks the ability to grep for them. 89 + Statements longer than 80 columns should be broken into sensible chunks, 90 + unless exceeding 80 columns significantly increases readability and does 91 + not hide information. 92 + 93 + Descendants are always substantially shorter than the parent and 94 + are placed substantially to the right. A very commonly used style 95 + is to align descendants to a function open parenthesis. 96 + 97 + These same rules are applied to function headers with a long argument list. 98 + 99 + However, never break user-visible strings such as printk messages because 100 + that breaks the ability to grep for them. 96 101 97 102 98 103 3) Placing Braces and Spaces
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 7 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc6 5 + EXTRAVERSION = -rc7 6 6 NAME = Kleptomaniac Octopus 7 7 8 8 # *DOCUMENTATION*
+1 -1
arch/arm/boot/compressed/vmlinux.lds.S
··· 42 42 } 43 43 .table : ALIGN(4) { 44 44 _table_start = .; 45 - LONG(ZIMAGE_MAGIC(2)) 45 + LONG(ZIMAGE_MAGIC(4)) 46 46 LONG(ZIMAGE_MAGIC(0x5a534c4b)) 47 47 LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start)) 48 48 LONG(ZIMAGE_MAGIC(_kernel_bss_size))
+1 -1
arch/arm/boot/dts/am437x-gp-evm.dts
··· 943 943 944 944 &cpsw_emac0 { 945 945 phy-handle = <&ethphy0>; 946 - phy-mode = "rgmii"; 946 + phy-mode = "rgmii-rxid"; 947 947 }; 948 948 949 949 &elm {
+1 -1
arch/arm/boot/dts/am437x-idk-evm.dts
··· 504 504 505 505 &cpsw_emac0 { 506 506 phy-handle = <&ethphy0>; 507 - phy-mode = "rgmii"; 507 + phy-mode = "rgmii-rxid"; 508 508 }; 509 509 510 510 &rtc {
+2 -2
arch/arm/boot/dts/am437x-sk-evm.dts
··· 833 833 834 834 &cpsw_emac0 { 835 835 phy-handle = <&ethphy0>; 836 - phy-mode = "rgmii"; 836 + phy-mode = "rgmii-rxid"; 837 837 dual_emac_res_vlan = <1>; 838 838 }; 839 839 840 840 &cpsw_emac1 { 841 841 phy-handle = <&ethphy1>; 842 - phy-mode = "rgmii"; 842 + phy-mode = "rgmii-rxid"; 843 843 dual_emac_res_vlan = <2>; 844 844 }; 845 845
+2 -2
arch/arm/boot/dts/am571x-idk.dts
··· 190 190 191 191 &cpsw_port1 { 192 192 phy-handle = <&ethphy0_sw>; 193 - phy-mode = "rgmii"; 193 + phy-mode = "rgmii-rxid"; 194 194 ti,dual-emac-pvid = <1>; 195 195 }; 196 196 197 197 &cpsw_port2 { 198 198 phy-handle = <&ethphy1_sw>; 199 - phy-mode = "rgmii"; 199 + phy-mode = "rgmii-rxid"; 200 200 ti,dual-emac-pvid = <2>; 201 201 }; 202 202
+2 -2
arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
··· 433 433 434 434 &cpsw_emac0 { 435 435 phy-handle = <&phy0>; 436 - phy-mode = "rgmii"; 436 + phy-mode = "rgmii-rxid"; 437 437 dual_emac_res_vlan = <1>; 438 438 }; 439 439 440 440 &cpsw_emac1 { 441 441 phy-handle = <&phy1>; 442 - phy-mode = "rgmii"; 442 + phy-mode = "rgmii-rxid"; 443 443 dual_emac_res_vlan = <2>; 444 444 }; 445 445
+2 -2
arch/arm/boot/dts/am57xx-idk-common.dtsi
··· 408 408 409 409 &cpsw_emac0 { 410 410 phy-handle = <&ethphy0>; 411 - phy-mode = "rgmii"; 411 + phy-mode = "rgmii-rxid"; 412 412 dual_emac_res_vlan = <1>; 413 413 }; 414 414 415 415 &cpsw_emac1 { 416 416 phy-handle = <&ethphy1>; 417 - phy-mode = "rgmii"; 417 + phy-mode = "rgmii-rxid"; 418 418 dual_emac_res_vlan = <2>; 419 419 }; 420 420
+3 -3
arch/arm/boot/dts/bcm-hr2.dtsi
··· 75 75 timer@20200 { 76 76 compatible = "arm,cortex-a9-global-timer"; 77 77 reg = <0x20200 0x100>; 78 - interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; 78 + interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>; 79 79 clocks = <&periph_clk>; 80 80 }; 81 81 ··· 83 83 compatible = "arm,cortex-a9-twd-timer"; 84 84 reg = <0x20600 0x20>; 85 85 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | 86 - IRQ_TYPE_LEVEL_HIGH)>; 86 + IRQ_TYPE_EDGE_RISING)>; 87 87 clocks = <&periph_clk>; 88 88 }; 89 89 ··· 91 91 compatible = "arm,cortex-a9-twd-wdt"; 92 92 reg = <0x20620 0x20>; 93 93 interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | 94 - IRQ_TYPE_LEVEL_HIGH)>; 94 + IRQ_TYPE_EDGE_RISING)>; 95 95 clocks = <&periph_clk>; 96 96 }; 97 97
+1 -1
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
··· 24 24 25 25 leds { 26 26 act { 27 - gpios = <&gpio 47 GPIO_ACTIVE_HIGH>; 27 + gpios = <&gpio 47 GPIO_ACTIVE_LOW>; 28 28 }; 29 29 }; 30 30
+1 -1
arch/arm/boot/dts/dm814x.dtsi
··· 693 693 694 694 davinci_mdio: mdio@800 { 695 695 compatible = "ti,cpsw-mdio", "ti,davinci_mdio"; 696 - clocks = <&alwon_ethernet_clkctrl DM814_ETHERNET_CPGMAC0_CLKCTRL 0>; 696 + clocks = <&cpsw_125mhz_gclk>; 697 697 clock-names = "fck"; 698 698 #address-cells = <1>; 699 699 #size-cells = <0>;
-7
arch/arm/boot/dts/imx6q-b450v3.dts
··· 65 65 }; 66 66 }; 67 67 68 - &clks { 69 - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, 70 - <&clks IMX6QDL_CLK_LDB_DI1_SEL>; 71 - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, 72 - <&clks IMX6QDL_CLK_PLL3_USB_OTG>; 73 - }; 74 - 75 68 &ldb { 76 69 status = "okay"; 77 70
-7
arch/arm/boot/dts/imx6q-b650v3.dts
··· 65 65 }; 66 66 }; 67 67 68 - &clks { 69 - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, 70 - <&clks IMX6QDL_CLK_LDB_DI1_SEL>; 71 - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, 72 - <&clks IMX6QDL_CLK_PLL3_USB_OTG>; 73 - }; 74 - 75 68 &ldb { 76 69 status = "okay"; 77 70
-11
arch/arm/boot/dts/imx6q-b850v3.dts
··· 53 53 }; 54 54 }; 55 55 56 - &clks { 57 - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, 58 - <&clks IMX6QDL_CLK_LDB_DI1_SEL>, 59 - <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, 60 - <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>; 61 - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, 62 - <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, 63 - <&clks IMX6QDL_CLK_PLL2_PFD2_396M>, 64 - <&clks IMX6QDL_CLK_PLL2_PFD2_396M>; 65 - }; 66 - 67 56 &ldb { 68 57 fsl,dual-channel; 69 58 status = "okay";
+15
arch/arm/boot/dts/imx6q-bx50v3.dtsi
··· 377 377 #interrupt-cells = <1>; 378 378 }; 379 379 }; 380 + 381 + &clks { 382 + assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, 383 + <&clks IMX6QDL_CLK_LDB_DI1_SEL>, 384 + <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, 385 + <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>, 386 + <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>, 387 + <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>; 388 + assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, 389 + <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, 390 + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, 391 + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, 392 + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, 393 + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>; 394 + };
+6 -6
arch/arm/boot/dts/mmp3-dell-ariel.dts
··· 98 98 status = "okay"; 99 99 }; 100 100 101 - &ssp3 { 101 + &ssp1 { 102 102 status = "okay"; 103 - cs-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>; 103 + cs-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; 104 104 105 105 firmware-flash@0 { 106 - compatible = "st,m25p80", "jedec,spi-nor"; 106 + compatible = "winbond,w25q32", "jedec,spi-nor"; 107 107 reg = <0>; 108 - spi-max-frequency = <40000000>; 108 + spi-max-frequency = <104000000>; 109 109 m25p,fast-read; 110 110 }; 111 111 }; 112 112 113 - &ssp4 { 114 - cs-gpios = <&gpio 56 GPIO_ACTIVE_HIGH>; 113 + &ssp2 { 114 + cs-gpios = <&gpio 56 GPIO_ACTIVE_LOW>; 115 115 status = "okay"; 116 116 };
+3 -5
arch/arm/boot/dts/mmp3.dtsi
··· 202 202 }; 203 203 204 204 hsic_phy0: hsic-phy@f0001800 { 205 - compatible = "marvell,mmp3-hsic-phy", 206 - "usb-nop-xceiv"; 205 + compatible = "marvell,mmp3-hsic-phy"; 207 206 reg = <0xf0001800 0x40>; 208 207 #phy-cells = <0>; 209 208 status = "disabled"; ··· 223 224 }; 224 225 225 226 hsic_phy1: hsic-phy@f0002800 { 226 - compatible = "marvell,mmp3-hsic-phy", 227 - "usb-nop-xceiv"; 227 + compatible = "marvell,mmp3-hsic-phy"; 228 228 reg = <0xf0002800 0x40>; 229 229 #phy-cells = <0>; 230 230 status = "disabled"; ··· 529 531 }; 530 532 531 533 soc_clocks: clocks@d4050000 { 532 - compatible = "marvell,mmp2-clock"; 534 + compatible = "marvell,mmp3-clock"; 533 535 reg = <0xd4050000 0x1000>, 534 536 <0xd4282800 0x400>, 535 537 <0xd4015000 0x1000>;
+1 -74
arch/arm/include/asm/assembler.h
··· 18 18 #endif 19 19 20 20 #include <asm/ptrace.h> 21 - #include <asm/domain.h> 22 21 #include <asm/opcodes-virt.h> 23 22 #include <asm/asm-offsets.h> 24 23 #include <asm/page.h> 25 24 #include <asm/thread_info.h> 25 + #include <asm/uaccess-asm.h> 26 26 27 27 #define IOMEM(x) (x) 28 28 ··· 444 444 \name: 445 445 .asciz "\string" 446 446 .size \name , . - \name 447 - .endm 448 - 449 - .macro csdb 450 - #ifdef CONFIG_THUMB2_KERNEL 451 - .inst.w 0xf3af8014 452 - #else 453 - .inst 0xe320f014 454 - #endif 455 - .endm 456 - 457 - .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req 458 - #ifndef CONFIG_CPU_USE_DOMAINS 459 - adds \tmp, \addr, #\size - 1 460 - sbcscc \tmp, \tmp, \limit 461 - bcs \bad 462 - #ifdef CONFIG_CPU_SPECTRE 463 - movcs \addr, #0 464 - csdb 465 - #endif 466 - #endif 467 - .endm 468 - 469 - .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req 470 - #ifdef CONFIG_CPU_SPECTRE 471 - sub \tmp, \limit, #1 472 - subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr 473 - addhs \tmp, \tmp, #1 @ if (tmp >= 0) { 474 - subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } 475 - movlo \addr, #0 @ if (tmp < 0) addr = NULL 476 - csdb 477 - #endif 478 - .endm 479 - 480 - .macro uaccess_disable, tmp, isb=1 481 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 482 - /* 483 - * Whenever we re-enter userspace, the domains should always be 484 - * set appropriately. 485 - */ 486 - mov \tmp, #DACR_UACCESS_DISABLE 487 - mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register 488 - .if \isb 489 - instr_sync 490 - .endif 491 - #endif 492 - .endm 493 - 494 - .macro uaccess_enable, tmp, isb=1 495 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 496 - /* 497 - * Whenever we re-enter userspace, the domains should always be 498 - * set appropriately. 
499 - */ 500 - mov \tmp, #DACR_UACCESS_ENABLE 501 - mcr p15, 0, \tmp, c3, c0, 0 502 - .if \isb 503 - instr_sync 504 - .endif 505 - #endif 506 - .endm 507 - 508 - .macro uaccess_save, tmp 509 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 510 - mrc p15, 0, \tmp, c3, c0, 0 511 - str \tmp, [sp, #SVC_DACR] 512 - #endif 513 - .endm 514 - 515 - .macro uaccess_restore 516 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 517 - ldr r0, [sp, #SVC_DACR] 518 - mcr p15, 0, r0, c3, c0, 0 519 - #endif 520 447 .endm 521 448 522 449 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+117
arch/arm/include/asm/uaccess-asm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + 3 + #ifndef __ASM_UACCESS_ASM_H__ 4 + #define __ASM_UACCESS_ASM_H__ 5 + 6 + #include <asm/asm-offsets.h> 7 + #include <asm/domain.h> 8 + #include <asm/memory.h> 9 + #include <asm/thread_info.h> 10 + 11 + .macro csdb 12 + #ifdef CONFIG_THUMB2_KERNEL 13 + .inst.w 0xf3af8014 14 + #else 15 + .inst 0xe320f014 16 + #endif 17 + .endm 18 + 19 + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req 20 + #ifndef CONFIG_CPU_USE_DOMAINS 21 + adds \tmp, \addr, #\size - 1 22 + sbcscc \tmp, \tmp, \limit 23 + bcs \bad 24 + #ifdef CONFIG_CPU_SPECTRE 25 + movcs \addr, #0 26 + csdb 27 + #endif 28 + #endif 29 + .endm 30 + 31 + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req 32 + #ifdef CONFIG_CPU_SPECTRE 33 + sub \tmp, \limit, #1 34 + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr 35 + addhs \tmp, \tmp, #1 @ if (tmp >= 0) { 36 + subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } 37 + movlo \addr, #0 @ if (tmp < 0) addr = NULL 38 + csdb 39 + #endif 40 + .endm 41 + 42 + .macro uaccess_disable, tmp, isb=1 43 + #ifdef CONFIG_CPU_SW_DOMAIN_PAN 44 + /* 45 + * Whenever we re-enter userspace, the domains should always be 46 + * set appropriately. 47 + */ 48 + mov \tmp, #DACR_UACCESS_DISABLE 49 + mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register 50 + .if \isb 51 + instr_sync 52 + .endif 53 + #endif 54 + .endm 55 + 56 + .macro uaccess_enable, tmp, isb=1 57 + #ifdef CONFIG_CPU_SW_DOMAIN_PAN 58 + /* 59 + * Whenever we re-enter userspace, the domains should always be 60 + * set appropriately. 61 + */ 62 + mov \tmp, #DACR_UACCESS_ENABLE 63 + mcr p15, 0, \tmp, c3, c0, 0 64 + .if \isb 65 + instr_sync 66 + .endif 67 + #endif 68 + .endm 69 + 70 + #if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS) 71 + #define DACR(x...) x 72 + #else 73 + #define DACR(x...) 74 + #endif 75 + 76 + /* 77 + * Save the address limit on entry to a privileged exception. 
78 + * 79 + * If we are using the DACR for kernel access by the user accessors 80 + * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain 81 + * back to client mode, whether or not \disable is set. 82 + * 83 + * If we are using SW PAN, set the DACR user domain to no access 84 + * if \disable is set. 85 + */ 86 + .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable 87 + ldr \tmp1, [\tsk, #TI_ADDR_LIMIT] 88 + mov \tmp2, #TASK_SIZE 89 + str \tmp2, [\tsk, #TI_ADDR_LIMIT] 90 + DACR( mrc p15, 0, \tmp0, c3, c0, 0) 91 + DACR( str \tmp0, [sp, #SVC_DACR]) 92 + str \tmp1, [sp, #SVC_ADDR_LIMIT] 93 + .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN) 94 + /* kernel=client, user=no access */ 95 + mov \tmp2, #DACR_UACCESS_DISABLE 96 + mcr p15, 0, \tmp2, c3, c0, 0 97 + instr_sync 98 + .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS) 99 + /* kernel=client */ 100 + bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL) 101 + orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) 102 + mcr p15, 0, \tmp2, c3, c0, 0 103 + instr_sync 104 + .endif 105 + .endm 106 + 107 + /* Restore the user access state previously saved by uaccess_entry */ 108 + .macro uaccess_exit, tsk, tmp0, tmp1 109 + ldr \tmp1, [sp, #SVC_ADDR_LIMIT] 110 + DACR( ldr \tmp0, [sp, #SVC_DACR]) 111 + str \tmp1, [\tsk, #TI_ADDR_LIMIT] 112 + DACR( mcr p15, 0, \tmp0, c3, c0, 0) 113 + .endm 114 + 115 + #undef DACR 116 + 117 + #endif /* __ASM_UACCESS_ASM_H__ */
+1 -1
arch/arm/kernel/atags_proc.c
··· 42 42 size_t size; 43 43 44 44 if (tag->hdr.tag != ATAG_CORE) { 45 - pr_info("No ATAGs?"); 45 + pr_info("No ATAGs?\n"); 46 46 return -EINVAL; 47 47 } 48 48
+2 -9
arch/arm/kernel/entry-armv.S
··· 27 27 #include <asm/unistd.h> 28 28 #include <asm/tls.h> 29 29 #include <asm/system_info.h> 30 + #include <asm/uaccess-asm.h> 30 31 31 32 #include "entry-header.S" 32 33 #include <asm/entry-macro-multi.S> ··· 180 179 stmia r7, {r2 - r6} 181 180 182 181 get_thread_info tsk 183 - ldr r0, [tsk, #TI_ADDR_LIMIT] 184 - mov r1, #TASK_SIZE 185 - str r1, [tsk, #TI_ADDR_LIMIT] 186 - str r0, [sp, #SVC_ADDR_LIMIT] 187 - 188 - uaccess_save r0 189 - .if \uaccess 190 - uaccess_disable r0 191 - .endif 182 + uaccess_entry tsk, r0, r1, r2, \uaccess 192 183 193 184 .if \trace 194 185 #ifdef CONFIG_TRACE_IRQFLAGS
+3 -6
arch/arm/kernel/entry-header.S
··· 6 6 #include <asm/asm-offsets.h> 7 7 #include <asm/errno.h> 8 8 #include <asm/thread_info.h> 9 + #include <asm/uaccess-asm.h> 9 10 #include <asm/v7m.h> 10 11 11 12 @ Bad Abort numbers ··· 218 217 blne trace_hardirqs_off 219 218 #endif 220 219 .endif 221 - ldr r1, [sp, #SVC_ADDR_LIMIT] 222 - uaccess_restore 223 - str r1, [tsk, #TI_ADDR_LIMIT] 220 + uaccess_exit tsk, r0, r1 224 221 225 222 #ifndef CONFIG_THUMB2_KERNEL 226 223 @ ARM mode SVC restore ··· 262 263 @ on the stack remains correct). 263 264 @ 264 265 .macro svc_exit_via_fiq 265 - ldr r1, [sp, #SVC_ADDR_LIMIT] 266 - uaccess_restore 267 - str r1, [tsk, #TI_ADDR_LIMIT] 266 + uaccess_exit tsk, r0, r1 268 267 #ifndef CONFIG_THUMB2_KERNEL 269 268 @ ARM mode restore 270 269 mov r0, sp
+2 -2
arch/arm/kernel/ptrace.c
··· 219 219 }; 220 220 221 221 static struct undef_hook thumb_break_hook = { 222 - .instr_mask = 0xffff, 223 - .instr_val = 0xde01, 222 + .instr_mask = 0xffffffff, 223 + .instr_val = 0x0000de01, 224 224 .cpsr_mask = PSR_T_BIT, 225 225 .cpsr_val = PSR_T_BIT, 226 226 .fn = break_trap,
+2 -2
arch/arm64/boot/dts/mediatek/mt8173.dtsi
··· 1402 1402 "venc_lt_sel"; 1403 1403 assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>, 1404 1404 <&topckgen CLK_TOP_VENC_LT_SEL>; 1405 - assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>, 1406 - <&topckgen CLK_TOP_UNIVPLL1_D2>; 1405 + assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>, 1406 + <&topckgen CLK_TOP_VCODECPLL_370P5>; 1407 1407 }; 1408 1408 1409 1409 jpegdec: jpegdec@18004000 {
+1 -1
arch/arm64/kernel/smp.c
··· 176 176 panic("CPU%u detected unsupported configuration\n", cpu); 177 177 } 178 178 179 - return ret; 179 + return -EIO; 180 180 } 181 181 182 182 static void init_gic_priority_masking(void)
-6
arch/csky/abiv1/inc/abi/entry.h
··· 80 80 .endm 81 81 82 82 .macro RESTORE_ALL 83 - psrclr ie 84 83 ldw lr, (sp, 4) 85 84 ldw a0, (sp, 8) 86 85 mtcr a0, epc ··· 173 174 174 175 movi r6, 0 175 176 cpwcr r6, cpcr31 176 - .endm 177 - 178 - .macro ANDI_R3 rx, imm 179 - lsri \rx, 3 180 - andi \rx, (\imm >> 3) 181 177 .endm 182 178 #endif /* __ASM_CSKY_ENTRY_H */
+2 -6
arch/csky/abiv2/inc/abi/entry.h
··· 13 13 #define LSAVE_A1 28 14 14 #define LSAVE_A2 32 15 15 #define LSAVE_A3 36 16 + #define LSAVE_A4 40 17 + #define LSAVE_A5 44 16 18 17 19 #define KSPTOUSP 18 20 #define USPTOKSP ··· 65 63 .endm 66 64 67 65 .macro RESTORE_ALL 68 - psrclr ie 69 66 ldw tls, (sp, 0) 70 67 ldw lr, (sp, 4) 71 68 ldw a0, (sp, 8) ··· 301 300 302 301 jmpi 3f /* jump to va */ 303 302 3: 304 - .endm 305 - 306 - .macro ANDI_R3 rx, imm 307 - lsri \rx, 3 308 - andi \rx, (\imm >> 3) 309 303 .endm 310 304 #endif /* __ASM_CSKY_ENTRY_H */
+6
arch/csky/include/asm/thread_info.h
··· 81 81 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 82 82 #define _TIF_SECCOMP (1 << TIF_SECCOMP) 83 83 84 + #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ 85 + _TIF_NOTIFY_RESUME | _TIF_UPROBE) 86 + 87 + #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ 88 + _TIF_SYSCALL_TRACEPOINT) 89 + 84 90 #endif /* _ASM_CSKY_THREAD_INFO_H */
+58 -59
arch/csky/kernel/entry.S
··· 128 128 ENTRY(csky_systemcall) 129 129 SAVE_ALL TRAP0_SIZE 130 130 zero_fp 131 - #ifdef CONFIG_RSEQ_DEBUG 132 - mov a0, sp 133 - jbsr rseq_syscall 134 - #endif 135 131 psrset ee, ie 136 132 137 - lrw r11, __NR_syscalls 138 - cmphs syscallid, r11 /* Check nr of syscall */ 139 - bt ret_from_exception 133 + lrw r9, __NR_syscalls 134 + cmphs syscallid, r9 /* Check nr of syscall */ 135 + bt 1f 140 136 141 - lrw r13, sys_call_table 142 - ixw r13, syscallid 143 - ldw r11, (r13) 144 - cmpnei r11, 0 137 + lrw r9, sys_call_table 138 + ixw r9, syscallid 139 + ldw syscallid, (r9) 140 + cmpnei syscallid, 0 145 141 bf ret_from_exception 146 142 147 143 mov r9, sp 148 144 bmaski r10, THREAD_SHIFT 149 145 andn r9, r10 150 - ldw r12, (r9, TINFO_FLAGS) 151 - ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT) 152 - cmpnei r12, 0 146 + ldw r10, (r9, TINFO_FLAGS) 147 + lrw r9, _TIF_SYSCALL_WORK 148 + and r10, r9 149 + cmpnei r10, 0 153 150 bt csky_syscall_trace 154 151 #if defined(__CSKYABIV2__) 155 152 subi sp, 8 156 153 stw r5, (sp, 0x4) 157 154 stw r4, (sp, 0x0) 158 - jsr r11 /* Do system call */ 155 + jsr syscallid /* Do system call */ 159 156 addi sp, 8 160 157 #else 161 - jsr r11 158 + jsr syscallid 162 159 #endif 163 160 stw a0, (sp, LSAVE_A0) /* Save return value */ 161 + 1: 162 + #ifdef CONFIG_DEBUG_RSEQ 163 + mov a0, sp 164 + jbsr rseq_syscall 165 + #endif 164 166 jmpi ret_from_exception 165 167 166 168 csky_syscall_trace: ··· 175 173 ldw a3, (sp, LSAVE_A3) 176 174 #if defined(__CSKYABIV2__) 177 175 subi sp, 8 178 - stw r5, (sp, 0x4) 179 - stw r4, (sp, 0x0) 176 + ldw r9, (sp, LSAVE_A4) 177 + stw r9, (sp, 0x0) 178 + ldw r9, (sp, LSAVE_A5) 179 + stw r9, (sp, 0x4) 180 + jsr syscallid /* Do system call */ 181 + addi sp, 8 180 182 #else 181 183 ldw r6, (sp, LSAVE_A4) 182 184 ldw r7, (sp, LSAVE_A5) 183 - #endif 184 - jsr r11 /* Do system call */ 185 - #if defined(__CSKYABIV2__) 186 - addi sp, 8 185 + jsr syscallid /* Do system call */ 187 186 
#endif 188 187 stw a0, (sp, LSAVE_A0) /* Save return value */ 189 188 189 + #ifdef CONFIG_DEBUG_RSEQ 190 + mov a0, sp 191 + jbsr rseq_syscall 192 + #endif 190 193 mov a0, sp /* right now, sp --> pt_regs */ 191 194 jbsr syscall_trace_exit 192 195 br ret_from_exception ··· 207 200 mov r9, sp 208 201 bmaski r10, THREAD_SHIFT 209 202 andn r9, r10 210 - ldw r12, (r9, TINFO_FLAGS) 211 - ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT) 212 - cmpnei r12, 0 203 + ldw r10, (r9, TINFO_FLAGS) 204 + lrw r9, _TIF_SYSCALL_WORK 205 + and r10, r9 206 + cmpnei r10, 0 213 207 bf ret_from_exception 214 208 mov a0, sp /* sp = pt_regs pointer */ 215 209 jbsr syscall_trace_exit 216 210 217 211 ret_from_exception: 218 - ld syscallid, (sp, LSAVE_PSR) 219 - btsti syscallid, 31 220 - bt 1f 212 + psrclr ie 213 + ld r9, (sp, LSAVE_PSR) 214 + btsti r9, 31 221 215 216 + bt 1f 222 217 /* 223 218 * Load address of current->thread_info, Then get address of task_struct 224 219 * Get task_needreshed in task_struct ··· 229 220 bmaski r10, THREAD_SHIFT 230 221 andn r9, r10 231 222 232 - ldw r12, (r9, TINFO_FLAGS) 233 - andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | _TIF_UPROBE) 234 - cmpnei r12, 0 223 + ldw r10, (r9, TINFO_FLAGS) 224 + lrw r9, _TIF_WORK_MASK 225 + and r10, r9 226 + cmpnei r10, 0 235 227 bt exit_work 236 228 1: 229 + #ifdef CONFIG_PREEMPTION 230 + mov r9, sp 231 + bmaski r10, THREAD_SHIFT 232 + andn r9, r10 233 + 234 + ldw r10, (r9, TINFO_PREEMPT) 235 + cmpnei r10, 0 236 + bt 2f 237 + jbsr preempt_schedule_irq /* irq en/disable is done inside */ 238 + 2: 239 + #endif 240 + 237 241 #ifdef CONFIG_TRACE_IRQFLAGS 238 242 ld r10, (sp, LSAVE_PSR) 239 243 btsti r10, 6 ··· 257 235 RESTORE_ALL 258 236 259 237 exit_work: 260 - lrw syscallid, ret_from_exception 261 - mov lr, syscallid 238 + lrw r9, ret_from_exception 239 + mov lr, r9 262 240 263 - btsti r12, TIF_NEED_RESCHED 241 + btsti r10, TIF_NEED_RESCHED 264 242 bt work_resched 265 243 244 
+ psrset ie 266 245 mov a0, sp 267 - mov a1, r12 246 + mov a1, r10 268 247 jmpi do_notify_resume 269 248 270 249 work_resched: ··· 314 291 jbsr trace_hardirqs_off 315 292 #endif 316 293 317 - #ifdef CONFIG_PREEMPTION 318 - mov r9, sp /* Get current stack pointer */ 319 - bmaski r10, THREAD_SHIFT 320 - andn r9, r10 /* Get thread_info */ 321 - 322 - /* 323 - * Get task_struct->stack.preempt_count for current, 324 - * and increase 1. 325 - */ 326 - ldw r12, (r9, TINFO_PREEMPT) 327 - addi r12, 1 328 - stw r12, (r9, TINFO_PREEMPT) 329 - #endif 330 294 331 295 mov a0, sp 332 296 jbsr csky_do_IRQ 333 297 334 - #ifdef CONFIG_PREEMPTION 335 - subi r12, 1 336 - stw r12, (r9, TINFO_PREEMPT) 337 - cmpnei r12, 0 338 - bt 2f 339 - ldw r12, (r9, TINFO_FLAGS) 340 - btsti r12, TIF_NEED_RESCHED 341 - bf 2f 342 - jbsr preempt_schedule_irq /* irq en/disable is done inside */ 343 - #endif 344 - 2: 345 298 jmpi ret_from_exception 346 299 347 300 /*
+1 -1
arch/ia64/include/asm/device.h
··· 6 6 #define _ASM_IA64_DEVICE_H 7 7 8 8 struct dev_archdata { 9 - #ifdef CONFIG_INTEL_IOMMU 9 + #ifdef CONFIG_IOMMU_API 10 10 void *iommu; /* hook for IOMMU specific extension */ 11 11 #endif 12 12 };
+1 -1
arch/parisc/mm/init.c
··· 562 562 > BITS_PER_LONG); 563 563 564 564 high_memory = __va((max_pfn << PAGE_SHIFT)); 565 - set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); 565 + set_max_mapnr(max_low_pfn); 566 566 memblock_free_all(); 567 567 568 568 #ifdef CONFIG_PA11
+1
arch/powerpc/Kconfig
··· 126 126 select ARCH_HAS_MMIOWB if PPC64 127 127 select ARCH_HAS_PHYS_TO_DMA 128 128 select ARCH_HAS_PMEM_API 129 + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 129 130 select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 130 131 select ARCH_HAS_PTE_SPECIAL 131 132 select ARCH_HAS_MEMBARRIER_CALLBACKS
+3
arch/powerpc/kernel/Makefile
··· 162 162 GCOV_PROFILE_kprobes-ftrace.o := n 163 163 KCOV_INSTRUMENT_kprobes-ftrace.o := n 164 164 UBSAN_SANITIZE_kprobes-ftrace.o := n 165 + GCOV_PROFILE_syscall_64.o := n 166 + KCOV_INSTRUMENT_syscall_64.o := n 167 + UBSAN_SANITIZE_syscall_64.o := n 165 168 UBSAN_SANITIZE_vdso.o := n 166 169 167 170 # Necessary for booting with kcov enabled on book3e machines
+2
arch/powerpc/kernel/exceptions-64s.S
··· 2411 2411 GEN_COMMON facility_unavailable 2412 2412 addi r3,r1,STACK_FRAME_OVERHEAD 2413 2413 bl facility_unavailable_exception 2414 + REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2414 2415 b interrupt_return 2415 2416 2416 2417 GEN_KVM facility_unavailable ··· 2441 2440 GEN_COMMON h_facility_unavailable 2442 2441 addi r3,r1,STACK_FRAME_OVERHEAD 2443 2442 bl facility_unavailable_exception 2443 + REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */ 2444 2444 b interrupt_return 2445 2445 2446 2446 GEN_KVM h_facility_unavailable
+1 -1
arch/x86/include/asm/device.h
··· 3 3 #define _ASM_X86_DEVICE_H 4 4 5 5 struct dev_archdata { 6 - #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) 6 + #ifdef CONFIG_IOMMU_API 7 7 void *iommu; /* hook for IOMMU specific extension */ 8 8 #endif 9 9 };
+1 -1
arch/x86/include/asm/dma.h
··· 74 74 #define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT) 75 75 76 76 /* 4GB broken PCI/AGP hardware bus master zone */ 77 - #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) 77 + #define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) 78 78 79 79 #ifdef CONFIG_X86_32 80 80 /* The maximum address that we can perform a DMA transfer to on this platform */
+2 -2
arch/x86/include/asm/io_bitmap.h
··· 17 17 18 18 #ifdef CONFIG_X86_IOPL_IOPERM 19 19 void io_bitmap_share(struct task_struct *tsk); 20 - void io_bitmap_exit(void); 20 + void io_bitmap_exit(struct task_struct *tsk); 21 21 22 22 void native_tss_update_io_bitmap(void); 23 23 ··· 29 29 30 30 #else 31 31 static inline void io_bitmap_share(struct task_struct *tsk) { } 32 - static inline void io_bitmap_exit(void) { } 32 + static inline void io_bitmap_exit(struct task_struct *tsk) { } 33 33 static inline void tss_update_io_bitmap(void) { } 34 34 #endif 35 35
+9 -2
arch/x86/include/uapi/asm/unistd.h
··· 2 2 #ifndef _UAPI_ASM_X86_UNISTD_H 3 3 #define _UAPI_ASM_X86_UNISTD_H 4 4 5 - /* x32 syscall flag bit */ 6 - #define __X32_SYSCALL_BIT 0x40000000UL 5 + /* 6 + * x32 syscall flag bit. Some user programs expect syscall NR macros 7 + * and __X32_SYSCALL_BIT to have type int, even though syscall numbers 8 + * are, for practical purposes, unsigned long. 9 + * 10 + * Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right 11 + * thing regardless. 12 + */ 13 + #define __X32_SYSCALL_BIT 0x40000000 7 14 8 15 #ifndef __KERNEL__ 9 16 # ifdef __i386__
+48 -38
arch/x86/kernel/fpu/xstate.c
··· 957 957 return true; 958 958 } 959 959 960 - /* 961 - * This is similar to user_regset_copyout(), but will not add offset to 962 - * the source data pointer or increment pos, count, kbuf, and ubuf. 963 - */ 964 - static inline void 965 - __copy_xstate_to_kernel(void *kbuf, const void *data, 966 - unsigned int offset, unsigned int size, unsigned int size_total) 960 + static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count) 967 961 { 968 - if (offset < size_total) { 969 - unsigned int copy = min(size, size_total - offset); 962 + if (*pos < to) { 963 + unsigned size = to - *pos; 970 964 971 - memcpy(kbuf + offset, data, copy); 965 + if (size > *count) 966 + size = *count; 967 + memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size); 968 + *kbuf += size; 969 + *pos += size; 970 + *count -= size; 971 + } 972 + } 973 + 974 + static void copy_part(unsigned offset, unsigned size, void *from, 975 + void **kbuf, unsigned *pos, unsigned *count) 976 + { 977 + fill_gap(offset, kbuf, pos, count); 978 + if (size > *count) 979 + size = *count; 980 + if (size) { 981 + memcpy(*kbuf, from, size); 982 + *kbuf += size; 983 + *pos += size; 984 + *count -= size; 972 985 } 973 986 } 974 987 ··· 994 981 */ 995 982 int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) 996 983 { 997 - unsigned int offset, size; 998 984 struct xstate_header header; 985 + const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr); 986 + unsigned count = size_total; 999 987 int i; 1000 988 1001 989 /* ··· 1012 998 header.xfeatures = xsave->header.xfeatures; 1013 999 header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR; 1014 1000 1001 + if (header.xfeatures & XFEATURE_MASK_FP) 1002 + copy_part(0, off_mxcsr, 1003 + &xsave->i387, &kbuf, &offset_start, &count); 1004 + if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)) 1005 + copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE, 1006 + &xsave->i387.mxcsr, &kbuf, &offset_start, 
&count); 1007 + if (header.xfeatures & XFEATURE_MASK_FP) 1008 + copy_part(offsetof(struct fxregs_state, st_space), 128, 1009 + &xsave->i387.st_space, &kbuf, &offset_start, &count); 1010 + if (header.xfeatures & XFEATURE_MASK_SSE) 1011 + copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256, 1012 + &xsave->i387.xmm_space, &kbuf, &offset_start, &count); 1013 + /* 1014 + * Fill xsave->i387.sw_reserved value for ptrace frame: 1015 + */ 1016 + copy_part(offsetof(struct fxregs_state, sw_reserved), 48, 1017 + xstate_fx_sw_bytes, &kbuf, &offset_start, &count); 1015 1018 /* 1016 1019 * Copy xregs_state->header: 1017 1020 */ 1018 - offset = offsetof(struct xregs_state, header); 1019 - size = sizeof(header); 1021 + copy_part(offsetof(struct xregs_state, header), sizeof(header), 1022 + &header, &kbuf, &offset_start, &count); 1020 1023 1021 - __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total); 1022 - 1023 - for (i = 0; i < XFEATURE_MAX; i++) { 1024 + for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) { 1024 1025 /* 1025 1026 * Copy only in-use xstates: 1026 1027 */ 1027 1028 if ((header.xfeatures >> i) & 1) { 1028 1029 void *src = __raw_xsave_addr(xsave, i); 1029 1030 1030 - offset = xstate_offsets[i]; 1031 - size = xstate_sizes[i]; 1032 - 1033 - /* The next component has to fit fully into the output buffer: */ 1034 - if (offset + size > size_total) 1035 - break; 1036 - 1037 - __copy_xstate_to_kernel(kbuf, src, offset, size, size_total); 1031 + copy_part(xstate_offsets[i], xstate_sizes[i], 1032 + src, &kbuf, &offset_start, &count); 1038 1033 } 1039 1034 1040 1035 } 1041 - 1042 - if (xfeatures_mxcsr_quirk(header.xfeatures)) { 1043 - offset = offsetof(struct fxregs_state, mxcsr); 1044 - size = MXCSR_AND_FLAGS_SIZE; 1045 - __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total); 1046 - } 1047 - 1048 - /* 1049 - * Fill xsave->i387.sw_reserved value for ptrace frame: 1050 - */ 1051 - offset = offsetof(struct fxregs_state, sw_reserved); 1052 - size 
= sizeof(xstate_fx_sw_bytes); 1053 - 1054 - __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total); 1036 + fill_gap(size_total, &kbuf, &offset_start, &count); 1055 1037 1056 1038 return 0; 1057 1039 }
+11 -11
arch/x86/kernel/ioport.c
··· 33 33 set_tsk_thread_flag(tsk, TIF_IO_BITMAP); 34 34 } 35 35 36 - static void task_update_io_bitmap(void) 36 + static void task_update_io_bitmap(struct task_struct *tsk) 37 37 { 38 - struct thread_struct *t = &current->thread; 38 + struct thread_struct *t = &tsk->thread; 39 39 40 40 if (t->iopl_emul == 3 || t->io_bitmap) { 41 41 /* TSS update is handled on exit to user space */ 42 - set_thread_flag(TIF_IO_BITMAP); 42 + set_tsk_thread_flag(tsk, TIF_IO_BITMAP); 43 43 } else { 44 - clear_thread_flag(TIF_IO_BITMAP); 44 + clear_tsk_thread_flag(tsk, TIF_IO_BITMAP); 45 45 /* Invalidate TSS */ 46 46 preempt_disable(); 47 47 tss_update_io_bitmap(); ··· 49 49 } 50 50 } 51 51 52 - void io_bitmap_exit(void) 52 + void io_bitmap_exit(struct task_struct *tsk) 53 53 { 54 - struct io_bitmap *iobm = current->thread.io_bitmap; 54 + struct io_bitmap *iobm = tsk->thread.io_bitmap; 55 55 56 - current->thread.io_bitmap = NULL; 57 - task_update_io_bitmap(); 56 + tsk->thread.io_bitmap = NULL; 57 + task_update_io_bitmap(tsk); 58 58 if (iobm && refcount_dec_and_test(&iobm->refcnt)) 59 59 kfree(iobm); 60 60 } ··· 102 102 if (!iobm) 103 103 return -ENOMEM; 104 104 refcount_set(&iobm->refcnt, 1); 105 - io_bitmap_exit(); 105 + io_bitmap_exit(current); 106 106 } 107 107 108 108 /* ··· 134 134 } 135 135 /* All permissions dropped? */ 136 136 if (max_long == UINT_MAX) { 137 - io_bitmap_exit(); 137 + io_bitmap_exit(current); 138 138 return 0; 139 139 } 140 140 ··· 192 192 } 193 193 194 194 t->iopl_emul = level; 195 - task_update_io_bitmap(); 195 + task_update_io_bitmap(current); 196 196 197 197 return 0; 198 198 }
+2 -2
arch/x86/kernel/process.c
··· 96 96 } 97 97 98 98 /* 99 - * Free current thread data structures etc.. 99 + * Free thread data structures etc.. 100 100 */ 101 101 void exit_thread(struct task_struct *tsk) 102 102 { ··· 104 104 struct fpu *fpu = &t->fpu; 105 105 106 106 if (test_thread_flag(TIF_IO_BITMAP)) 107 - io_bitmap_exit(); 107 + io_bitmap_exit(tsk); 108 108 109 109 free_vm86(t); 110 110
+4 -7
block/blk-core.c
··· 891 891 } 892 892 893 893 /* 894 - * Non-mq queues do not honor REQ_NOWAIT, so complete a bio 895 - * with BLK_STS_AGAIN status in order to catch -EAGAIN and 896 - * to give a chance to the caller to repeat request gracefully. 894 + * For a REQ_NOWAIT based request, return -EOPNOTSUPP 895 + * if queue is not a request based queue. 897 896 */ 898 - if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) { 899 - status = BLK_STS_AGAIN; 900 - goto end_io; 901 - } 897 + if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) 898 + goto not_supported; 902 899 903 900 if (should_fail_bio(bio)) 904 901 goto end_io;
+1
drivers/clk/qcom/Kconfig
··· 377 377 378 378 config SM_GCC_8250 379 379 tristate "SM8250 Global Clock Controller" 380 + select QCOM_GDSC 380 381 help 381 382 Support for the global clock controller on SM8250 devices. 382 383 Say Y if you want to use peripheral devices such as UART,
+1 -2
drivers/clk/qcom/gcc-sm8150.c
··· 76 76 .clkr.hw.init = &(struct clk_init_data){ 77 77 .name = "gpll0_out_even", 78 78 .parent_data = &(const struct clk_parent_data){ 79 - .fw_name = "bi_tcxo", 80 - .name = "bi_tcxo", 79 + .hw = &gpll0.clkr.hw, 81 80 }, 82 81 .num_parents = 1, 83 82 .ops = &clk_trion_pll_postdiv_ops,
+1 -1
drivers/crypto/chelsio/chtls/chtls_io.c
··· 682 682 make_tx_data_wr(sk, skb, immdlen, len, 683 683 credits_needed, completion); 684 684 tp->snd_nxt += len; 685 - tp->lsndtime = tcp_time_stamp(tp); 685 + tp->lsndtime = tcp_jiffies32; 686 686 if (completion) 687 687 ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; 688 688 } else {
+1 -1
drivers/gpio/gpio-bcm-kona.c
··· 625 625 626 626 kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0); 627 627 if (IS_ERR(kona_gpio->reg_base)) { 628 - ret = -ENXIO; 628 + ret = PTR_ERR(kona_gpio->reg_base); 629 629 goto err_irq_domain; 630 630 } 631 631
+5 -2
drivers/gpio/gpio-exar.c
··· 148 148 mutex_init(&exar_gpio->lock); 149 149 150 150 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); 151 - if (index < 0) 152 - goto err_destroy; 151 + if (index < 0) { 152 + ret = index; 153 + goto err_mutex_destroy; 154 + } 153 155 154 156 sprintf(exar_gpio->name, "exar_gpio%d", index); 155 157 exar_gpio->gpio_chip.label = exar_gpio->name; ··· 178 176 179 177 err_destroy: 180 178 ida_simple_remove(&ida_index, index); 179 + err_mutex_destroy: 181 180 mutex_destroy(&exar_gpio->lock); 182 181 return ret; 183 182 }
+3 -3
drivers/gpio/gpio-mlxbf2.c
··· 127 127 { 128 128 u32 arm_gpio_lock_val; 129 129 130 - spin_lock(&gs->gc.bgpio_lock); 131 130 mutex_lock(yu_arm_gpio_lock_param.lock); 131 + spin_lock(&gs->gc.bgpio_lock); 132 132 133 133 arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io); 134 134 ··· 136 136 * When lock active bit[31] is set, ModeX is write enabled 137 137 */ 138 138 if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) { 139 - mutex_unlock(yu_arm_gpio_lock_param.lock); 140 139 spin_unlock(&gs->gc.bgpio_lock); 140 + mutex_unlock(yu_arm_gpio_lock_param.lock); 141 141 return -EINVAL; 142 142 } 143 143 ··· 152 152 static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs) 153 153 { 154 154 writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io); 155 - mutex_unlock(yu_arm_gpio_lock_param.lock); 156 155 spin_unlock(&gs->gc.bgpio_lock); 156 + mutex_unlock(yu_arm_gpio_lock_param.lock); 157 157 } 158 158 159 159 /*
+9 -6
drivers/gpio/gpio-mvebu.c
··· 782 782 "marvell,armada-370-gpio")) 783 783 return 0; 784 784 785 + /* 786 + * There are only two sets of PWM configuration registers for 787 + * all the GPIO lines on those SoCs which this driver reserves 788 + * for the first two GPIO chips. So if the resource is missing 789 + * we can't treat it as an error. 790 + */ 791 + if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm")) 792 + return 0; 793 + 785 794 if (IS_ERR(mvchip->clk)) 786 795 return PTR_ERR(mvchip->clk); 787 796 ··· 813 804 mvchip->mvpwm = mvpwm; 814 805 mvpwm->mvchip = mvchip; 815 806 816 - /* 817 - * There are only two sets of PWM configuration registers for 818 - * all the GPIO lines on those SoCs which this driver reserves 819 - * for the first two GPIO chips. So if the resource is missing 820 - * we can't treat it as an error. 821 - */ 822 807 mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm"); 823 808 if (IS_ERR(mvpwm->membase)) 824 809 return PTR_ERR(mvpwm->membase);
+2 -2
drivers/gpio/gpio-pxa.c
··· 660 660 pchip->irq1 = irq1; 661 661 662 662 gpio_reg_base = devm_platform_ioremap_resource(pdev, 0); 663 - if (!gpio_reg_base) 664 - return -EINVAL; 663 + if (IS_ERR(gpio_reg_base)) 664 + return PTR_ERR(gpio_reg_base); 665 665 666 666 clk = clk_get(&pdev->dev, NULL); 667 667 if (IS_ERR(clk)) {
+22 -4
drivers/gpio/gpiolib.c
··· 729 729 if (ret) 730 730 goto out_free_descs; 731 731 } 732 + 733 + atomic_notifier_call_chain(&desc->gdev->notifier, 734 + GPIOLINE_CHANGED_REQUESTED, desc); 735 + 732 736 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n", 733 737 offset); 734 738 } ··· 1086 1082 ret = gpiod_direction_input(desc); 1087 1083 if (ret) 1088 1084 goto out_free_desc; 1085 + 1086 + atomic_notifier_call_chain(&desc->gdev->notifier, 1087 + GPIOLINE_CHANGED_REQUESTED, desc); 1089 1088 1090 1089 le->irq = gpiod_to_irq(desc); 1091 1090 if (le->irq <= 0) { ··· 3005 2998 } 3006 2999 done: 3007 3000 spin_unlock_irqrestore(&gpio_lock, flags); 3008 - atomic_notifier_call_chain(&desc->gdev->notifier, 3009 - GPIOLINE_CHANGED_REQUESTED, desc); 3010 3001 return ret; 3011 3002 } 3012 3003 ··· 4220 4215 } 4221 4216 } 4222 4217 4223 - if (test_bit(FLAG_IS_OUT, &desc->flags)) { 4218 + /* To be valid for IRQ the line needs to be input or open drain */ 4219 + if (test_bit(FLAG_IS_OUT, &desc->flags) && 4220 + !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { 4224 4221 chip_err(gc, 4225 4222 "%s: tried to flag a GPIO set as output for IRQ\n", 4226 4223 __func__); ··· 4285 4278 4286 4279 if (!IS_ERR(desc) && 4287 4280 !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) { 4288 - WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags)); 4281 + /* 4282 + * We must not be output when using IRQ UNLESS we are 4283 + * open drain. 
4284 + */ 4285 + WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) && 4286 + !test_bit(FLAG_OPEN_DRAIN, &desc->flags)); 4289 4287 set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); 4290 4288 } 4291 4289 } ··· 4973 4961 return ERR_PTR(ret); 4974 4962 } 4975 4963 4964 + atomic_notifier_call_chain(&desc->gdev->notifier, 4965 + GPIOLINE_CHANGED_REQUESTED, desc); 4966 + 4976 4967 return desc; 4977 4968 } 4978 4969 EXPORT_SYMBOL_GPL(gpiod_get_index); ··· 5040 5025 gpiod_put(desc); 5041 5026 return ERR_PTR(ret); 5042 5027 } 5028 + 5029 + atomic_notifier_call_chain(&desc->gdev->notifier, 5030 + GPIOLINE_CHANGED_REQUESTED, desc); 5043 5031 5044 5032 return desc; 5045 5033 }
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 1050 1050 /* Check with device cgroup if @kfd device is accessible */ 1051 1051 static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) 1052 1052 { 1053 - #if defined(CONFIG_CGROUP_DEVICE) 1053 + #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) 1054 1054 struct drm_device *ddev = kfd->ddev; 1055 1055 1056 1056 return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
-7
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 7880 7880 return -EINVAL; 7881 7881 } 7882 7882 7883 - if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width || 7884 - new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) { 7885 - DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n", 7886 - new_plane_state->crtc_x, new_plane_state->crtc_y); 7887 - return -EINVAL; 7888 - } 7889 - 7890 7883 return 0; 7891 7884 } 7892 7885
+2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 1684 1684 return; 1685 1685 1686 1686 /* Stall out until the cursor update completes. */ 1687 + if (vupdate_end < vupdate_start) 1688 + vupdate_end += stream->timing.v_total; 1687 1689 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; 1688 1690 udelay(us_to_vupdate + us_vupdate); 1689 1691 }
+3 -3
drivers/gpu/drm/ingenic/ingenic-drm.c
··· 328 328 if (!drm_atomic_crtc_needs_modeset(state)) 329 329 return 0; 330 330 331 - if (state->mode.hdisplay > priv->soc_info->max_height || 332 - state->mode.vdisplay > priv->soc_info->max_width) 331 + if (state->mode.hdisplay > priv->soc_info->max_width || 332 + state->mode.vdisplay > priv->soc_info->max_height) 333 333 return -EINVAL; 334 334 335 335 rate = clk_round_rate(priv->pix_clk, ··· 474 474 475 475 static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) 476 476 { 477 - struct ingenic_drm *priv = arg; 477 + struct ingenic_drm *priv = drm_device_get_priv(arg); 478 478 unsigned int state; 479 479 480 480 regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
+13 -7
drivers/infiniband/core/rdma_core.c
··· 153 153 uobj->context = NULL; 154 154 155 155 /* 156 - * For DESTROY the usecnt is held write locked, the caller is expected 157 - * to put it unlock and put the object when done with it. Only DESTROY 158 - * can remove the IDR handle. 156 + * For DESTROY the usecnt is not changed, the caller is expected to 157 + * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR 158 + * handle. 159 159 */ 160 160 if (reason != RDMA_REMOVE_DESTROY) 161 161 atomic_set(&uobj->usecnt, 0); ··· 187 187 /* 188 188 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY 189 189 * sequence. It should only be used from command callbacks. On success the 190 - * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This 190 + * caller must pair this with uobj_put_destroy(). This 191 191 * version requires the caller to have already obtained an 192 192 * LOOKUP_DESTROY uobject kref. 193 193 */ ··· 198 198 199 199 down_read(&ufile->hw_destroy_rwsem); 200 200 201 + /* 202 + * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left 203 + * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY. 204 + * This is because any other concurrent thread can still see the object 205 + * in the xarray due to RCU. Leaving it locked ensures nothing else will 206 + * touch it. 207 + */ 201 208 ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE); 202 209 if (ret) 203 210 goto out_unlock; ··· 223 216 /* 224 217 * uobj_get_destroy destroys the HW object and returns a handle to the uobj 225 218 * with a NULL object pointer. The caller must pair this with 226 - * uverbs_put_destroy. 219 + * uobj_put_destroy(). 
227 220 */ 228 221 struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, 229 222 u32 id, struct uverbs_attr_bundle *attrs) ··· 257 250 uobj = __uobj_get_destroy(obj, id, attrs); 258 251 if (IS_ERR(uobj)) 259 252 return PTR_ERR(uobj); 260 - 261 - rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); 253 + uobj_put_destroy(uobj); 262 254 return 0; 263 255 } 264 256
+1
drivers/infiniband/hw/mlx5/mr.c
··· 1439 1439 1440 1440 if (is_odp_mr(mr)) { 1441 1441 to_ib_umem_odp(mr->umem)->private = mr; 1442 + init_waitqueue_head(&mr->q_deferred_work); 1442 1443 atomic_set(&mr->num_deferred_work, 0); 1443 1444 err = xa_err(xa_store(&dev->odp_mkeys, 1444 1445 mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
+5 -4
drivers/infiniband/hw/qib/qib_sysfs.c
··· 760 760 qib_dev_err(dd, 761 761 "Skipping linkcontrol sysfs info, (err %d) port %u\n", 762 762 ret, port_num); 763 - goto bail; 763 + goto bail_link; 764 764 } 765 765 kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); 766 766 ··· 770 770 qib_dev_err(dd, 771 771 "Skipping sl2vl sysfs info, (err %d) port %u\n", 772 772 ret, port_num); 773 - goto bail_link; 773 + goto bail_sl; 774 774 } 775 775 kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); 776 776 ··· 780 780 qib_dev_err(dd, 781 781 "Skipping diag_counters sysfs info, (err %d) port %u\n", 782 782 ret, port_num); 783 - goto bail_sl; 783 + goto bail_diagc; 784 784 } 785 785 kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); 786 786 ··· 793 793 qib_dev_err(dd, 794 794 "Skipping Congestion Control sysfs info, (err %d) port %u\n", 795 795 ret, port_num); 796 - goto bail_diagc; 796 + goto bail_cc; 797 797 } 798 798 799 799 kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); ··· 854 854 &cc_table_bin_attr); 855 855 kobject_put(&ppd->pport_cc_kobj); 856 856 } 857 + kobject_put(&ppd->diagc_kobj); 857 858 kobject_put(&ppd->sl2vl_kobj); 858 859 kobject_put(&ppd->pport_kobj); 859 860 }
+1 -1
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 829 829 !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 830 830 dev_err(&pdev->dev, "PCI BAR region not MMIO\n"); 831 831 ret = -ENOMEM; 832 - goto err_free_device; 832 + goto err_disable_pdev; 833 833 } 834 834 835 835 ret = pci_request_regions(pdev, DRV_NAME);
+4
drivers/infiniband/ulp/ipoib/ipoib.h
··· 377 377 struct ipoib_rx_buf *rx_ring; 378 378 379 379 struct ipoib_tx_buf *tx_ring; 380 + /* cyclic ring variables for managing tx_ring, for UD only */ 380 381 unsigned int tx_head; 381 382 unsigned int tx_tail; 383 + /* cyclic ring variables for counting overall outstanding send WRs */ 384 + unsigned int global_tx_head; 385 + unsigned int global_tx_tail; 382 386 struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; 383 387 struct ib_ud_wr tx_wr; 384 388 struct ib_wc send_wc[MAX_SEND_CQE];
+9 -6
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 756 756 return; 757 757 } 758 758 759 - if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) { 759 + if ((priv->global_tx_head - priv->global_tx_tail) == 760 + ipoib_sendq_size - 1) { 760 761 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", 761 762 tx->qp->qp_num); 762 763 netif_stop_queue(dev); ··· 787 786 } else { 788 787 netif_trans_update(dev); 789 788 ++tx->tx_head; 790 - ++priv->tx_head; 789 + ++priv->global_tx_head; 791 790 } 792 791 } 793 792 ··· 821 820 netif_tx_lock(dev); 822 821 823 822 ++tx->tx_tail; 824 - ++priv->tx_tail; 823 + ++priv->global_tx_tail; 825 824 826 825 if (unlikely(netif_queue_stopped(dev) && 827 - (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 && 826 + ((priv->global_tx_head - priv->global_tx_tail) <= 827 + ipoib_sendq_size >> 1) && 828 828 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) 829 829 netif_wake_queue(dev); 830 830 ··· 1234 1232 dev_kfree_skb_any(tx_req->skb); 1235 1233 netif_tx_lock_bh(p->dev); 1236 1234 ++p->tx_tail; 1237 - ++priv->tx_tail; 1238 - if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) && 1235 + ++priv->global_tx_tail; 1236 + if (unlikely((priv->global_tx_head - priv->global_tx_tail) <= 1237 + ipoib_sendq_size >> 1) && 1239 1238 netif_queue_stopped(p->dev) && 1240 1239 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 1241 1240 netif_wake_queue(p->dev);
+7 -2
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 407 407 dev_kfree_skb_any(tx_req->skb); 408 408 409 409 ++priv->tx_tail; 410 + ++priv->global_tx_tail; 410 411 411 412 if (unlikely(netif_queue_stopped(dev) && 412 - ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) && 413 + ((priv->global_tx_head - priv->global_tx_tail) <= 414 + ipoib_sendq_size >> 1) && 413 415 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) 414 416 netif_wake_queue(dev); 415 417 ··· 636 634 else 637 635 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; 638 636 /* increase the tx_head after send success, but use it for queue state */ 639 - if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) { 637 + if ((priv->global_tx_head - priv->global_tx_tail) == 638 + ipoib_sendq_size - 1) { 640 639 ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); 641 640 netif_stop_queue(dev); 642 641 } ··· 665 662 666 663 rc = priv->tx_head; 667 664 ++priv->tx_head; 665 + ++priv->global_tx_head; 668 666 } 669 667 return rc; 670 668 } ··· 811 807 ipoib_dma_unmap_tx(priv, tx_req); 812 808 dev_kfree_skb_any(tx_req->skb); 813 809 ++priv->tx_tail; 810 + ++priv->global_tx_tail; 814 811 } 815 812 816 813 for (i = 0; i < ipoib_recvq_size; ++i) {
+6 -4
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1184 1184 1185 1185 ipoib_warn(priv, "transmit timeout: latency %d msecs\n", 1186 1186 jiffies_to_msecs(jiffies - dev_trans_start(dev))); 1187 - ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", 1188 - netif_queue_stopped(dev), 1189 - priv->tx_head, priv->tx_tail); 1187 + ipoib_warn(priv, 1188 + "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n", 1189 + netif_queue_stopped(dev), priv->tx_head, priv->tx_tail, 1190 + priv->global_tx_head, priv->global_tx_tail); 1191 + 1190 1192 /* XXX reset QP, etc. */ 1191 1193 } 1192 1194 ··· 1703 1701 goto out_rx_ring_cleanup; 1704 1702 } 1705 1703 1706 - /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ 1704 + /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */ 1707 1705 1708 1706 if (ipoib_transport_dev_init(dev, priv->ca)) { 1709 1707 pr_warn("%s: ipoib_transport_dev_init failed\n",
+4 -15
drivers/input/evdev.c
··· 326 326 return fasync_helper(fd, file, on, &client->fasync); 327 327 } 328 328 329 - static int evdev_flush(struct file *file, fl_owner_t id) 330 - { 331 - struct evdev_client *client = file->private_data; 332 - struct evdev *evdev = client->evdev; 333 - 334 - mutex_lock(&evdev->mutex); 335 - 336 - if (evdev->exist && !client->revoked) 337 - input_flush_device(&evdev->handle, file); 338 - 339 - mutex_unlock(&evdev->mutex); 340 - return 0; 341 - } 342 - 343 329 static void evdev_free(struct device *dev) 344 330 { 345 331 struct evdev *evdev = container_of(dev, struct evdev, dev); ··· 439 453 unsigned int i; 440 454 441 455 mutex_lock(&evdev->mutex); 456 + 457 + if (evdev->exist && !client->revoked) 458 + input_flush_device(&evdev->handle, file); 459 + 442 460 evdev_ungrab(evdev, client); 443 461 mutex_unlock(&evdev->mutex); 444 462 ··· 1300 1310 .compat_ioctl = evdev_ioctl_compat, 1301 1311 #endif 1302 1312 .fasync = evdev_fasync, 1303 - .flush = evdev_flush, 1304 1313 .llseek = no_llseek, 1305 1314 }; 1306 1315
+12
drivers/input/joystick/xpad.c
··· 459 459 }; 460 460 461 461 /* 462 + * This packet is required for Xbox One S (0x045e:0x02ea) 463 + * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to 464 + * initialize the controller that was previously used in 465 + * Bluetooth mode. 466 + */ 467 + static const u8 xboxone_s_init[] = { 468 + 0x05, 0x20, 0x00, 0x0f, 0x06 469 + }; 470 + 471 + /* 462 472 * This packet is required for the Titanfall 2 Xbox One pads 463 473 * (0x0e6f:0x0165) to finish initialization and for Hori pads 464 474 * (0x0f0d:0x0067) to make the analog sticks work. ··· 526 516 XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), 527 517 XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), 528 518 XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), 519 + XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), 520 + XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), 529 521 XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), 530 522 XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), 531 523 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
+1 -1
drivers/input/keyboard/applespi.c
··· 186 186 u8 number_of_fingers; 187 187 u8 clicked2; 188 188 u8 unknown3[16]; 189 - struct tp_finger fingers[0]; 189 + struct tp_finger fingers[]; 190 190 }; 191 191 192 192 /**
+5 -9
drivers/input/keyboard/cros_ec_keyb.c
··· 347 347 params->info_type = info_type; 348 348 params->event_type = event_type; 349 349 350 - ret = cros_ec_cmd_xfer(ec_dev, msg); 351 - if (ret < 0) { 352 - dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n", 353 - (int)info_type, (int)event_type, ret); 354 - } else if (msg->result == EC_RES_INVALID_VERSION) { 350 + ret = cros_ec_cmd_xfer_status(ec_dev, msg); 351 + if (ret == -ENOTSUPP) { 355 352 /* With older ECs we just return 0 for everything */ 356 353 memset(result, 0, result_size); 357 354 ret = 0; 358 - } else if (msg->result != EC_RES_SUCCESS) { 359 - dev_warn(ec_dev->dev, "Error getting info %d/%d: %d\n", 360 - (int)info_type, (int)event_type, msg->result); 361 - ret = -EPROTO; 355 + } else if (ret < 0) { 356 + dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n", 357 + (int)info_type, (int)event_type, ret); 362 358 } else if (ret != result_size) { 363 359 dev_warn(ec_dev->dev, "Wrong size %d/%d: %d != %zu\n", 364 360 (int)info_type, (int)event_type,
+37 -35
drivers/input/misc/axp20x-pek.c
··· 205 205 206 206 static irqreturn_t axp20x_pek_irq(int irq, void *pwr) 207 207 { 208 - struct input_dev *idev = pwr; 209 - struct axp20x_pek *axp20x_pek = input_get_drvdata(idev); 208 + struct axp20x_pek *axp20x_pek = pwr; 209 + struct input_dev *idev = axp20x_pek->input; 210 + 211 + if (!idev) 212 + return IRQ_HANDLED; 210 213 211 214 /* 212 215 * The power-button is connected to ground so a falling edge (dbf) ··· 228 225 static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, 229 226 struct platform_device *pdev) 230 227 { 231 - struct axp20x_dev *axp20x = axp20x_pek->axp20x; 232 228 struct input_dev *idev; 233 229 int error; 234 - 235 - axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR"); 236 - if (axp20x_pek->irq_dbr < 0) 237 - return axp20x_pek->irq_dbr; 238 - axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc, 239 - axp20x_pek->irq_dbr); 240 - 241 - axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF"); 242 - if (axp20x_pek->irq_dbf < 0) 243 - return axp20x_pek->irq_dbf; 244 - axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc, 245 - axp20x_pek->irq_dbf); 246 230 247 231 axp20x_pek->input = devm_input_allocate_device(&pdev->dev); 248 232 if (!axp20x_pek->input) ··· 245 255 246 256 input_set_drvdata(idev, axp20x_pek); 247 257 248 - error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr, 249 - axp20x_pek_irq, 0, 250 - "axp20x-pek-dbr", idev); 251 - if (error < 0) { 252 - dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", 253 - axp20x_pek->irq_dbr, error); 254 - return error; 255 - } 256 - 257 - error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf, 258 - axp20x_pek_irq, 0, 259 - "axp20x-pek-dbf", idev); 260 - if (error < 0) { 261 - dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", 262 - axp20x_pek->irq_dbf, error); 263 - return error; 264 - } 265 - 266 258 error = input_register_device(idev); 267 259 if (error) { 268 260 dev_err(&pdev->dev, "Can't register 
input device: %d\n", 269 261 error); 270 262 return error; 271 263 } 272 - 273 - device_init_wakeup(&pdev->dev, true); 274 264 275 265 return 0; 276 266 } ··· 309 339 310 340 axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent); 311 341 342 + axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR"); 343 + if (axp20x_pek->irq_dbr < 0) 344 + return axp20x_pek->irq_dbr; 345 + axp20x_pek->irq_dbr = regmap_irq_get_virq( 346 + axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr); 347 + 348 + axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF"); 349 + if (axp20x_pek->irq_dbf < 0) 350 + return axp20x_pek->irq_dbf; 351 + axp20x_pek->irq_dbf = regmap_irq_get_virq( 352 + axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf); 353 + 312 354 if (axp20x_pek_should_register_input(axp20x_pek, pdev)) { 313 355 error = axp20x_pek_probe_input_device(axp20x_pek, pdev); 314 356 if (error) ··· 328 346 } 329 347 330 348 axp20x_pek->info = (struct axp20x_info *)match->driver_data; 349 + 350 + error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr, 351 + axp20x_pek_irq, 0, 352 + "axp20x-pek-dbr", axp20x_pek); 353 + if (error < 0) { 354 + dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", 355 + axp20x_pek->irq_dbr, error); 356 + return error; 357 + } 358 + 359 + error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf, 360 + axp20x_pek_irq, 0, 361 + "axp20x-pek-dbf", axp20x_pek); 362 + if (error < 0) { 363 + dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", 364 + axp20x_pek->irq_dbf, error); 365 + return error; 366 + } 367 + 368 + device_init_wakeup(&pdev->dev, true); 331 369 332 370 platform_set_drvdata(pdev, axp20x_pek); 333 371
+1
drivers/input/mouse/synaptics.c
··· 170 170 "LEN005b", /* P50 */ 171 171 "LEN005e", /* T560 */ 172 172 "LEN006c", /* T470s */ 173 + "LEN007a", /* T470s */ 173 174 "LEN0071", /* T480 */ 174 175 "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ 175 176 "LEN0073", /* X1 Carbon G5 (Elantech) */
+3 -2
drivers/input/rmi4/rmi_driver.c
··· 205 205 206 206 if (count) { 207 207 kfree(attn_data.data); 208 - attn_data.data = NULL; 208 + drvdata->attn_data.data = NULL; 209 209 } 210 210 211 211 if (!kfifo_is_empty(&drvdata->attn_fifo)) ··· 1210 1210 if (data->input) { 1211 1211 rmi_driver_set_input_name(rmi_dev, data->input); 1212 1212 if (!rmi_dev->xport->input) { 1213 - if (input_register_device(data->input)) { 1213 + retval = input_register_device(data->input); 1214 + if (retval) { 1214 1215 dev_err(dev, "%s: Failed to register input device.\n", 1215 1216 __func__); 1216 1217 goto err_destroy_functions;
+7
drivers/input/serio/i8042-x86ia64io.h
··· 662 662 DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), 663 663 }, 664 664 }, 665 + { 666 + /* Lenovo ThinkPad Twist S230u */ 667 + .matches = { 668 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 669 + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), 670 + }, 671 + }, 665 672 { } 666 673 }; 667 674
+10 -1
drivers/input/touchscreen/elants_i2c.c
··· 19 19 */ 20 20 21 21 22 + #include <linux/bits.h> 22 23 #include <linux/module.h> 23 24 #include <linux/input.h> 24 25 #include <linux/interrupt.h> ··· 74 73 #define FW_POS_STATE 1 75 74 #define FW_POS_TOTAL 2 76 75 #define FW_POS_XY 3 76 + #define FW_POS_TOOL_TYPE 33 77 77 #define FW_POS_CHECKSUM 34 78 78 #define FW_POS_WIDTH 35 79 79 #define FW_POS_PRESSURE 45 ··· 844 842 { 845 843 struct input_dev *input = ts->input; 846 844 unsigned int n_fingers; 845 + unsigned int tool_type; 847 846 u16 finger_state; 848 847 int i; 849 848 ··· 854 851 855 852 dev_dbg(&ts->client->dev, 856 853 "n_fingers: %u, state: %04x\n", n_fingers, finger_state); 854 + 855 + /* Note: all fingers have the same tool type */ 856 + tool_type = buf[FW_POS_TOOL_TYPE] & BIT(0) ? 857 + MT_TOOL_FINGER : MT_TOOL_PALM; 857 858 858 859 for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) { 859 860 if (finger_state & 1) { ··· 874 867 i, x, y, p, w); 875 868 876 869 input_mt_slot(input, i); 877 - input_mt_report_slot_state(input, MT_TOOL_FINGER, true); 870 + input_mt_report_slot_state(input, tool_type, true); 878 871 input_event(input, EV_ABS, ABS_MT_POSITION_X, x); 879 872 input_event(input, EV_ABS, ABS_MT_POSITION_Y, y); 880 873 input_event(input, EV_ABS, ABS_MT_PRESSURE, p); ··· 1314 1307 input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0); 1315 1308 input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); 1316 1309 input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0); 1310 + input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE, 1311 + 0, MT_TOOL_PALM, 0, 0); 1317 1312 input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); 1318 1313 input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); 1319 1314 input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
+5 -7
drivers/input/touchscreen/mms114.c
··· 91 91 if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL) 92 92 BUG(); 93 93 94 - /* Write register: use repeated start */ 94 + /* Write register */ 95 95 xfer[0].addr = client->addr; 96 - xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART; 96 + xfer[0].flags = client->flags & I2C_M_TEN; 97 97 xfer[0].len = 1; 98 98 xfer[0].buf = &buf; 99 99 100 100 /* Read data */ 101 101 xfer[1].addr = client->addr; 102 - xfer[1].flags = I2C_M_RD; 102 + xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD; 103 103 xfer[1].len = len; 104 104 xfer[1].buf = val; 105 105 ··· 428 428 const void *match_data; 429 429 int error; 430 430 431 - if (!i2c_check_functionality(client->adapter, 432 - I2C_FUNC_PROTOCOL_MANGLING)) { 433 - dev_err(&client->dev, 434 - "Need i2c bus that supports protocol mangling\n"); 431 + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { 432 + dev_err(&client->dev, "Not supported I2C adapter\n"); 435 433 return -ENODEV; 436 434 } 437 435
+1
drivers/input/touchscreen/usbtouchscreen.c
··· 182 182 #endif 183 183 184 184 #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH 185 + {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, 185 186 {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, 186 187 {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, 187 188 {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
+1 -1
drivers/iommu/iommu.c
··· 510 510 NULL, "%d", group->id); 511 511 if (ret) { 512 512 ida_simple_remove(&iommu_group_ida, group->id); 513 - kfree(group); 513 + kobject_put(&group->kobj); 514 514 return ERR_PTR(ret); 515 515 } 516 516
+1 -1
drivers/mmc/core/block.c
··· 2484 2484 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, 2485 2485 struct mmc_rpmb_data, chrdev); 2486 2486 2487 - put_device(&rpmb->dev); 2488 2487 mmc_blk_put(rpmb->md); 2488 + put_device(&rpmb->dev); 2489 2489 2490 2490 return 0; 2491 2491 }
+6 -3
drivers/mmc/host/sdhci.c
··· 4000 4000 mmc_hostname(mmc), host->version); 4001 4001 } 4002 4002 4003 - if (host->quirks & SDHCI_QUIRK_BROKEN_CQE) 4004 - mmc->caps2 &= ~MMC_CAP2_CQE; 4005 - 4006 4003 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4007 4004 host->flags |= SDHCI_USE_SDMA; 4008 4005 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) ··· 4535 4538 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; 4536 4539 struct mmc_host *mmc = host->mmc; 4537 4540 int ret; 4541 + 4542 + if ((mmc->caps2 & MMC_CAP2_CQE) && 4543 + (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { 4544 + mmc->caps2 &= ~MMC_CAP2_CQE; 4545 + mmc->cqe_ops = NULL; 4546 + } 4538 4547 4539 4548 host->complete_wq = alloc_workqueue("sdhci", flags, 0); 4540 4549 if (!host->complete_wq)
+3 -1
drivers/net/bonding/bond_sysfs_slave.c
··· 149 149 150 150 err = kobject_init_and_add(&slave->kobj, &slave_ktype, 151 151 &(slave->dev->dev.kobj), "bonding_slave"); 152 - if (err) 152 + if (err) { 153 + kobject_put(&slave->kobj); 153 154 return err; 155 + } 154 156 155 157 for (a = slave_attrs; *a; ++a) { 156 158 err = sysfs_create_file(&slave->kobj, &((*a)->attr));
+6 -2
drivers/net/dsa/ocelot/felix.c
··· 103 103 const struct switchdev_obj_port_vlan *vlan) 104 104 { 105 105 struct ocelot *ocelot = ds->priv; 106 + u16 flags = vlan->flags; 106 107 u16 vid; 107 108 int err; 108 109 110 + if (dsa_is_cpu_port(ds, port)) 111 + flags &= ~BRIDGE_VLAN_INFO_UNTAGGED; 112 + 109 113 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 110 114 err = ocelot_vlan_add(ocelot, port, vid, 111 - vlan->flags & BRIDGE_VLAN_INFO_PVID, 112 - vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); 115 + flags & BRIDGE_VLAN_INFO_PVID, 116 + flags & BRIDGE_VLAN_INFO_UNTAGGED); 113 117 if (err) { 114 118 dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", 115 119 vid, port, err);
+5 -11
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4176 4176 int i, intr_process, rc, tmo_count; 4177 4177 struct input *req = msg; 4178 4178 u32 *data = msg; 4179 - __le32 *resp_len; 4180 4179 u8 *valid; 4181 4180 u16 cp_ring_id, len = 0; 4182 4181 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4183 4182 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4184 4183 struct hwrm_short_input short_input = {0}; 4185 4184 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4186 - u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; 4187 4185 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4188 4186 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4189 4187 ··· 4199 4201 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4200 4202 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4201 4203 resp = bp->hwrm_cmd_kong_resp_addr; 4202 - resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; 4203 4204 } 4204 4205 4205 4206 memset(resp, 0, PAGE_SIZE); ··· 4267 4270 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4268 4271 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4269 4272 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4270 - resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); 4271 4273 4272 4274 if (intr_process) { 4273 4275 u16 seq_id = bp->hwrm_intr_seq_id; ··· 4294 4298 le16_to_cpu(req->req_type)); 4295 4299 return -EBUSY; 4296 4300 } 4297 - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4298 - HWRM_RESP_LEN_SFT; 4299 - valid = resp_addr + len - 1; 4301 + len = le16_to_cpu(resp->resp_len); 4302 + valid = ((u8 *)resp) + len - 1; 4300 4303 } else { 4301 4304 int j; 4302 4305 ··· 4306 4311 */ 4307 4312 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4308 4313 return -EBUSY; 4309 - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4310 - HWRM_RESP_LEN_SFT; 4314 + len = le16_to_cpu(resp->resp_len); 4311 4315 if (len) 4312 4316 break; 4313 4317 /* on first few passes, just barely sleep */ ··· 4328 4334 } 4329 4335 4330 4336 /* Last byte of resp contains valid bit */ 4331 - valid = resp_addr + len - 1; 4337 + valid = 
((u8 *)resp) + len - 1; 4332 4338 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4333 4339 /* make sure we read from updated DMA memory */ 4334 4340 dma_rmb(); ··· 9327 9333 bnxt_free_skbs(bp); 9328 9334 9329 9335 /* Save ring stats before shutdown */ 9330 - if (bp->bnapi) 9336 + if (bp->bnapi && irq_re_init) 9331 9337 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 9332 9338 if (irq_re_init) { 9333 9339 bnxt_free_irq(bp);
-5
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 659 659 #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) 660 660 #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) 661 661 #define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) 662 - #define HWRM_RESP_ERR_CODE_MASK 0xffff 663 - #define HWRM_RESP_LEN_OFFSET 4 664 - #define HWRM_RESP_LEN_MASK 0xffff0000 665 - #define HWRM_RESP_LEN_SFT 16 666 - #define HWRM_RESP_VALID_MASK 0xff000000 667 662 #define BNXT_HWRM_REQ_MAX_SIZE 128 668 663 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ 669 664 BNXT_HWRM_REQ_MAX_SIZE)
+5 -4
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 2119 2119 2120 2120 bnxt_hwrm_fw_set_time(bp); 2121 2121 2122 - if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2123 - BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2124 - &index, &item_len, NULL) != 0) { 2122 + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2123 + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2124 + &index, &item_len, NULL); 2125 + if (rc) { 2125 2126 netdev_err(dev, "PKG update area not created in nvram\n"); 2126 - return -ENOBUFS; 2127 + return rc; 2127 2128 } 2128 2129 2129 2130 rc = request_firmware(&fw, filename, &dev->dev);
+1 -1
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2914 2914 } 2915 2915 2916 2916 /* Do this here, so we can be verbose early */ 2917 - SET_NETDEV_DEV(net_dev, dev); 2917 + SET_NETDEV_DEV(net_dev, dev->parent); 2918 2918 dev_set_drvdata(dev, net_dev); 2919 2919 2920 2920 priv = netdev_priv(net_dev);
+3 -5
drivers/net/ethernet/ibm/ibmvnic.c
··· 4678 4678 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 4679 4679 break; 4680 4680 } 4681 - dev_info(dev, "Partner protocol version is %d\n", 4682 - crq->version_exchange_rsp.version); 4683 - if (be16_to_cpu(crq->version_exchange_rsp.version) < 4684 - ibmvnic_version) 4685 - ibmvnic_version = 4681 + ibmvnic_version = 4686 4682 be16_to_cpu(crq->version_exchange_rsp.version); 4683 + dev_info(dev, "Partner protocol version is %d\n", 4684 + ibmvnic_version); 4687 4685 send_cap_queries(adapter); 4688 4686 break; 4689 4687 case QUERY_CAPABILITY_RSP:
+6 -4
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 995 995 996 996 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, 997 997 int num_channels); 998 - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, 999 - u8 cq_period_mode); 1000 - void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, 1001 - u8 cq_period_mode); 998 + 999 + void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); 1000 + void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); 1001 + void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); 1002 + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); 1003 + 1002 1004 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); 1003 1005 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, 1004 1006 struct mlx5e_params *params);
+13 -11
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
··· 369 369 *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ 370 370 } while (0) 371 371 372 - #define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ 373 - do { \ 374 - u16 *__policy = &(policy); \ 375 - bool _write = (write); \ 376 - \ 377 - if (_write && *__policy) \ 378 - *__policy = find_first_bit((u_long *)__policy, \ 379 - sizeof(u16) * BITS_PER_BYTE);\ 380 - MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ 381 - if (!_write && *__policy) \ 382 - *__policy = 1 << *__policy; \ 372 + #define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ 373 + do { \ 374 + unsigned long policy_long; \ 375 + u16 *__policy = &(policy); \ 376 + bool _write = (write); \ 377 + \ 378 + policy_long = *__policy; \ 379 + if (_write && *__policy) \ 380 + *__policy = find_first_bit(&policy_long, \ 381 + sizeof(policy_long) * BITS_PER_BYTE);\ 382 + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ 383 + if (!_write && *__policy) \ 384 + *__policy = 1 << *__policy; \ 383 385 } while (0) 384 386 385 387 /* get/set FEC admin field for a given speed */
+28 -13
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 527 527 struct dim_cq_moder *rx_moder, *tx_moder; 528 528 struct mlx5_core_dev *mdev = priv->mdev; 529 529 struct mlx5e_channels new_channels = {}; 530 + bool reset_rx, reset_tx; 530 531 int err = 0; 531 - bool reset; 532 532 533 533 if (!MLX5_CAP_GEN(mdev, cq_moderation)) 534 534 return -EOPNOTSUPP; ··· 566 566 } 567 567 /* we are opened */ 568 568 569 - reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) || 570 - (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled); 569 + reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; 570 + reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; 571 571 572 - if (!reset) { 572 + if (!reset_rx && !reset_tx) { 573 573 mlx5e_set_priv_channels_coalesce(priv, coal); 574 574 priv->channels.params = new_channels.params; 575 575 goto out; 576 + } 577 + 578 + if (reset_rx) { 579 + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, 580 + MLX5E_PFLAG_RX_CQE_BASED_MODER); 581 + 582 + mlx5e_reset_rx_moderation(&new_channels.params, mode); 583 + } 584 + if (reset_tx) { 585 + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, 586 + MLX5E_PFLAG_TX_CQE_BASED_MODER); 587 + 588 + mlx5e_reset_tx_moderation(&new_channels.params, mode); 576 589 } 577 590 578 591 err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); ··· 678 665 static int get_fec_supported_advertised(struct mlx5_core_dev *dev, 679 666 struct ethtool_link_ksettings *link_ksettings) 680 667 { 681 - u_long active_fec = 0; 668 + unsigned long active_fec_long; 669 + u32 active_fec; 682 670 u32 bitn; 683 671 int err; 684 672 685 - err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL); 673 + err = mlx5e_get_fec_mode(dev, &active_fec, NULL); 686 674 if (err) 687 675 return (err == -EOPNOTSUPP) ? 
0 : err; 688 676 ··· 696 682 MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1, 697 683 ETHTOOL_LINK_MODE_FEC_LLRS_BIT); 698 684 685 + active_fec_long = active_fec; 699 686 /* active fec is a bit set, find out which bit is set and 700 687 * advertise the corresponding ethtool bit 701 688 */ 702 - bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE); 689 + bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE); 703 690 if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes)) 704 691 __set_bit(pplm_fec_2_ethtool_linkmodes[bitn], 705 692 link_ksettings->link_modes.advertising); ··· 1532 1517 { 1533 1518 struct mlx5e_priv *priv = netdev_priv(netdev); 1534 1519 struct mlx5_core_dev *mdev = priv->mdev; 1535 - u16 fec_configured = 0; 1536 - u32 fec_active = 0; 1520 + u16 fec_configured; 1521 + u32 fec_active; 1537 1522 int err; 1538 1523 1539 1524 err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured); ··· 1541 1526 if (err) 1542 1527 return err; 1543 1528 1544 - fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active, 1545 - sizeof(u32) * BITS_PER_BYTE); 1529 + fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active, 1530 + sizeof(unsigned long) * BITS_PER_BYTE); 1546 1531 1547 1532 if (!fecparam->active_fec) 1548 1533 return -EOPNOTSUPP; 1549 1534 1550 - fecparam->fec = pplm2ethtool_fec((u_long)fec_configured, 1551 - sizeof(u16) * BITS_PER_BYTE); 1535 + fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured, 1536 + sizeof(unsigned long) * BITS_PER_BYTE); 1552 1537 1553 1538 return 0; 1554 1539 }
+14 -6
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4707 4707 DIM_CQ_PERIOD_MODE_START_FROM_EQE; 4708 4708 } 4709 4709 4710 - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4710 + void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 4711 4711 { 4712 4712 if (params->tx_dim_enabled) { 4713 4713 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); ··· 4716 4716 } else { 4717 4717 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); 4718 4718 } 4719 - 4720 - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 4721 - params->tx_cq_moderation.cq_period_mode == 4722 - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 4723 4719 } 4724 4720 4725 - void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4721 + void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 4726 4722 { 4727 4723 if (params->rx_dim_enabled) { 4728 4724 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); ··· 4727 4731 } else { 4728 4732 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); 4729 4733 } 4734 + } 4730 4735 4736 + void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4737 + { 4738 + mlx5e_reset_tx_moderation(params, cq_period_mode); 4739 + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 4740 + params->tx_cq_moderation.cq_period_mode == 4741 + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 4742 + } 4743 + 4744 + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4745 + { 4746 + mlx5e_reset_rx_moderation(params, cq_period_mode); 4731 4747 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, 4732 4748 params->rx_cq_moderation.cq_period_mode == 4733 4749 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+4 -8
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 2153 2153 flow_rule_match_meta(rule, &match); 2154 2154 if (match.mask->ingress_ifindex != 0xFFFFFFFF) { 2155 2155 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); 2156 - return -EINVAL; 2156 + return -EOPNOTSUPP; 2157 2157 } 2158 2158 2159 2159 ingress_dev = __dev_get_by_index(dev_net(filter_dev), ··· 2161 2161 if (!ingress_dev) { 2162 2162 NL_SET_ERR_MSG_MOD(extack, 2163 2163 "Can't find the ingress port to match on"); 2164 - return -EINVAL; 2164 + return -ENOENT; 2165 2165 } 2166 2166 2167 2167 if (ingress_dev != filter_dev) { 2168 2168 NL_SET_ERR_MSG_MOD(extack, 2169 2169 "Can't match on the ingress filter port"); 2170 - return -EINVAL; 2170 + return -EOPNOTSUPP; 2171 2171 } 2172 2172 2173 2173 return 0; ··· 4162 4162 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { 4163 4163 NL_SET_ERR_MSG_MOD(extack, 4164 4164 "devices are not on same switch HW, can't offload forwarding"); 4165 - netdev_warn(priv->netdev, 4166 - "devices %s %s not on same switch HW, can't offload forwarding\n", 4167 - priv->netdev->name, 4168 - out_dev->name); 4169 4165 return -EOPNOTSUPP; 4170 4166 } 4171 4167 ··· 4946 4950 dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; 4947 4951 dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; 4948 4952 rpriv->prev_vf_vport_stats = cur_stats; 4949 - flow_stats_update(&ma->stats, dpkts, dbytes, jiffies, 4953 + flow_stats_update(&ma->stats, dbytes, dpkts, jiffies, 4950 4954 FLOW_ACTION_HW_STATS_DELAYED); 4951 4955 } 4952 4956
+18
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1544 1544 mlx5_pci_disable_device(dev); 1545 1545 } 1546 1546 1547 + static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) 1548 + { 1549 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1550 + 1551 + mlx5_unload_one(dev, false); 1552 + 1553 + return 0; 1554 + } 1555 + 1556 + static int mlx5_resume(struct pci_dev *pdev) 1557 + { 1558 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1559 + 1560 + return mlx5_load_one(dev, false); 1561 + } 1562 + 1547 1563 static const struct pci_device_id mlx5_core_pci_table[] = { 1548 1564 { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, 1549 1565 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ ··· 1603 1587 .id_table = mlx5_core_pci_table, 1604 1588 .probe = init_one, 1605 1589 .remove = remove_one, 1590 + .suspend = mlx5_suspend, 1591 + .resume = mlx5_resume, 1606 1592 .shutdown = shutdown, 1607 1593 .err_handler = &mlx5_err_handler, 1608 1594 .sriov_configure = mlx5_core_sriov_configure,
+2 -1
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1440 1440 ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); 1441 1441 priv->stats[ctx_id].pkts += pkts; 1442 1442 priv->stats[ctx_id].bytes += bytes; 1443 - max_t(u64, priv->stats[ctx_id].used, used); 1443 + priv->stats[ctx_id].used = max_t(u64, used, 1444 + priv->stats[ctx_id].used); 1444 1445 } 1445 1446 } 1446 1447
+3 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
··· 3651 3651 ahw->diag_cnt = 0; 3652 3652 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); 3653 3653 if (ret) 3654 - goto fail_diag_irq; 3654 + goto fail_mbx_args; 3655 3655 3656 3656 if (adapter->flags & QLCNIC_MSIX_ENABLED) 3657 3657 intrpt_id = ahw->intr_tbl[0].id; ··· 3681 3681 3682 3682 done: 3683 3683 qlcnic_free_mbx_args(&cmd); 3684 + 3685 + fail_mbx_args: 3684 3686 qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); 3685 3687 3686 3688 fail_diag_irq:
+2 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 630 630 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 631 631 ptp_v2 = PTP_TCR_TSVER2ENA; 632 632 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 633 - ts_event_en = PTP_TCR_TSEVNTENA; 633 + if (priv->synopsys_id != DWMAC_CORE_5_10) 634 + ts_event_en = PTP_TCR_TSEVNTENA; 634 635 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 635 636 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 636 637 ptp_over_ethernet = PTP_TCR_TSIPENA;
+1
drivers/net/usb/qmi_wwan.c
··· 1324 1324 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1325 1325 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1326 1326 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1327 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ 1327 1328 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1328 1329 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ 1329 1330 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+3 -1
drivers/nfc/st21nfca/dep.c
··· 173 173 memcpy(atr_res->gbi, atr_req->gbi, gb_len); 174 174 r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, 175 175 gb_len); 176 - if (r < 0) 176 + if (r < 0) { 177 + kfree_skb(skb); 177 178 return r; 179 + } 178 180 } 179 181 180 182 info->dep_info.curr_nfc_dep_pni = 0;
+7 -4
drivers/nvme/host/pci.c
··· 1382 1382 1383 1383 /* 1384 1384 * Called only on a device that has been disabled and after all other threads 1385 - * that can check this device's completion queues have synced. This is the 1386 - * last chance for the driver to see a natural completion before 1387 - * nvme_cancel_request() terminates all incomplete requests. 1385 + * that can check this device's completion queues have synced, except 1386 + * nvme_poll(). This is the last chance for the driver to see a natural 1387 + * completion before nvme_cancel_request() terminates all incomplete requests. 1388 1388 */ 1389 1389 static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1390 1390 { 1391 1391 int i; 1392 1392 1393 - for (i = dev->ctrl.queue_count - 1; i > 0; i--) 1393 + for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 1394 + spin_lock(&dev->queues[i].cq_poll_lock); 1394 1395 nvme_process_cq(&dev->queues[i]); 1396 + spin_unlock(&dev->queues[i].cq_poll_lock); 1397 + } 1395 1398 } 1396 1399 1397 1400 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
+3 -1
drivers/soc/mediatek/mtk-cmdq-helper.c
··· 351 351 spin_unlock_irqrestore(&client->lock, flags); 352 352 } 353 353 354 - mbox_send_message(client->chan, pkt); 354 + err = mbox_send_message(client->chan, pkt); 355 + if (err < 0) 356 + return err; 355 357 /* We can send next packet immediately, so just call txdone. */ 356 358 mbox_client_txdone(client->chan, 0); 357 359
+1 -1
fs/binfmt_elf.c
··· 1733 1733 (!regset->active || regset->active(t->task, regset) > 0)) { 1734 1734 int ret; 1735 1735 size_t size = regset_size(t->task, regset); 1736 - void *data = kmalloc(size, GFP_KERNEL); 1736 + void *data = kzalloc(size, GFP_KERNEL); 1737 1737 if (unlikely(!data)) 1738 1738 return 0; 1739 1739 ret = regset->get(t->task, regset,
+1 -1
fs/ceph/caps.c
··· 3991 3991 __ceph_queue_cap_release(session, cap); 3992 3992 spin_unlock(&session->s_cap_lock); 3993 3993 } 3994 - goto done; 3994 + goto flush_cap_releases; 3995 3995 } 3996 3996 3997 3997 /* these will work even if we don't have a cap yet */
+5 -10
fs/gfs2/lops.c
··· 509 509 unsigned int bsize = sdp->sd_sb.sb_bsize, off; 510 510 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; 511 511 unsigned int shift = PAGE_SHIFT - bsize_shift; 512 - unsigned int max_bio_size = 2 * 1024 * 1024; 512 + unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift; 513 513 struct gfs2_journal_extent *je; 514 514 int sz, ret = 0; 515 515 struct bio *bio = NULL; 516 516 struct page *page = NULL; 517 - bool bio_chained = false, done = false; 517 + bool done = false; 518 518 errseq_t since; 519 519 520 520 memset(head, 0, sizeof(*head)); ··· 537 537 off = 0; 538 538 } 539 539 540 - if (!bio || (bio_chained && !off) || 541 - bio->bi_iter.bi_size >= max_bio_size) { 542 - /* start new bio */ 543 - } else { 540 + if (bio && (off || block < blocks_submitted + max_blocks)) { 544 541 sector_t sector = dblock << sdp->sd_fsb2bb_shift; 545 542 546 543 if (bio_end_sector(bio) == sector) { ··· 550 553 (PAGE_SIZE - off) >> bsize_shift; 551 554 552 555 bio = gfs2_chain_bio(bio, blocks); 553 - bio_chained = true; 554 556 goto add_block_to_new_bio; 555 557 } 556 558 } 557 559 558 560 if (bio) { 559 - blocks_submitted = block + 1; 561 + blocks_submitted = block; 560 562 submit_bio(bio); 561 563 } 562 564 563 565 bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); 564 566 bio->bi_opf = REQ_OP_READ; 565 - bio_chained = false; 566 567 add_block_to_new_bio: 567 568 sz = bio_add_page(bio, page, bsize, off); 568 569 BUG_ON(sz != bsize); ··· 568 573 off += bsize; 569 574 if (off == PAGE_SIZE) 570 575 page = NULL; 571 - if (blocks_submitted < 2 * max_bio_size >> bsize_shift) { 576 + if (blocks_submitted <= blocks_read + max_blocks) { 572 577 /* Keep at least one bio in flight */ 573 578 continue; 574 579 }
+1 -1
fs/notify/fanotify/fanotify.c
··· 520 520 BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC); 521 521 BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM); 522 522 523 - BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 20); 523 + BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19); 524 524 525 525 mask = fanotify_group_event_mask(group, iter_info, mask, data, 526 526 data_type);
+3 -3
fs/xattr.c
··· 876 876 struct simple_xattr *new_xattr = NULL; 877 877 int err = 0; 878 878 879 + if (removed_size) 880 + *removed_size = -1; 881 + 879 882 /* value == NULL means remove */ 880 883 if (value) { 881 884 new_xattr = simple_xattr_alloc(value, size); ··· 917 914 list_add(&new_xattr->list, &xattrs->head); 918 915 xattr = NULL; 919 916 } 920 - 921 - if (removed_size) 922 - *removed_size = -1; 923 917 out: 924 918 spin_unlock(&xattrs->lock); 925 919 if (xattr) {
+1 -1
include/asm-generic/topology.h
··· 48 48 #ifdef CONFIG_NEED_MULTIPLE_NODES 49 49 #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) 50 50 #else 51 - #define cpumask_of_node(node) ((void)node, cpu_online_mask) 51 + #define cpumask_of_node(node) ((void)(node), cpu_online_mask) 52 52 #endif 53 53 #endif 54 54 #ifndef pcibus_to_node
+5 -9
include/linux/device_cgroup.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #include <linux/fs.h> 3 - #include <linux/bpf-cgroup.h> 4 3 5 4 #define DEVCG_ACC_MKNOD 1 6 5 #define DEVCG_ACC_READ 2 ··· 10 11 #define DEVCG_DEV_CHAR 2 11 12 #define DEVCG_DEV_ALL 4 /* this represents all devices */ 12 13 13 - #ifdef CONFIG_CGROUP_DEVICE 14 - int devcgroup_check_permission(short type, u32 major, u32 minor, 15 - short access); 16 - #else 17 - static inline int devcgroup_check_permission(short type, u32 major, u32 minor, 18 - short access) 19 - { return 0; } 20 - #endif 21 14 22 15 #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) 16 + int devcgroup_check_permission(short type, u32 major, u32 minor, 17 + short access); 23 18 static inline int devcgroup_inode_permission(struct inode *inode, int mask) 24 19 { 25 20 short type, access = 0; ··· 54 61 } 55 62 56 63 #else 64 + static inline int devcgroup_check_permission(short type, u32 major, u32 minor, 65 + short access) 66 + { return 0; } 57 67 static inline int devcgroup_inode_permission(struct inode *inode, int mask) 58 68 { return 0; } 59 69 static inline int devcgroup_inode_mknod(int mode, dev_t dev)
+1 -2
include/linux/fanotify.h
··· 47 47 * Directory entry modification events - reported only to directory 48 48 * where entry is modified and not to a watching parent. 49 49 */ 50 - #define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \ 51 - FAN_DIR_MODIFY) 50 + #define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE) 52 51 53 52 /* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */ 54 53 #define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
+1 -1
include/linux/ieee80211.h
··· 2196 2196 } 2197 2197 2198 2198 /* HE Operation defines */ 2199 - #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 2199 + #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 2200 2200 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 2201 2201 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 2202 2202 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+1 -1
include/linux/input/lm8333.h
··· 1 1 /* 2 2 * public include for LM8333 keypad driver - same license as driver 3 - * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de> 3 + * Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de> 4 4 */ 5 5 6 6 #ifndef _LM8333_H
+13 -2
include/linux/mm.h
··· 782 782 783 783 extern void kvfree(const void *addr); 784 784 785 + /* 786 + * Mapcount of compound page as a whole, does not include mapped sub-pages. 787 + * 788 + * Must be called only for compound pages or any their tail sub-pages. 789 + */ 785 790 static inline int compound_mapcount(struct page *page) 786 791 { 787 792 VM_BUG_ON_PAGE(!PageCompound(page), page); ··· 806 801 807 802 int __page_mapcount(struct page *page); 808 803 804 + /* 805 + * Mapcount of 0-order page; when compound sub-page, includes 806 + * compound_mapcount(). 807 + * 808 + * Result is undefined for pages which cannot be mapped into userspace. 809 + * For example SLAB or special types of pages. See function page_has_type(). 810 + * They use this place in struct page differently. 811 + */ 809 812 static inline int page_mapcount(struct page *page) 810 813 { 811 - VM_BUG_ON_PAGE(PageSlab(page), page); 812 - 813 814 if (unlikely(PageCompound(page))) 814 815 return __page_mapcount(page); 815 816 return atomic_read(&page->_mapcount) + 1;
+1 -1
include/linux/netfilter/nf_conntrack_pptp.h
··· 10 10 #include <net/netfilter/nf_conntrack_expect.h> 11 11 #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> 12 12 13 - extern const char *const pptp_msg_name[]; 13 + const char *pptp_msg_name(u_int16_t msg); 14 14 15 15 /* state of the control session */ 16 16 enum pptp_ctrlsess_state {
+18 -7
include/linux/virtio_net.h
··· 31 31 { 32 32 unsigned int gso_type = 0; 33 33 unsigned int thlen = 0; 34 + unsigned int p_off = 0; 34 35 unsigned int ip_proto; 35 36 36 37 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { ··· 69 68 if (!skb_partial_csum_set(skb, start, off)) 70 69 return -EINVAL; 71 70 72 - if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) 71 + p_off = skb_transport_offset(skb) + thlen; 72 + if (p_off > skb_headlen(skb)) 73 73 return -EINVAL; 74 74 } else { 75 75 /* gso packets without NEEDS_CSUM do not set transport_offset. ··· 94 92 return -EINVAL; 95 93 } 96 94 97 - if (keys.control.thoff + thlen > skb_headlen(skb) || 95 + p_off = keys.control.thoff + thlen; 96 + if (p_off > skb_headlen(skb) || 98 97 keys.basic.ip_proto != ip_proto) 99 98 return -EINVAL; 100 99 101 100 skb_set_transport_header(skb, keys.control.thoff); 101 + } else if (gso_type) { 102 + p_off = thlen; 103 + if (p_off > skb_headlen(skb)) 104 + return -EINVAL; 102 105 } 103 106 } 104 107 105 108 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 106 109 u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); 110 + struct skb_shared_info *shinfo = skb_shinfo(skb); 107 111 108 - skb_shinfo(skb)->gso_size = gso_size; 109 - skb_shinfo(skb)->gso_type = gso_type; 112 + /* Too small packets are not really GSO ones. */ 113 + if (skb->len - p_off > gso_size) { 114 + shinfo->gso_size = gso_size; 115 + shinfo->gso_type = gso_type; 110 116 111 - /* Header must be checked, and gso_segs computed. */ 112 - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 113 - skb_shinfo(skb)->gso_segs = 0; 117 + /* Header must be checked, and gso_segs computed. */ 118 + shinfo->gso_type |= SKB_GSO_DODGY; 119 + shinfo->gso_segs = 0; 120 + } 114 121 } 115 122 116 123 return 0;
+1
include/net/espintcp.h
··· 25 25 struct espintcp_msg partial; 26 26 void (*saved_data_ready)(struct sock *sk); 27 27 void (*saved_write_space)(struct sock *sk); 28 + void (*saved_destruct)(struct sock *sk); 28 29 struct work_struct work; 29 30 bool tx_running; 30 31 };
+12
include/net/ip_fib.h
··· 447 447 #endif 448 448 int fib_unmerge(struct net *net); 449 449 450 + static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc, 451 + const struct net_device *dev) 452 + { 453 + if (nhc->nhc_dev == dev || 454 + l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) 455 + return true; 456 + 457 + return false; 458 + } 459 + 450 460 /* Exported by fib_semantics.c */ 451 461 int ip_fib_check_default(__be32 gw, struct net_device *dev); 452 462 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); ··· 489 479 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri); 490 480 void fib_trie_init(void); 491 481 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias); 482 + bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, 483 + const struct flowi4 *flp); 492 484 493 485 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) 494 486 {
+84 -16
include/net/nexthop.h
··· 73 73 }; 74 74 75 75 struct nh_group { 76 + struct nh_group *spare; /* spare group for removals */ 76 77 u16 num_nh; 77 78 bool mpath; 78 79 bool has_v4; ··· 153 152 { 154 153 unsigned int rc = 1; 155 154 156 - if (nexthop_is_multipath(nh)) { 155 + if (nh->is_group) { 157 156 struct nh_group *nh_grp; 158 157 159 158 nh_grp = rcu_dereference_rtnl(nh->nh_grp); 160 - rc = nh_grp->num_nh; 159 + if (nh_grp->mpath) 160 + rc = nh_grp->num_nh; 161 161 } 162 162 163 163 return rc; 164 164 } 165 165 166 166 static inline 167 - struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel) 167 + struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel) 168 168 { 169 - const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); 170 - 171 169 /* for_nexthops macros in fib_semantics.c grabs a pointer to 172 170 * the nexthop before checking nhsel 173 171 */ ··· 201 201 { 202 202 const struct nh_info *nhi; 203 203 204 - if (nexthop_is_multipath(nh)) { 205 - if (nexthop_num_path(nh) > 1) 204 + if (nh->is_group) { 205 + struct nh_group *nh_grp; 206 + 207 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 208 + if (nh_grp->num_nh > 1) 206 209 return false; 207 - nh = nexthop_mpath_select(nh, 0); 208 - if (!nh) 209 - return false; 210 + 211 + nh = nh_grp->nh_entries[0].nh; 210 212 } 211 213 212 214 nhi = rcu_dereference_rtnl(nh->nh_info); ··· 234 232 BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0); 235 233 BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0); 236 234 237 - if (nexthop_is_multipath(nh)) { 238 - nh = nexthop_mpath_select(nh, nhsel); 239 - if (!nh) 240 - return NULL; 235 + if (nh->is_group) { 236 + struct nh_group *nh_grp; 237 + 238 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 239 + if (nh_grp->mpath) { 240 + nh = nexthop_mpath_select(nh_grp, nhsel); 241 + if (!nh) 242 + return NULL; 243 + } 241 244 } 242 245 243 246 nhi = rcu_dereference_rtnl(nh->nh_info); 244 247 return &nhi->fib_nhc; 248 + } 249 + 250 + /* called from 
fib_table_lookup with rcu_lock */ 251 + static inline 252 + struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh, 253 + int fib_flags, 254 + const struct flowi4 *flp, 255 + int *nhsel) 256 + { 257 + struct nh_info *nhi; 258 + 259 + if (nh->is_group) { 260 + struct nh_group *nhg = rcu_dereference(nh->nh_grp); 261 + int i; 262 + 263 + for (i = 0; i < nhg->num_nh; i++) { 264 + struct nexthop *nhe = nhg->nh_entries[i].nh; 265 + 266 + nhi = rcu_dereference(nhe->nh_info); 267 + if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) { 268 + *nhsel = i; 269 + return &nhi->fib_nhc; 270 + } 271 + } 272 + } else { 273 + nhi = rcu_dereference(nh->nh_info); 274 + if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) { 275 + *nhsel = 0; 276 + return &nhi->fib_nhc; 277 + } 278 + } 279 + 280 + return NULL; 281 + } 282 + 283 + static inline bool nexthop_uses_dev(const struct nexthop *nh, 284 + const struct net_device *dev) 285 + { 286 + struct nh_info *nhi; 287 + 288 + if (nh->is_group) { 289 + struct nh_group *nhg = rcu_dereference(nh->nh_grp); 290 + int i; 291 + 292 + for (i = 0; i < nhg->num_nh; i++) { 293 + struct nexthop *nhe = nhg->nh_entries[i].nh; 294 + 295 + nhi = rcu_dereference(nhe->nh_info); 296 + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) 297 + return true; 298 + } 299 + } else { 300 + nhi = rcu_dereference(nh->nh_info); 301 + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) 302 + return true; 303 + } 304 + 305 + return false; 245 306 } 246 307 247 308 static inline unsigned int fib_info_num_path(const struct fib_info *fi) ··· 344 279 { 345 280 struct nh_info *nhi; 346 281 347 - if (nexthop_is_multipath(nh)) { 348 - nh = nexthop_mpath_select(nh, 0); 282 + if (nh->is_group) { 283 + struct nh_group *nh_grp; 284 + 285 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 286 + nh = nexthop_mpath_select(nh_grp, 0); 349 287 if (!nh) 350 288 return NULL; 351 289 }
+4
include/net/tls.h
··· 135 135 struct tls_rec *open_rec; 136 136 struct list_head tx_list; 137 137 atomic_t encrypt_pending; 138 + /* protect crypto_wait with encrypt_pending */ 139 + spinlock_t encrypt_compl_lock; 138 140 int async_notify; 139 141 u8 async_capable:1; 140 142 ··· 157 155 u8 async_capable:1; 158 156 u8 decrypted:1; 159 157 atomic_t decrypt_pending; 158 + /* protect crypto_wait with decrypt_pending*/ 159 + spinlock_t decrypt_compl_lock; 160 160 bool async_notify; 161 161 }; 162 162
+1 -1
include/rdma/uverbs_std_types.h
··· 88 88 89 89 static inline void uobj_put_destroy(struct ib_uobject *uobj) 90 90 { 91 - rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); 91 + rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); 92 92 } 93 93 94 94 static inline void uobj_put_read(struct ib_uobject *uobj)
+1 -1
include/uapi/linux/xfrm.h
··· 304 304 XFRMA_PROTO, /* __u8 */ 305 305 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ 306 306 XFRMA_PAD, 307 - XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ 307 + XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */ 308 308 XFRMA_SET_MARK, /* __u32 */ 309 309 XFRMA_SET_MARK_MASK, /* __u32 */ 310 310 XFRMA_IF_ID, /* __u32 */
+16 -18
kernel/bpf/verifier.c
··· 1217 1217 * but must be positive otherwise set to worse case bounds 1218 1218 * and refine later from tnum. 1219 1219 */ 1220 - if (reg->s32_min_value > 0) 1221 - reg->smin_value = reg->s32_min_value; 1222 - else 1223 - reg->smin_value = 0; 1224 - if (reg->s32_max_value > 0) 1220 + if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0) 1225 1221 reg->smax_value = reg->s32_max_value; 1226 1222 else 1227 1223 reg->smax_value = U32_MAX; 1224 + if (reg->s32_min_value >= 0) 1225 + reg->smin_value = reg->s32_min_value; 1226 + else 1227 + reg->smin_value = 0; 1228 1228 } 1229 1229 1230 1230 static void __reg_combine_32_into_64(struct bpf_reg_state *reg) ··· 10522 10522 } 10523 10523 #define SECURITY_PREFIX "security_" 10524 10524 10525 - static int check_attach_modify_return(struct bpf_verifier_env *env) 10525 + static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr) 10526 10526 { 10527 - struct bpf_prog *prog = env->prog; 10528 - unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr; 10529 - 10530 - /* This is expected to be cleaned up in the future with the KRSI effort 10531 - * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h. 
10532 - */ 10533 10527 if (within_error_injection_list(addr) || 10534 10528 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name, 10535 10529 sizeof(SECURITY_PREFIX) - 1)) 10536 10530 return 0; 10537 - 10538 - verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n", 10539 - prog->aux->attach_btf_id, prog->aux->attach_func_name); 10540 10531 10541 10532 return -EINVAL; 10542 10533 } ··· 10756 10765 goto out; 10757 10766 } 10758 10767 } 10768 + 10769 + if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 10770 + ret = check_attach_modify_return(prog, addr); 10771 + if (ret) 10772 + verbose(env, "%s() is not modifiable\n", 10773 + prog->aux->attach_func_name); 10774 + } 10775 + 10776 + if (ret) 10777 + goto out; 10759 10778 tr->func.addr = (void *)addr; 10760 10779 prog->aux->trampoline = tr; 10761 - 10762 - if (prog->expected_attach_type == BPF_MODIFY_RETURN) 10763 - ret = check_attach_modify_return(env); 10764 10780 out: 10765 10781 mutex_unlock(&tr->mutex); 10766 10782 if (ret)
+3 -13
kernel/cgroup/rstat.c
··· 33 33 return; 34 34 35 35 /* 36 - * Paired with the one in cgroup_rstat_cpu_pop_updated(). Either we 37 - * see NULL updated_next or they see our updated stat. 38 - */ 39 - smp_mb(); 40 - 41 - /* 36 + * Speculative already-on-list test. This may race leading to 37 + * temporary inaccuracies, which is fine. 38 + * 42 39 * Because @parent's updated_children is terminated with @parent 43 40 * instead of NULL, we can tell whether @cgrp is on the list by 44 41 * testing the next pointer for NULL. ··· 130 133 131 134 *nextp = rstatc->updated_next; 132 135 rstatc->updated_next = NULL; 133 - 134 - /* 135 - * Paired with the one in cgroup_rstat_cpu_updated(). 136 - * Either they see NULL updated_next or we see their 137 - * updated stat. 138 - */ 139 - smp_mb(); 140 136 141 137 return pos; 142 138 }
+1 -1
kernel/sched/fair.c
··· 2907 2907 /* 2908 2908 * We don't care about NUMA placement if we don't have memory. 2909 2909 */ 2910 - if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) 2910 + if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) 2911 2911 return; 2912 2912 2913 2913 /*
+1
mm/khugepaged.c
··· 1692 1692 if (page_has_private(page) && 1693 1693 !try_to_release_page(page, GFP_KERNEL)) { 1694 1694 result = SCAN_PAGE_HAS_PRIVATE; 1695 + putback_lru_page(page); 1695 1696 goto out_unlock; 1696 1697 } 1697 1698
+3
mm/z3fold.c
··· 43 43 #include <linux/spinlock.h> 44 44 #include <linux/zpool.h> 45 45 #include <linux/magic.h> 46 + #include <linux/kmemleak.h> 46 47 47 48 /* 48 49 * NCHUNKS_ORDER determines the internal allocation granularity, effectively ··· 216 215 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE))); 217 216 218 217 if (slots) { 218 + /* It will be freed separately in free_handle(). */ 219 + kmemleak_not_leak(slots); 219 220 memset(slots->slot, 0, sizeof(slots->slot)); 220 221 slots->pool = (unsigned long)pool; 221 222 rwlock_init(&slots->lock);
+2 -1
net/bridge/br_multicast.c
··· 2413 2413 free_percpu(br->mcast_stats); 2414 2414 } 2415 2415 2416 - static void mcast_stats_add_dir(u64 *dst, u64 *src) 2416 + /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 2417 + static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 2417 2418 { 2418 2419 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 2419 2420 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
+6
net/bridge/netfilter/nft_reject_bridge.c
··· 31 31 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); 32 32 eth->h_proto = eth_hdr(oldskb)->h_proto; 33 33 skb_pull(nskb, ETH_HLEN); 34 + 35 + if (skb_vlan_tag_present(oldskb)) { 36 + u16 vid = skb_vlan_tag_get(oldskb); 37 + 38 + __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); 39 + } 34 40 } 35 41 36 42 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
+3 -1
net/ceph/osd_client.c
··· 3649 3649 * supported. 3650 3650 */ 3651 3651 req->r_t.target_oloc.pool = m.redirect.oloc.pool; 3652 - req->r_flags |= CEPH_OSD_FLAG_REDIRECTED; 3652 + req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | 3653 + CEPH_OSD_FLAG_IGNORE_OVERLAY | 3654 + CEPH_OSD_FLAG_IGNORE_CACHE; 3653 3655 req->r_tid = 0; 3654 3656 __submit_request(req, false); 3655 3657 goto out_unlock_osdc;
+2 -2
net/core/neighbour.c
··· 1082 1082 } 1083 1083 1084 1084 if (neigh->nud_state & NUD_IN_TIMER) { 1085 - if (time_before(next, jiffies + HZ/2)) 1086 - next = jiffies + HZ/2; 1085 + if (time_before(next, jiffies + HZ/100)) 1086 + next = jiffies + HZ/100; 1087 1087 if (!mod_timer(&neigh->timer, next)) 1088 1088 neigh_hold(neigh); 1089 1089 }
+1
net/dsa/slave.c
··· 1746 1746 if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) 1747 1747 slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1748 1748 slave_dev->hw_features |= NETIF_F_HW_TC; 1749 + slave_dev->features |= NETIF_F_LLTX; 1749 1750 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; 1750 1751 if (!IS_ERR_OR_NULL(port->mac)) 1751 1752 ether_addr_copy(slave_dev->dev_addr, port->mac);
+1
net/ipv4/devinet.c
··· 276 276 err = devinet_sysctl_register(in_dev); 277 277 if (err) { 278 278 in_dev->dead = 1; 279 + neigh_parms_release(&arp_tbl, in_dev->arp_parms); 279 280 in_dev_put(in_dev); 280 281 in_dev = NULL; 281 282 goto out;
+18 -12
net/ipv4/esp4_offload.c
··· 63 63 sp->olen++; 64 64 65 65 xo = xfrm_offload(skb); 66 - if (!xo) { 67 - xfrm_state_put(x); 66 + if (!xo) 68 67 goto out_reset; 69 - } 70 68 } 71 69 72 70 xo->flags |= XFRM_GRO; ··· 137 139 struct xfrm_offload *xo = xfrm_offload(skb); 138 140 struct sk_buff *segs = ERR_PTR(-EINVAL); 139 141 const struct net_offload *ops; 140 - int proto = xo->proto; 142 + u8 proto = xo->proto; 141 143 142 144 skb->transport_header += x->props.header_len; 143 145 144 - if (proto == IPPROTO_BEETPH) { 145 - struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data; 146 + if (x->sel.family != AF_INET6) { 147 + if (proto == IPPROTO_BEETPH) { 148 + struct ip_beet_phdr *ph = 149 + (struct ip_beet_phdr *)skb->data; 146 150 147 - skb->transport_header += ph->hdrlen * 8; 148 - proto = ph->nexthdr; 149 - } else if (x->sel.family != AF_INET6) { 150 - skb->transport_header -= IPV4_BEET_PHMAXLEN; 151 - } else if (proto == IPPROTO_TCP) { 152 - skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 151 + skb->transport_header += ph->hdrlen * 8; 152 + proto = ph->nexthdr; 153 + } else { 154 + skb->transport_header -= IPV4_BEET_PHMAXLEN; 155 + } 156 + } else { 157 + __be16 frag; 158 + 159 + skb->transport_header += 160 + ipv6_skip_exthdr(skb, 0, &proto, &frag); 161 + if (proto == IPPROTO_TCP) 162 + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 153 163 } 154 164 155 165 __skb_pull(skb, skb_transport_offset(skb));
+10 -9
net/ipv4/fib_frontend.c
··· 309 309 { 310 310 bool dev_match = false; 311 311 #ifdef CONFIG_IP_ROUTE_MULTIPATH 312 - int ret; 312 + if (unlikely(fi->nh)) { 313 + dev_match = nexthop_uses_dev(fi->nh, dev); 314 + } else { 315 + int ret; 313 316 314 - for (ret = 0; ret < fib_info_num_path(fi); ret++) { 315 - const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); 317 + for (ret = 0; ret < fib_info_num_path(fi); ret++) { 318 + const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); 316 319 317 - if (nhc->nhc_dev == dev) { 318 - dev_match = true; 319 - break; 320 - } else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) { 321 - dev_match = true; 322 - break; 320 + if (nhc_l3mdev_matches_dev(nhc, dev)) { 321 + dev_match = true; 322 + break; 323 + } 323 324 } 324 325 } 325 326 #else
+36 -15
net/ipv4/fib_trie.c
··· 1371 1371 return (key ^ prefix) & (prefix | -prefix); 1372 1372 } 1373 1373 1374 + bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, 1375 + const struct flowi4 *flp) 1376 + { 1377 + if (nhc->nhc_flags & RTNH_F_DEAD) 1378 + return false; 1379 + 1380 + if (ip_ignore_linkdown(nhc->nhc_dev) && 1381 + nhc->nhc_flags & RTNH_F_LINKDOWN && 1382 + !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) 1383 + return false; 1384 + 1385 + if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) { 1386 + if (flp->flowi4_oif && 1387 + flp->flowi4_oif != nhc->nhc_oif) 1388 + return false; 1389 + } 1390 + 1391 + return true; 1392 + } 1393 + 1374 1394 /* should be called with rcu_read_lock */ 1375 1395 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, 1376 1396 struct fib_result *res, int fib_flags) ··· 1523 1503 /* Step 3: Process the leaf, if that fails fall back to backtracing */ 1524 1504 hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { 1525 1505 struct fib_info *fi = fa->fa_info; 1506 + struct fib_nh_common *nhc; 1526 1507 int nhsel, err; 1527 1508 1528 1509 if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) { ··· 1549 1528 if (fi->fib_flags & RTNH_F_DEAD) 1550 1529 continue; 1551 1530 1552 - if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) { 1553 - err = fib_props[RTN_BLACKHOLE].error; 1554 - goto out_reject; 1531 + if (unlikely(fi->nh)) { 1532 + if (nexthop_is_blackhole(fi->nh)) { 1533 + err = fib_props[RTN_BLACKHOLE].error; 1534 + goto out_reject; 1535 + } 1536 + 1537 + nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp, 1538 + &nhsel); 1539 + if (nhc) 1540 + goto set_result; 1541 + goto miss; 1555 1542 } 1556 1543 1557 1544 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) { 1558 - struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel); 1545 + nhc = fib_info_nhc(fi, nhsel); 1559 1546 1560 - if (nhc->nhc_flags & RTNH_F_DEAD) 1547 + if (!fib_lookup_good_nhc(nhc, fib_flags, flp)) 1561 1548 continue; 1562 - if 
(ip_ignore_linkdown(nhc->nhc_dev) && 1563 - nhc->nhc_flags & RTNH_F_LINKDOWN && 1564 - !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) 1565 - continue; 1566 - if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) { 1567 - if (flp->flowi4_oif && 1568 - flp->flowi4_oif != nhc->nhc_oif) 1569 - continue; 1570 - } 1571 - 1549 + set_result: 1572 1550 if (!(fib_flags & FIB_LOOKUP_NOREF)) 1573 1551 refcount_inc(&fi->fib_clntref); 1574 1552 ··· 1588 1568 return err; 1589 1569 } 1590 1570 } 1571 + miss: 1591 1572 #ifdef CONFIG_IP_FIB_TRIE_STATS 1592 1573 this_cpu_inc(stats->semantic_match_miss); 1593 1574 #endif
+22 -1
net/ipv4/ip_vti.c
··· 93 93 94 94 static int vti_rcv_tunnel(struct sk_buff *skb) 95 95 { 96 - return vti_rcv(skb, ip_hdr(skb)->saddr, true); 96 + struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); 97 + const struct iphdr *iph = ip_hdr(skb); 98 + struct ip_tunnel *tunnel; 99 + 100 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 101 + iph->saddr, iph->daddr, 0); 102 + if (tunnel) { 103 + struct tnl_ptk_info tpi = { 104 + .proto = htons(ETH_P_IP), 105 + }; 106 + 107 + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 108 + goto drop; 109 + if (iptunnel_pull_header(skb, 0, tpi.proto, false)) 110 + goto drop; 111 + return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); 112 + } 113 + 114 + return -EINVAL; 115 + drop: 116 + kfree_skb(skb); 117 + return 0; 97 118 } 98 119 99 120 static int vti_rcv_cb(struct sk_buff *skb, int err)
+2 -5
net/ipv4/netfilter/nf_nat_pptp.c
··· 166 166 break; 167 167 default: 168 168 pr_debug("unknown outbound packet 0x%04x:%s\n", msg, 169 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : 170 - pptp_msg_name[0]); 169 + pptp_msg_name(msg)); 171 170 fallthrough; 172 171 case PPTP_SET_LINK_INFO: 173 172 /* only need to NAT in case PAC is behind NAT box */ ··· 267 268 pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); 268 269 break; 269 270 default: 270 - pr_debug("unknown inbound packet %s\n", 271 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : 272 - pptp_msg_name[0]); 271 + pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg)); 273 272 fallthrough; 274 273 case PPTP_START_SESSION_REQUEST: 275 274 case PPTP_START_SESSION_REPLY:
+63 -39
net/ipv4/nexthop.c
··· 75 75 int i; 76 76 77 77 nhg = rcu_dereference_raw(nh->nh_grp); 78 - for (i = 0; i < nhg->num_nh; ++i) 79 - WARN_ON(nhg->nh_entries[i].nh); 78 + for (i = 0; i < nhg->num_nh; ++i) { 79 + struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 80 80 81 + WARN_ON(!list_empty(&nhge->nh_list)); 82 + nexthop_put(nhge->nh); 83 + } 84 + 85 + WARN_ON(nhg->spare == nhg); 86 + 87 + kfree(nhg->spare); 81 88 kfree(nhg); 82 89 } 83 90 ··· 765 758 } 766 759 } 767 760 768 - static void remove_nh_grp_entry(struct nh_grp_entry *nhge, 769 - struct nh_group *nhg, 761 + static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge, 770 762 struct nl_info *nlinfo) 771 763 { 764 + struct nh_grp_entry *nhges, *new_nhges; 765 + struct nexthop *nhp = nhge->nh_parent; 772 766 struct nexthop *nh = nhge->nh; 773 - struct nh_grp_entry *nhges; 774 - bool found = false; 775 - int i; 767 + struct nh_group *nhg, *newg; 768 + int i, j; 776 769 777 770 WARN_ON(!nh); 778 771 779 - nhges = nhg->nh_entries; 780 - for (i = 0; i < nhg->num_nh; ++i) { 781 - if (found) { 782 - nhges[i-1].nh = nhges[i].nh; 783 - nhges[i-1].weight = nhges[i].weight; 784 - list_del(&nhges[i].nh_list); 785 - list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list); 786 - } else if (nhg->nh_entries[i].nh == nh) { 787 - found = true; 788 - } 772 + nhg = rtnl_dereference(nhp->nh_grp); 773 + newg = nhg->spare; 774 + 775 + /* last entry, keep it visible and remove the parent */ 776 + if (nhg->num_nh == 1) { 777 + remove_nexthop(net, nhp, nlinfo); 778 + return; 789 779 } 790 780 791 - if (WARN_ON(!found)) 792 - return; 781 + newg->has_v4 = nhg->has_v4; 782 + newg->mpath = nhg->mpath; 783 + newg->num_nh = nhg->num_nh; 793 784 794 - nhg->num_nh--; 795 - nhg->nh_entries[nhg->num_nh].nh = NULL; 785 + /* copy old entries to new except the one getting removed */ 786 + nhges = nhg->nh_entries; 787 + new_nhges = newg->nh_entries; 788 + for (i = 0, j = 0; i < nhg->num_nh; ++i) { 789 + /* current nexthop getting removed */ 790 + if 
(nhg->nh_entries[i].nh == nh) { 791 + newg->num_nh--; 792 + continue; 793 + } 796 794 797 - nh_group_rebalance(nhg); 795 + list_del(&nhges[i].nh_list); 796 + new_nhges[j].nh_parent = nhges[i].nh_parent; 797 + new_nhges[j].nh = nhges[i].nh; 798 + new_nhges[j].weight = nhges[i].weight; 799 + list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); 800 + j++; 801 + } 798 802 799 - nexthop_put(nh); 803 + nh_group_rebalance(newg); 804 + rcu_assign_pointer(nhp->nh_grp, newg); 805 + 806 + list_del(&nhge->nh_list); 807 + nexthop_put(nhge->nh); 800 808 801 809 if (nlinfo) 802 - nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo); 810 + nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo); 803 811 } 804 812 805 813 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, ··· 822 800 { 823 801 struct nh_grp_entry *nhge, *tmp; 824 802 825 - list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) { 826 - struct nh_group *nhg; 803 + list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) 804 + remove_nh_grp_entry(net, nhge, nlinfo); 827 805 828 - list_del(&nhge->nh_list); 829 - nhg = rtnl_dereference(nhge->nh_parent->nh_grp); 830 - remove_nh_grp_entry(nhge, nhg, nlinfo); 831 - 832 - /* if this group has no more entries then remove it */ 833 - if (!nhg->num_nh) 834 - remove_nexthop(net, nhge->nh_parent, nlinfo); 835 - } 806 + /* make sure all see the newly published array before releasing rtnl */ 807 + synchronize_rcu(); 836 808 } 837 809 838 810 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) ··· 840 824 if (WARN_ON(!nhge->nh)) 841 825 continue; 842 826 843 - list_del(&nhge->nh_list); 844 - nexthop_put(nhge->nh); 845 - nhge->nh = NULL; 846 - nhg->num_nh--; 827 + list_del_init(&nhge->nh_list); 847 828 } 848 829 } 849 830 ··· 1166 1153 { 1167 1154 struct nlattr *grps_attr = cfg->nh_grp; 1168 1155 struct nexthop_grp *entry = nla_data(grps_attr); 1156 + u16 num_nh = nla_len(grps_attr) / sizeof(*entry); 1169 1157 struct nh_group 
*nhg; 1170 1158 struct nexthop *nh; 1171 1159 int i; ··· 1177 1163 1178 1164 nh->is_group = 1; 1179 1165 1180 - nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry)); 1166 + nhg = nexthop_grp_alloc(num_nh); 1181 1167 if (!nhg) { 1182 1168 kfree(nh); 1183 1169 return ERR_PTR(-ENOMEM); 1184 1170 } 1171 + 1172 + /* spare group used for removals */ 1173 + nhg->spare = nexthop_grp_alloc(num_nh); 1174 + if (!nhg) { 1175 + kfree(nhg); 1176 + kfree(nh); 1177 + return NULL; 1178 + } 1179 + nhg->spare->spare = nhg; 1185 1180 1186 1181 for (i = 0; i < nhg->num_nh; ++i) { 1187 1182 struct nexthop *nhe; ··· 1226 1203 for (; i >= 0; --i) 1227 1204 nexthop_put(nhg->nh_entries[i].nh); 1228 1205 1206 + kfree(nhg->spare); 1229 1207 kfree(nhg); 1230 1208 kfree(nh); 1231 1209
+25 -12
net/ipv6/esp6_offload.c
··· 85 85 sp->olen++; 86 86 87 87 xo = xfrm_offload(skb); 88 - if (!xo) { 89 - xfrm_state_put(x); 88 + if (!xo) 90 89 goto out_reset; 91 - } 92 90 } 93 91 94 92 xo->flags |= XFRM_GRO; ··· 121 123 struct ip_esp_hdr *esph; 122 124 struct ipv6hdr *iph = ipv6_hdr(skb); 123 125 struct xfrm_offload *xo = xfrm_offload(skb); 124 - int proto = iph->nexthdr; 126 + u8 proto = iph->nexthdr; 125 127 126 128 skb_push(skb, -skb_network_offset(skb)); 129 + 130 + if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) { 131 + __be16 frag; 132 + 133 + ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag); 134 + } 135 + 127 136 esph = ip_esp_hdr(skb); 128 137 *skb_mac_header(skb) = IPPROTO_ESP; 129 138 ··· 171 166 struct xfrm_offload *xo = xfrm_offload(skb); 172 167 struct sk_buff *segs = ERR_PTR(-EINVAL); 173 168 const struct net_offload *ops; 174 - int proto = xo->proto; 169 + u8 proto = xo->proto; 175 170 176 171 skb->transport_header += x->props.header_len; 177 - 178 - if (proto == IPPROTO_BEETPH) { 179 - struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data; 180 - 181 - skb->transport_header += ph->hdrlen * 8; 182 - proto = ph->nexthdr; 183 - } 184 172 185 173 if (x->sel.family != AF_INET6) { 186 174 skb->transport_header -= 187 175 (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); 188 176 177 + if (proto == IPPROTO_BEETPH) { 178 + struct ip_beet_phdr *ph = 179 + (struct ip_beet_phdr *)skb->data; 180 + 181 + skb->transport_header += ph->hdrlen * 8; 182 + proto = ph->nexthdr; 183 + } else { 184 + skb->transport_header -= IPV4_BEET_PHMAXLEN; 185 + } 186 + 189 187 if (proto == IPPROTO_TCP) 190 188 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; 189 + } else { 190 + __be16 frag; 191 + 192 + skb->transport_header += 193 + ipv6_skip_exthdr(skb, 0, &proto, &frag); 191 194 } 192 195 193 196 __skb_pull(skb, skb_transport_offset(skb));
+3
net/l2tp/l2tp_core.c
··· 1458 1458 if (sk->sk_type != SOCK_DGRAM) 1459 1459 return -EPROTONOSUPPORT; 1460 1460 1461 + if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) 1462 + return -EPROTONOSUPPORT; 1463 + 1461 1464 if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) || 1462 1465 (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP)) 1463 1466 return -EPROTONOSUPPORT;
+22 -7
net/l2tp/l2tp_ip.c
··· 20 20 #include <net/icmp.h> 21 21 #include <net/udp.h> 22 22 #include <net/inet_common.h> 23 - #include <net/inet_hashtables.h> 24 23 #include <net/tcp_states.h> 25 24 #include <net/protocol.h> 26 25 #include <net/xfrm.h> ··· 208 209 return 0; 209 210 } 210 211 212 + static int l2tp_ip_hash(struct sock *sk) 213 + { 214 + if (sk_unhashed(sk)) { 215 + write_lock_bh(&l2tp_ip_lock); 216 + sk_add_node(sk, &l2tp_ip_table); 217 + write_unlock_bh(&l2tp_ip_lock); 218 + } 219 + return 0; 220 + } 221 + 222 + static void l2tp_ip_unhash(struct sock *sk) 223 + { 224 + if (sk_unhashed(sk)) 225 + return; 226 + write_lock_bh(&l2tp_ip_lock); 227 + sk_del_node_init(sk); 228 + write_unlock_bh(&l2tp_ip_lock); 229 + } 230 + 211 231 static int l2tp_ip_open(struct sock *sk) 212 232 { 213 233 /* Prevent autobind. We don't have ports. */ 214 234 inet_sk(sk)->inet_num = IPPROTO_L2TP; 215 235 216 - write_lock_bh(&l2tp_ip_lock); 217 - sk_add_node(sk, &l2tp_ip_table); 218 - write_unlock_bh(&l2tp_ip_lock); 219 - 236 + l2tp_ip_hash(sk); 220 237 return 0; 221 238 } 222 239 ··· 609 594 .sendmsg = l2tp_ip_sendmsg, 610 595 .recvmsg = l2tp_ip_recvmsg, 611 596 .backlog_rcv = l2tp_ip_backlog_recv, 612 - .hash = inet_hash, 613 - .unhash = inet_unhash, 597 + .hash = l2tp_ip_hash, 598 + .unhash = l2tp_ip_unhash, 614 599 .obj_size = sizeof(struct l2tp_ip_sock), 615 600 #ifdef CONFIG_COMPAT 616 601 .compat_setsockopt = compat_ip_setsockopt,
+22 -8
net/l2tp/l2tp_ip6.c
··· 20 20 #include <net/icmp.h> 21 21 #include <net/udp.h> 22 22 #include <net/inet_common.h> 23 - #include <net/inet_hashtables.h> 24 - #include <net/inet6_hashtables.h> 25 23 #include <net/tcp_states.h> 26 24 #include <net/protocol.h> 27 25 #include <net/xfrm.h> ··· 220 222 return 0; 221 223 } 222 224 225 + static int l2tp_ip6_hash(struct sock *sk) 226 + { 227 + if (sk_unhashed(sk)) { 228 + write_lock_bh(&l2tp_ip6_lock); 229 + sk_add_node(sk, &l2tp_ip6_table); 230 + write_unlock_bh(&l2tp_ip6_lock); 231 + } 232 + return 0; 233 + } 234 + 235 + static void l2tp_ip6_unhash(struct sock *sk) 236 + { 237 + if (sk_unhashed(sk)) 238 + return; 239 + write_lock_bh(&l2tp_ip6_lock); 240 + sk_del_node_init(sk); 241 + write_unlock_bh(&l2tp_ip6_lock); 242 + } 243 + 223 244 static int l2tp_ip6_open(struct sock *sk) 224 245 { 225 246 /* Prevent autobind. We don't have ports. */ 226 247 inet_sk(sk)->inet_num = IPPROTO_L2TP; 227 248 228 - write_lock_bh(&l2tp_ip6_lock); 229 - sk_add_node(sk, &l2tp_ip6_table); 230 - write_unlock_bh(&l2tp_ip6_lock); 231 - 249 + l2tp_ip6_hash(sk); 232 250 return 0; 233 251 } 234 252 ··· 742 728 .sendmsg = l2tp_ip6_sendmsg, 743 729 .recvmsg = l2tp_ip6_recvmsg, 744 730 .backlog_rcv = l2tp_ip6_backlog_recv, 745 - .hash = inet6_hash, 746 - .unhash = inet_unhash, 731 + .hash = l2tp_ip6_hash, 732 + .unhash = l2tp_ip6_unhash, 747 733 .obj_size = sizeof(struct l2tp_ip6_sock), 748 734 #ifdef CONFIG_COMPAT 749 735 .compat_setsockopt = compat_ipv6_setsockopt,
+7
net/mac80211/mesh_hwmp.c
··· 1103 1103 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, 1104 1104 target_flags, mpath->dst, mpath->sn, da, 0, 1105 1105 ttl, lifetime, 0, ifmsh->preq_id++, sdata); 1106 + 1107 + spin_lock_bh(&mpath->state_lock); 1108 + if (mpath->flags & MESH_PATH_DELETED) { 1109 + spin_unlock_bh(&mpath->state_lock); 1110 + goto enddiscovery; 1111 + } 1106 1112 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 1113 + spin_unlock_bh(&mpath->state_lock); 1107 1114 1108 1115 enddiscovery: 1109 1116 rcu_read_unlock();
+48 -19
net/mptcp/protocol.c
··· 1032 1032 1033 1033 pr_debug("block timeout %ld", timeo); 1034 1034 mptcp_wait_data(sk, &timeo); 1035 - if (unlikely(__mptcp_tcp_fallback(msk))) 1035 + ssock = __mptcp_tcp_fallback(msk); 1036 + if (unlikely(ssock)) 1036 1037 goto fallback; 1037 1038 } 1038 1039 ··· 1347 1346 1348 1347 lock_sock(sk); 1349 1348 1350 - mptcp_token_destroy(msk->token); 1351 1349 inet_sk_state_store(sk, TCP_CLOSE); 1352 1350 1353 - __mptcp_flush_join_list(msk); 1354 - 1351 + /* be sure to always acquire the join list lock, to sync vs 1352 + * mptcp_finish_join(). 1353 + */ 1354 + spin_lock_bh(&msk->join_list_lock); 1355 + list_splice_tail_init(&msk->join_list, &msk->conn_list); 1356 + spin_unlock_bh(&msk->join_list_lock); 1355 1357 list_splice_init(&msk->conn_list, &conn_list); 1356 1358 1357 1359 data_fin_tx_seq = msk->write_seq; ··· 1544 1540 { 1545 1541 struct mptcp_sock *msk = mptcp_sk(sk); 1546 1542 1543 + mptcp_token_destroy(msk->token); 1547 1544 if (msk->cached_ext) 1548 1545 __skb_ext_put(msk->cached_ext); 1549 1546 ··· 1711 1706 if (!msk->pm.server_side) 1712 1707 return true; 1713 1708 1714 - /* passive connection, attach to msk socket */ 1709 + if (!mptcp_pm_allow_new_subflow(msk)) 1710 + return false; 1711 + 1712 + /* active connections are already on conn_list, and we can't acquire 1713 + * msk lock here. 
1714 + * use the join list lock as synchronization point and double-check 1715 + * msk status to avoid racing with mptcp_close() 1716 + */ 1717 + spin_lock_bh(&msk->join_list_lock); 1718 + ret = inet_sk_state_load(parent) == TCP_ESTABLISHED; 1719 + if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) 1720 + list_add_tail(&subflow->node, &msk->join_list); 1721 + spin_unlock_bh(&msk->join_list_lock); 1722 + if (!ret) 1723 + return false; 1724 + 1725 + /* attach to msk socket only after we are sure he will deal with us 1726 + * at close time 1727 + */ 1715 1728 parent_sock = READ_ONCE(parent->sk_socket); 1716 1729 if (parent_sock && !sk->sk_socket) 1717 1730 mptcp_sock_graft(sk, parent_sock); 1718 - 1719 - ret = mptcp_pm_allow_new_subflow(msk); 1720 - if (ret) { 1721 - subflow->map_seq = msk->ack_seq; 1722 - 1723 - /* active connections are already on conn_list */ 1724 - spin_lock_bh(&msk->join_list_lock); 1725 - if (!WARN_ON_ONCE(!list_empty(&subflow->node))) 1726 - list_add_tail(&subflow->node, &msk->join_list); 1727 - spin_unlock_bh(&msk->join_list_lock); 1728 - } 1729 - return ret; 1731 + subflow->map_seq = msk->ack_seq; 1732 + return true; 1730 1733 } 1731 1734 1732 1735 static bool mptcp_memory_free(const struct sock *sk, int wake) ··· 1801 1788 int err; 1802 1789 1803 1790 lock_sock(sock->sk); 1791 + if (sock->state != SS_UNCONNECTED && msk->subflow) { 1792 + /* pending connection or invalid state, let existing subflow 1793 + * cope with that 1794 + */ 1795 + ssock = msk->subflow; 1796 + goto do_connect; 1797 + } 1798 + 1804 1799 ssock = __mptcp_socket_create(msk, TCP_SYN_SENT); 1805 1800 if (IS_ERR(ssock)) { 1806 1801 err = PTR_ERR(ssock); ··· 1823 1802 mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0; 1824 1803 #endif 1825 1804 1805 + do_connect: 1826 1806 err = ssock->ops->connect(ssock, uaddr, addr_len, flags); 1827 - inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); 1828 - mptcp_copy_inaddrs(sock->sk, ssock->sk); 1807 + sock->state = 
ssock->state; 1808 + 1809 + /* on successful connect, the msk state will be moved to established by 1810 + * subflow_finish_connect() 1811 + */ 1812 + if (!err || err == EINPROGRESS) 1813 + mptcp_copy_inaddrs(sock->sk, ssock->sk); 1814 + else 1815 + inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); 1829 1816 1830 1817 unlock: 1831 1818 release_sock(sock->sk);
+1 -1
net/netfilter/ipset/ip_set_list_set.c
··· 59 59 /* Don't lookup sub-counters at all */ 60 60 opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; 61 61 if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) 62 - opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; 62 + opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE; 63 63 list_for_each_entry_rcu(e, &map->members, list) { 64 64 ret = ip_set_test(e->id, skb, par, opt); 65 65 if (ret <= 0)
+73 -7
net/netfilter/nf_conntrack_core.c
··· 2016 2016 nf_conntrack_get(skb_nfct(nskb)); 2017 2017 } 2018 2018 2019 - static int nf_conntrack_update(struct net *net, struct sk_buff *skb) 2019 + static int __nf_conntrack_update(struct net *net, struct sk_buff *skb, 2020 + struct nf_conn *ct, 2021 + enum ip_conntrack_info ctinfo) 2020 2022 { 2021 2023 struct nf_conntrack_tuple_hash *h; 2022 2024 struct nf_conntrack_tuple tuple; 2023 - enum ip_conntrack_info ctinfo; 2024 2025 struct nf_nat_hook *nat_hook; 2025 2026 unsigned int status; 2026 - struct nf_conn *ct; 2027 2027 int dataoff; 2028 2028 u16 l3num; 2029 2029 u8 l4num; 2030 - 2031 - ct = nf_ct_get(skb, &ctinfo); 2032 - if (!ct || nf_ct_is_confirmed(ct)) 2033 - return 0; 2034 2030 2035 2031 l3num = nf_ct_l3num(ct); 2036 2032 ··· 2082 2086 return -1; 2083 2087 2084 2088 return 0; 2089 + } 2090 + 2091 + /* This packet is coming from userspace via nf_queue, complete the packet 2092 + * processing after the helper invocation in nf_confirm(). 2093 + */ 2094 + static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct, 2095 + enum ip_conntrack_info ctinfo) 2096 + { 2097 + const struct nf_conntrack_helper *helper; 2098 + const struct nf_conn_help *help; 2099 + int protoff; 2100 + 2101 + help = nfct_help(ct); 2102 + if (!help) 2103 + return 0; 2104 + 2105 + helper = rcu_dereference(help->helper); 2106 + if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) 2107 + return 0; 2108 + 2109 + switch (nf_ct_l3num(ct)) { 2110 + case NFPROTO_IPV4: 2111 + protoff = skb_network_offset(skb) + ip_hdrlen(skb); 2112 + break; 2113 + #if IS_ENABLED(CONFIG_IPV6) 2114 + case NFPROTO_IPV6: { 2115 + __be16 frag_off; 2116 + u8 pnum; 2117 + 2118 + pnum = ipv6_hdr(skb)->nexthdr; 2119 + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, 2120 + &frag_off); 2121 + if (protoff < 0 || (frag_off & htons(~0x7)) != 0) 2122 + return 0; 2123 + break; 2124 + } 2125 + #endif 2126 + default: 2127 + return 0; 2128 + } 2129 + 2130 + if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 
2131 + !nf_is_loopback_packet(skb)) { 2132 + if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { 2133 + NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); 2134 + return -1; 2135 + } 2136 + } 2137 + 2138 + /* We've seen it coming out the other side: confirm it */ 2139 + return nf_conntrack_confirm(skb) == NF_DROP ? - 1 : 0; 2140 + } 2141 + 2142 + static int nf_conntrack_update(struct net *net, struct sk_buff *skb) 2143 + { 2144 + enum ip_conntrack_info ctinfo; 2145 + struct nf_conn *ct; 2146 + int err; 2147 + 2148 + ct = nf_ct_get(skb, &ctinfo); 2149 + if (!ct) 2150 + return 0; 2151 + 2152 + if (!nf_ct_is_confirmed(ct)) { 2153 + err = __nf_conntrack_update(net, skb, ct, ctinfo); 2154 + if (err < 0) 2155 + return err; 2156 + } 2157 + 2158 + return nf_confirm_cthelper(skb, ct, ctinfo); 2085 2159 } 2086 2160 2087 2161 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+35 -27
net/netfilter/nf_conntrack_pptp.c
··· 72 72 73 73 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) 74 74 /* PptpControlMessageType names */ 75 - const char *const pptp_msg_name[] = { 76 - "UNKNOWN_MESSAGE", 77 - "START_SESSION_REQUEST", 78 - "START_SESSION_REPLY", 79 - "STOP_SESSION_REQUEST", 80 - "STOP_SESSION_REPLY", 81 - "ECHO_REQUEST", 82 - "ECHO_REPLY", 83 - "OUT_CALL_REQUEST", 84 - "OUT_CALL_REPLY", 85 - "IN_CALL_REQUEST", 86 - "IN_CALL_REPLY", 87 - "IN_CALL_CONNECT", 88 - "CALL_CLEAR_REQUEST", 89 - "CALL_DISCONNECT_NOTIFY", 90 - "WAN_ERROR_NOTIFY", 91 - "SET_LINK_INFO" 75 + static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = { 76 + [0] = "UNKNOWN_MESSAGE", 77 + [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST", 78 + [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY", 79 + [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST", 80 + [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY", 81 + [PPTP_ECHO_REQUEST] = "ECHO_REQUEST", 82 + [PPTP_ECHO_REPLY] = "ECHO_REPLY", 83 + [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST", 84 + [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY", 85 + [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST", 86 + [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY", 87 + [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT", 88 + [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST", 89 + [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY", 90 + [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY", 91 + [PPTP_SET_LINK_INFO] = "SET_LINK_INFO" 92 92 }; 93 + 94 + const char *pptp_msg_name(u_int16_t msg) 95 + { 96 + if (msg > PPTP_MSG_MAX) 97 + return pptp_msg_name_array[0]; 98 + 99 + return pptp_msg_name_array[msg]; 100 + } 93 101 EXPORT_SYMBOL(pptp_msg_name); 94 102 #endif 95 103 ··· 284 276 typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; 285 277 286 278 msg = ntohs(ctlh->messageType); 287 - pr_debug("inbound control message %s\n", pptp_msg_name[msg]); 279 + pr_debug("inbound control message %s\n", pptp_msg_name(msg)); 288 280 289 281 switch (msg) { 290 282 case PPTP_START_SESSION_REPLY: ··· 319 311 pcid 
= pptpReq->ocack.peersCallID; 320 312 if (info->pns_call_id != pcid) 321 313 goto invalid; 322 - pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], 314 + pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg), 323 315 ntohs(cid), ntohs(pcid)); 324 316 325 317 if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { ··· 336 328 goto invalid; 337 329 338 330 cid = pptpReq->icreq.callID; 339 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 331 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 340 332 info->cstate = PPTP_CALL_IN_REQ; 341 333 info->pac_call_id = cid; 342 334 break; ··· 355 347 if (info->pns_call_id != pcid) 356 348 goto invalid; 357 349 358 - pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); 350 + pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid)); 359 351 info->cstate = PPTP_CALL_IN_CONF; 360 352 361 353 /* we expect a GRE connection from PAC to PNS */ ··· 365 357 case PPTP_CALL_DISCONNECT_NOTIFY: 366 358 /* server confirms disconnect */ 367 359 cid = pptpReq->disc.callID; 368 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 360 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 369 361 info->cstate = PPTP_CALL_NONE; 370 362 371 363 /* untrack this call id, unexpect GRE packets */ ··· 392 384 invalid: 393 385 pr_debug("invalid %s: type=%d cid=%u pcid=%u " 394 386 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 395 - msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], 387 + pptp_msg_name(msg), 396 388 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 397 389 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 398 390 return NF_ACCEPT; ··· 412 404 typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; 413 405 414 406 msg = ntohs(ctlh->messageType); 415 - pr_debug("outbound control message %s\n", pptp_msg_name[msg]); 407 + pr_debug("outbound control message %s\n", pptp_msg_name(msg)); 416 408 417 409 switch (msg) { 418 410 case PPTP_START_SESSION_REQUEST: ··· 434 426 info->cstate = PPTP_CALL_OUT_REQ; 435 427 /* track PNS call id */ 436 428 cid = pptpReq->ocreq.callID; 437 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 429 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 438 430 info->pns_call_id = cid; 439 431 break; 440 432 ··· 448 440 pcid = pptpReq->icack.peersCallID; 449 441 if (info->pac_call_id != pcid) 450 442 goto invalid; 451 - pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], 443 + pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg), 452 444 ntohs(cid), ntohs(pcid)); 453 445 454 446 if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { ··· 488 480 invalid: 489 481 pr_debug("invalid %s: type=%d cid=%u pcid=%u " 490 482 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 491 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], 483 + pptp_msg_name(msg), 492 484 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 493 485 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 494 486 return NF_ACCEPT;
+5 -5
net/qrtr/ns.c
··· 714 714 goto err_sock; 715 715 } 716 716 717 + qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); 718 + if (!qrtr_ns.workqueue) 719 + goto err_sock; 720 + 717 721 qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready; 718 722 719 723 sq.sq_port = QRTR_PORT_CTRL; ··· 726 722 ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq)); 727 723 if (ret < 0) { 728 724 pr_err("failed to bind to socket\n"); 729 - goto err_sock; 725 + goto err_wq; 730 726 } 731 727 732 728 qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR; 733 729 qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST; 734 730 qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL; 735 - 736 - qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); 737 - if (!qrtr_ns.workqueue) 738 - goto err_sock; 739 731 740 732 ret = say_hello(&qrtr_ns.bcast_sq); 741 733 if (ret < 0)
+3
net/sched/act_ct.c
··· 200 200 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 201 201 struct nf_conntrack_tuple target; 202 202 203 + if (!(ct->status & IPS_NAT_MASK)) 204 + return 0; 205 + 203 206 nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); 204 207 205 208 switch (tuple->src.l3num) {
+2 -2
net/sched/sch_fq_pie.c
··· 297 297 goto flow_error; 298 298 } 299 299 q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]); 300 - if (!q->flows_cnt || q->flows_cnt > 65536) { 300 + if (!q->flows_cnt || q->flows_cnt >= 65536) { 301 301 NL_SET_ERR_MSG_MOD(extack, 302 - "Number of flows must be < 65536"); 302 + "Number of flows must range in [1..65535]"); 303 303 goto flow_error; 304 304 } 305 305 }
+1 -1
net/sctp/Kconfig
··· 31 31 homing at either or both ends of an association." 32 32 33 33 To compile this protocol support as a module, choose M here: the 34 - module will be called sctp. Debug messages are handeled by the 34 + module will be called sctp. Debug messages are handled by the 35 35 kernel's dynamic debugging framework. 36 36 37 37 If in doubt, say N.
+3
net/sctp/ulpevent.c
··· 343 343 struct sockaddr_storage addr; 344 344 struct sctp_ulpevent *event; 345 345 346 + if (asoc->state < SCTP_STATE_ESTABLISHED) 347 + return; 348 + 346 349 memset(&addr, 0, sizeof(struct sockaddr_storage)); 347 350 memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); 348 351
+27 -6
net/tls/tls_sw.c
··· 206 206 207 207 kfree(aead_req); 208 208 209 + spin_lock_bh(&ctx->decrypt_compl_lock); 209 210 pending = atomic_dec_return(&ctx->decrypt_pending); 210 211 211 - if (!pending && READ_ONCE(ctx->async_notify)) 212 + if (!pending && ctx->async_notify) 212 213 complete(&ctx->async_wait.completion); 214 + spin_unlock_bh(&ctx->decrypt_compl_lock); 213 215 } 214 216 215 217 static int tls_do_decryption(struct sock *sk, ··· 469 467 ready = true; 470 468 } 471 469 470 + spin_lock_bh(&ctx->encrypt_compl_lock); 472 471 pending = atomic_dec_return(&ctx->encrypt_pending); 473 472 474 - if (!pending && READ_ONCE(ctx->async_notify)) 473 + if (!pending && ctx->async_notify) 475 474 complete(&ctx->async_wait.completion); 475 + spin_unlock_bh(&ctx->encrypt_compl_lock); 476 476 477 477 if (!ready) 478 478 return; ··· 933 929 int num_zc = 0; 934 930 int orig_size; 935 931 int ret = 0; 932 + int pending; 936 933 937 934 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 938 935 return -EOPNOTSUPP; ··· 1100 1095 goto send_end; 1101 1096 } else if (num_zc) { 1102 1097 /* Wait for pending encryptions to get completed */ 1103 - smp_store_mb(ctx->async_notify, true); 1098 + spin_lock_bh(&ctx->encrypt_compl_lock); 1099 + ctx->async_notify = true; 1104 1100 1105 - if (atomic_read(&ctx->encrypt_pending)) 1101 + pending = atomic_read(&ctx->encrypt_pending); 1102 + spin_unlock_bh(&ctx->encrypt_compl_lock); 1103 + if (pending) 1106 1104 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1107 1105 else 1108 1106 reinit_completion(&ctx->async_wait.completion); 1109 1107 1108 + /* There can be no concurrent accesses, since we have no 1109 + * pending encrypt operations 1110 + */ 1110 1111 WRITE_ONCE(ctx->async_notify, false); 1111 1112 1112 1113 if (ctx->async_wait.err) { ··· 1743 1732 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1744 1733 bool is_peek = flags & MSG_PEEK; 1745 1734 int num_async = 0; 1735 + int pending; 1746 1736 1747 1737 flags |= nonblock; 1748 1738 ··· 1906 1894 
recv_end: 1907 1895 if (num_async) { 1908 1896 /* Wait for all previously submitted records to be decrypted */ 1909 - smp_store_mb(ctx->async_notify, true); 1910 - if (atomic_read(&ctx->decrypt_pending)) { 1897 + spin_lock_bh(&ctx->decrypt_compl_lock); 1898 + ctx->async_notify = true; 1899 + pending = atomic_read(&ctx->decrypt_pending); 1900 + spin_unlock_bh(&ctx->decrypt_compl_lock); 1901 + if (pending) { 1911 1902 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1912 1903 if (err) { 1913 1904 /* one of async decrypt failed */ ··· 1922 1907 } else { 1923 1908 reinit_completion(&ctx->async_wait.completion); 1924 1909 } 1910 + 1911 + /* There can be no concurrent accesses, since we have no 1912 + * pending decrypt operations 1913 + */ 1925 1914 WRITE_ONCE(ctx->async_notify, false); 1926 1915 1927 1916 /* Drain records from the rx_list & copy if required */ ··· 2312 2293 2313 2294 if (tx) { 2314 2295 crypto_init_wait(&sw_ctx_tx->async_wait); 2296 + spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); 2315 2297 crypto_info = &ctx->crypto_send.info; 2316 2298 cctx = &ctx->tx; 2317 2299 aead = &sw_ctx_tx->aead_send; ··· 2321 2301 sw_ctx_tx->tx_work.sk = sk; 2322 2302 } else { 2323 2303 crypto_init_wait(&sw_ctx_rx->async_wait); 2304 + spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); 2324 2305 crypto_info = &ctx->crypto_recv.info; 2325 2306 cctx = &ctx->rx; 2326 2307 skb_queue_head_init(&sw_ctx_rx->rx_list);
+1 -1
net/vmw_vsock/af_vsock.c
··· 1408 1408 /* Wait for children sockets to appear; these are the new sockets 1409 1409 * created upon connection establishment. 1410 1410 */ 1411 - timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); 1411 + timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK); 1412 1412 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); 1413 1413 1414 1414 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
+8
net/vmw_vsock/virtio_transport_common.c
··· 1132 1132 1133 1133 lock_sock(sk); 1134 1134 1135 + /* Check if sk has been released before lock_sock */ 1136 + if (sk->sk_shutdown == SHUTDOWN_MASK) { 1137 + (void)virtio_transport_reset_no_sock(t, pkt); 1138 + release_sock(sk); 1139 + sock_put(sk); 1140 + goto free_pkt; 1141 + } 1142 + 1135 1143 /* Update CID in case it has changed after a transport reset event */ 1136 1144 vsk->local_addr.svm_cid = dst.svm_cid; 1137 1145
+1 -1
net/wireless/core.c
··· 142 142 if (result) 143 143 return result; 144 144 145 - if (rdev->wiphy.debugfsdir) 145 + if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir)) 146 146 debugfs_rename(rdev->wiphy.debugfsdir->d_parent, 147 147 rdev->wiphy.debugfsdir, 148 148 rdev->wiphy.debugfsdir->d_parent, newname);
+6 -2
net/xdp/xdp_umem.c
··· 305 305 { 306 306 bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; 307 307 u32 chunk_size = mr->chunk_size, headroom = mr->headroom; 308 + u64 npgs, addr = mr->addr, size = mr->len; 308 309 unsigned int chunks, chunks_per_page; 309 - u64 addr = mr->addr, size = mr->len; 310 310 int err; 311 311 312 312 if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { ··· 336 336 if ((addr + size) < addr) 337 337 return -EINVAL; 338 338 339 + npgs = div_u64(size, PAGE_SIZE); 340 + if (npgs > U32_MAX) 341 + return -EINVAL; 342 + 339 343 chunks = (unsigned int)div_u64(size, chunk_size); 340 344 if (chunks == 0) 341 345 return -EINVAL; ··· 356 352 umem->size = size; 357 353 umem->headroom = headroom; 358 354 umem->chunk_size = chunk_size; 359 - umem->npgs = size / PAGE_SIZE; 355 + umem->npgs = (u32)npgs; 360 356 umem->pgs = NULL; 361 357 umem->user = NULL; 362 358 umem->flags = mr->flags;
+2
net/xfrm/espintcp.c
··· 390 390 { 391 391 struct espintcp_ctx *ctx = espintcp_getctx(sk); 392 392 393 + ctx->saved_destruct(sk); 393 394 kfree(ctx); 394 395 } 395 396 ··· 446 445 } 447 446 ctx->saved_data_ready = sk->sk_data_ready; 448 447 ctx->saved_write_space = sk->sk_write_space; 448 + ctx->saved_destruct = sk->sk_destruct; 449 449 sk->sk_data_ready = espintcp_data_ready; 450 450 sk->sk_write_space = espintcp_write_space; 451 451 sk->sk_destruct = espintcp_destruct;
+3 -5
net/xfrm/xfrm_device.c
··· 25 25 struct xfrm_offload *xo = xfrm_offload(skb); 26 26 27 27 skb_reset_mac_len(skb); 28 - pskb_pull(skb, skb->mac_len + hsize + x->props.header_len); 29 - 30 - if (xo->flags & XFRM_GSO_SEGMENT) { 31 - skb_reset_transport_header(skb); 28 + if (xo->flags & XFRM_GSO_SEGMENT) 32 29 skb->transport_header -= x->props.header_len; 33 - } 30 + 31 + pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len); 34 32 } 35 33 36 34 static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
+1 -1
net/xfrm/xfrm_input.c
··· 645 645 dev_put(skb->dev); 646 646 647 647 spin_lock(&x->lock); 648 - if (nexthdr <= 0) { 648 + if (nexthdr < 0) { 649 649 if (nexthdr == -EBADMSG) { 650 650 xfrm_audit_state_icvfail(x, skb, 651 651 x->type->proto);
+21
net/xfrm/xfrm_interface.c
··· 748 748 .get_link_net = xfrmi_get_link_net, 749 749 }; 750 750 751 + static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list) 752 + { 753 + struct net *net; 754 + LIST_HEAD(list); 755 + 756 + rtnl_lock(); 757 + list_for_each_entry(net, net_exit_list, exit_list) { 758 + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); 759 + struct xfrm_if __rcu **xip; 760 + struct xfrm_if *xi; 761 + 762 + for (xip = &xfrmn->xfrmi[0]; 763 + (xi = rtnl_dereference(*xip)) != NULL; 764 + xip = &xi->next) 765 + unregister_netdevice_queue(xi->dev, &list); 766 + } 767 + unregister_netdevice_many(&list); 768 + rtnl_unlock(); 769 + } 770 + 751 771 static struct pernet_operations xfrmi_net_ops = { 772 + .exit_batch = xfrmi_exit_batch_net, 752 773 .id = &xfrmi_net_id, 753 774 .size = sizeof(struct xfrmi_net), 754 775 };
+9 -6
net/xfrm/xfrm_output.c
··· 605 605 xfrm_state_hold(x); 606 606 607 607 if (skb_is_gso(skb)) { 608 - skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; 608 + if (skb->inner_protocol) 609 + return xfrm_output_gso(net, sk, skb); 609 610 610 - return xfrm_output2(net, sk, skb); 611 + skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; 612 + goto out; 611 613 } 612 614 613 615 if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) 614 616 goto out; 617 + } else { 618 + if (skb_is_gso(skb)) 619 + return xfrm_output_gso(net, sk, skb); 615 620 } 616 - 617 - if (skb_is_gso(skb)) 618 - return xfrm_output_gso(net, sk, skb); 619 621 620 622 if (skb->ip_summed == CHECKSUM_PARTIAL) { 621 623 err = skb_checksum_help(skb); ··· 755 753 756 754 if (skb->protocol == htons(ETH_P_IP)) 757 755 proto = AF_INET; 758 - else if (skb->protocol == htons(ETH_P_IPV6)) 756 + else if (skb->protocol == htons(ETH_P_IPV6) && 757 + skb->sk->sk_family == AF_INET6) 759 758 proto = AF_INET6; 760 759 else 761 760 return;
+1 -6
net/xfrm/xfrm_policy.c
··· 1436 1436 static bool xfrm_policy_mark_match(struct xfrm_policy *policy, 1437 1437 struct xfrm_policy *pol) 1438 1438 { 1439 - u32 mark = policy->mark.v & policy->mark.m; 1440 - 1441 - if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m) 1442 - return true; 1443 - 1444 - if ((mark & pol->mark.m) == pol->mark.v && 1439 + if (policy->mark.v == pol->mark.v && 1445 1440 policy->priority == pol->priority) 1446 1441 return true; 1447 1442
+9 -5
scripts/checkpatch.pl
··· 51 51 my @ignore = (); 52 52 my $help = 0; 53 53 my $configuration_file = ".checkpatch.conf"; 54 - my $max_line_length = 80; 54 + my $max_line_length = 100; 55 55 my $ignore_perl_version = 0; 56 56 my $minimum_perl_version = 5.10.0; 57 57 my $min_conf_desc_length = 4; ··· 97 97 --types TYPE(,TYPE2...) show only these comma separated message types 98 98 --ignore TYPE(,TYPE2...) ignore various comma separated message types 99 99 --show-types show the specific message type in the output 100 - --max-line-length=n set the maximum line length, if exceeded, warn 100 + --max-line-length=n set the maximum line length, (default $max_line_length) 101 + if exceeded, warn on patches 102 + requires --strict for use with --file 101 103 --min-conf-desc-length=n set the min description length, if shorter, warn 102 - --tab-size=n set the number of spaces for tab (default 8) 104 + --tab-size=n set the number of spaces for tab (default $tabsize) 103 105 --root=PATH PATH to the kernel tree root 104 106 --no-summary suppress the per-file summary 105 107 --mailback only produce a report in case of warnings/errors ··· 3242 3240 3243 3241 if ($msg_type ne "" && 3244 3242 (show_type("LONG_LINE") || show_type($msg_type))) { 3245 - WARN($msg_type, 3246 - "line over $max_line_length characters\n" . $herecurr); 3243 + my $msg_level = \&WARN; 3244 + $msg_level = \&CHK if ($file); 3245 + &{$msg_level}($msg_type, 3246 + "line length of $length exceeds $max_line_length columns\n" . $herecurr); 3247 3247 } 3248 3248 } 3249 3249
+1 -1
security/Makefile
··· 30 30 obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/ 31 31 obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/ 32 32 obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/ 33 - obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o 33 + obj-$(CONFIG_CGROUPS) += device_cgroup.o 34 34 obj-$(CONFIG_BPF_LSM) += bpf/ 35 35 36 36 # Object integrity file lists
+1
security/commoncap.c
··· 812 812 int ret; 813 813 kuid_t root_uid; 814 814 815 + new->cap_ambient = old->cap_ambient; 815 816 if (WARN_ON(!cap_ambient_invariant_ok(old))) 816 817 return -EPERM; 817 818
+16 -3
security/device_cgroup.c
··· 15 15 #include <linux/rcupdate.h> 16 16 #include <linux/mutex.h> 17 17 18 + #ifdef CONFIG_CGROUP_DEVICE 19 + 18 20 static DEFINE_MUTEX(devcgroup_mutex); 19 21 20 22 enum devcg_behavior { ··· 794 792 }; 795 793 796 794 /** 797 - * __devcgroup_check_permission - checks if an inode operation is permitted 795 + * devcgroup_legacy_check_permission - checks if an inode operation is permitted 798 796 * @dev_cgroup: the dev cgroup to be tested against 799 797 * @type: device type 800 798 * @major: device major number ··· 803 801 * 804 802 * returns 0 on success, -EPERM case the operation is not permitted 805 803 */ 806 - static int __devcgroup_check_permission(short type, u32 major, u32 minor, 804 + static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor, 807 805 short access) 808 806 { 809 807 struct dev_cgroup *dev_cgroup; ··· 827 825 return 0; 828 826 } 829 827 828 + #endif /* CONFIG_CGROUP_DEVICE */ 829 + 830 + #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) 831 + 830 832 int devcgroup_check_permission(short type, u32 major, u32 minor, short access) 831 833 { 832 834 int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access); ··· 838 832 if (rc) 839 833 return -EPERM; 840 834 841 - return __devcgroup_check_permission(type, major, minor, access); 835 + #ifdef CONFIG_CGROUP_DEVICE 836 + return devcgroup_legacy_check_permission(type, major, minor, access); 837 + 838 + #else /* CONFIG_CGROUP_DEVICE */ 839 + return 0; 840 + 841 + #endif /* CONFIG_CGROUP_DEVICE */ 842 842 } 843 843 EXPORT_SYMBOL(devcgroup_check_permission); 844 + #endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */
+2 -2
sound/core/hwdep.c
··· 216 216 if (info.index >= 32) 217 217 return -EINVAL; 218 218 /* check whether the dsp was already loaded */ 219 - if (hw->dsp_loaded & (1 << info.index)) 219 + if (hw->dsp_loaded & (1u << info.index)) 220 220 return -EBUSY; 221 221 err = hw->ops.dsp_load(hw, &info); 222 222 if (err < 0) 223 223 return err; 224 - hw->dsp_loaded |= (1 << info.index); 224 + hw->dsp_loaded |= (1u << info.index); 225 225 return 0; 226 226 } 227 227
+29 -10
sound/pci/hda/patch_realtek.c
··· 384 384 case 0x10ec0282: 385 385 case 0x10ec0283: 386 386 case 0x10ec0286: 387 + case 0x10ec0287: 387 388 case 0x10ec0288: 388 389 case 0x10ec0285: 389 390 case 0x10ec0298: ··· 5485 5484 { 0x19, 0x21a11010 }, /* dock mic */ 5486 5485 { } 5487 5486 }; 5488 - /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise 5489 - * the speaker output becomes too low by some reason on Thinkpads with 5490 - * ALC298 codec 5491 - */ 5492 - static const hda_nid_t preferred_pairs[] = { 5493 - 0x14, 0x03, 0x17, 0x02, 0x21, 0x02, 5494 - 0 5495 - }; 5496 5487 struct alc_spec *spec = codec->spec; 5497 5488 5498 5489 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 5499 - spec->gen.preferred_dacs = preferred_pairs; 5500 5490 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 5501 5491 snd_hda_apply_pincfgs(codec, pincfgs); 5502 5492 } else if (action == HDA_FIXUP_ACT_INIT) { ··· 5498 5506 snd_hda_codec_write(codec, 0x19, 0, 5499 5507 AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); 5500 5508 } 5509 + } 5510 + 5511 + static void alc_fixup_tpt470_dacs(struct hda_codec *codec, 5512 + const struct hda_fixup *fix, int action) 5513 + { 5514 + /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise 5515 + * the speaker output becomes too low by some reason on Thinkpads with 5516 + * ALC298 codec 5517 + */ 5518 + static const hda_nid_t preferred_pairs[] = { 5519 + 0x14, 0x03, 0x17, 0x02, 0x21, 0x02, 5520 + 0 5521 + }; 5522 + struct alc_spec *spec = codec->spec; 5523 + 5524 + if (action == HDA_FIXUP_ACT_PRE_PROBE) 5525 + spec->gen.preferred_dacs = preferred_pairs; 5501 5526 } 5502 5527 5503 5528 static void alc_shutup_dell_xps13(struct hda_codec *codec) ··· 6072 6063 ALC700_FIXUP_INTEL_REFERENCE, 6073 6064 ALC274_FIXUP_DELL_BIND_DACS, 6074 6065 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, 6066 + ALC298_FIXUP_TPT470_DOCK_FIX, 6075 6067 ALC298_FIXUP_TPT470_DOCK, 6076 6068 ALC255_FIXUP_DUMMY_LINEOUT_VERB, 6077 6069 ALC255_FIXUP_DELL_HEADSET_MIC, ··· 7004 6994 .chained = true, 7005 6995 .chain_id = 
ALC274_FIXUP_DELL_BIND_DACS 7006 6996 }, 7007 - [ALC298_FIXUP_TPT470_DOCK] = { 6997 + [ALC298_FIXUP_TPT470_DOCK_FIX] = { 7008 6998 .type = HDA_FIXUP_FUNC, 7009 6999 .v.func = alc_fixup_tpt470_dock, 7010 7000 .chained = true, 7011 7001 .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE 7002 + }, 7003 + [ALC298_FIXUP_TPT470_DOCK] = { 7004 + .type = HDA_FIXUP_FUNC, 7005 + .v.func = alc_fixup_tpt470_dacs, 7006 + .chained = true, 7007 + .chain_id = ALC298_FIXUP_TPT470_DOCK_FIX 7012 7008 }, 7013 7009 [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = { 7014 7010 .type = HDA_FIXUP_PINS, ··· 7654 7638 {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, 7655 7639 {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, 7656 7640 {.id = ALC292_FIXUP_TPT460, .name = "tpt460"}, 7641 + {.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"}, 7657 7642 {.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"}, 7658 7643 {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, 7659 7644 {.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"}, ··· 8293 8276 case 0x10ec0215: 8294 8277 case 0x10ec0245: 8295 8278 case 0x10ec0285: 8279 + case 0x10ec0287: 8296 8280 case 0x10ec0289: 8297 8281 spec->codec_variant = ALC269_TYPE_ALC215; 8298 8282 spec->shutup = alc225_shutup; ··· 9572 9554 HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269), 9573 9555 HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269), 9574 9556 HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269), 9557 + HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269), 9575 9558 HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269), 9576 9559 HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269), 9577 9560 HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
+8
sound/usb/mixer.c
··· 1182 1182 cval->res = 384; 1183 1183 } 1184 1184 break; 1185 + case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */ 1186 + if ((strstr(kctl->id.name, "Playback Volume") != NULL) || 1187 + strstr(kctl->id.name, "Capture Volume") != NULL) { 1188 + cval->min >>= 8; 1189 + cval->max = 0; 1190 + cval->res = 1; 1191 + } 1192 + break; 1185 1193 } 1186 1194 } 1187 1195
+19
sound/usb/mixer_maps.c
··· 397 397 {} 398 398 }; 399 399 400 + /* Rear panel + front mic on Gigabyte TRX40 Aorus Master with ALC1220-VB */ 401 + static const struct usbmix_name_map aorus_master_alc1220vb_map[] = { 402 + { 17, NULL }, /* OT, IEC958?, disabled */ 403 + { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ 404 + { 16, "Line Out" }, /* OT */ 405 + { 22, "Line Out Playback" }, /* FU */ 406 + { 7, "Line" }, /* IT */ 407 + { 19, "Line Capture" }, /* FU */ 408 + { 8, "Mic" }, /* IT */ 409 + { 20, "Mic Capture" }, /* FU */ 410 + { 9, "Front Mic" }, /* IT */ 411 + { 21, "Front Mic Capture" }, /* FU */ 412 + {} 413 + }; 414 + 400 415 /* 401 416 * Control map entries 402 417 */ ··· 540 525 /* Corsair Virtuoso (wireless mode) */ 541 526 .id = USB_ID(0x1b1c, 0x0a42), 542 527 .map = corsair_virtuoso_map, 528 + }, 529 + { /* Gigabyte TRX40 Aorus Master (rear panel + front mic) */ 530 + .id = USB_ID(0x0414, 0xa001), 531 + .map = aorus_master_alc1220vb_map, 543 532 }, 544 533 { /* Gigabyte TRX40 Aorus Pro WiFi */ 545 534 .id = USB_ID(0x0414, 0xa002),
+25
sound/usb/quirks-table.h
··· 3566 3566 ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */ 3567 3567 #undef ALC1220_VB_DESKTOP 3568 3568 3569 + /* Two entries for Gigabyte TRX40 Aorus Master: 3570 + * TRX40 Aorus Master has two USB-audio devices, one for the front headphone 3571 + * with ESS SABRE9218 DAC chip, while another for the rest I/O (the rear 3572 + * panel and the front mic) with Realtek ALC1220-VB. 3573 + * Here we provide two distinct names for making UCM profiles easier. 3574 + */ 3575 + { 3576 + USB_DEVICE(0x0414, 0xa000), 3577 + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { 3578 + .vendor_name = "Gigabyte", 3579 + .product_name = "Aorus Master Front Headphone", 3580 + .profile_name = "Gigabyte-Aorus-Master-Front-Headphone", 3581 + .ifnum = QUIRK_NO_INTERFACE 3582 + } 3583 + }, 3584 + { 3585 + USB_DEVICE(0x0414, 0xa001), 3586 + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { 3587 + .vendor_name = "Gigabyte", 3588 + .product_name = "Aorus Master Main Audio", 3589 + .profile_name = "Gigabyte-Aorus-Master-Main-Audio", 3590 + .ifnum = QUIRK_NO_INTERFACE 3591 + } 3592 + }, 3593 + 3569 3594 #undef USB_DEVICE_VENDOR_SPEC
+1 -1
tools/arch/x86/include/uapi/asm/unistd.h
··· 3 3 #define _UAPI_ASM_X86_UNISTD_H 4 4 5 5 /* x32 syscall flag bit */ 6 - #define __X32_SYSCALL_BIT 0x40000000UL 6 + #define __X32_SYSCALL_BIT 0x40000000 7 7 8 8 #ifndef __KERNEL__ 9 9 # ifdef __i386__
+32 -14
tools/testing/selftests/bpf/verifier/bounds.c
··· 238 238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 239 239 BPF_LD_MAP_FD(BPF_REG_1, 0), 240 240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 241 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 241 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 242 242 /* r1 = [0x00, 0xff] */ 243 243 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 244 244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), ··· 253 253 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 254 254 */ 255 255 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 256 - /* r1 = 0 or 257 - * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] 258 - */ 259 - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), 260 256 /* error on OOB pointer computation */ 261 257 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 262 258 /* exit */ ··· 261 265 }, 262 266 .fixup_map_hash_8b = { 3 }, 263 267 /* not actually fully unbounded, but the bound is very high */ 264 - .errstr = "value 72057594021150720 makes map_value pointer be out of bounds", 265 - .result = REJECT 268 + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root", 269 + .result_unpriv = REJECT, 270 + .errstr = "value -4294967168 makes map_value pointer be out of bounds", 271 + .result = REJECT, 266 272 }, 267 273 { 268 274 "bounds check after truncation of boundary-crossing range (2)", ··· 274 276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 275 277 BPF_LD_MAP_FD(BPF_REG_1, 0), 276 278 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 277 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 279 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 278 280 /* r1 = [0x00, 0xff] */ 279 281 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 280 282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), ··· 291 293 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 292 294 */ 293 295 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 294 - /* r1 = 0 or 295 - * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] 296 - */ 297 - BPF_ALU64_IMM(BPF_RSH, 
BPF_REG_1, 8), 298 296 /* error on OOB pointer computation */ 299 297 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 300 298 /* exit */ ··· 299 305 }, 300 306 .fixup_map_hash_8b = { 3 }, 301 307 /* not actually fully unbounded, but the bound is very high */ 302 - .errstr = "value 72057594021150720 makes map_value pointer be out of bounds", 303 - .result = REJECT 308 + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root", 309 + .result_unpriv = REJECT, 310 + .errstr = "value -4294967168 makes map_value pointer be out of bounds", 311 + .result = REJECT, 304 312 }, 305 313 { 306 314 "bounds check after wrapping 32-bit addition", ··· 534 538 BPF_EXIT_INSN(), 535 539 }, 536 540 .result = ACCEPT 541 + }, 542 + { 543 + "assigning 32bit bounds to 64bit for wA = 0, wB = wA", 544 + .insns = { 545 + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, 546 + offsetof(struct __sk_buff, data_end)), 547 + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 548 + offsetof(struct __sk_buff, data)), 549 + BPF_MOV32_IMM(BPF_REG_9, 0), 550 + BPF_MOV32_REG(BPF_REG_2, BPF_REG_9), 551 + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 552 + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2), 553 + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 554 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), 555 + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1), 556 + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0), 557 + BPF_MOV64_IMM(BPF_REG_0, 0), 558 + BPF_EXIT_INSN(), 559 + }, 560 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 561 + .result = ACCEPT, 562 + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 537 563 },
+21
tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
··· 1 + [ 2 + { 3 + "id": "83be", 4 + "name": "Create FQ-PIE with invalid number of flows", 5 + "category": [ 6 + "qdisc", 7 + "fq_pie" 8 + ], 9 + "setup": [ 10 + "$IP link add dev $DUMMY type dummy || /bin/true" 11 + ], 12 + "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536", 13 + "expExitCode": "2", 14 + "verifyCmd": "$TC qdisc show dev $DUMMY", 15 + "matchPattern": "qdisc", 16 + "matchCount": "0", 17 + "teardown": [ 18 + "$IP link del dev $DUMMY" 19 + ] 20 + } 21 + ]