Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

include/net/sock.h
7b50ecfcc6cd ("net: Rename ->stream_memory_read to ->sock_is_readable")
4c1e34c0dbff ("vsock: Enable y2038 safe timeval for timeout")

drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
0daa55d033b0 ("octeontx2-af: cn10k: debugfs for dumping LMTST map table")
e77bcdd1f639 ("octeontx2-af: Display all enabled PF VF rsrc_alloc entries.")

Adjacent code addition in both cases, keep both.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1615 -934
+6 -5
Documentation/devicetree/bindings/mfd/brcm,cru.yaml
··· 32 32 "#size-cells": 33 33 const: 1 34 34 35 - pinctrl: 36 - $ref: ../pinctrl/brcm,ns-pinmux.yaml 37 - 38 35 patternProperties: 39 36 '^clock-controller@[a-f0-9]+$': 40 37 $ref: ../clock/brcm,iproc-clocks.yaml 38 + 39 + '^pin-controller@[a-f0-9]+$': 40 + $ref: ../pinctrl/brcm,ns-pinmux.yaml 41 41 42 42 '^thermal@[a-f0-9]+$': 43 43 $ref: ../thermal/brcm,ns-thermal.yaml ··· 73 73 "iprocfast", "sata1", "sata2"; 74 74 }; 75 75 76 - pinctrl { 76 + pin-controller@1c0 { 77 77 compatible = "brcm,bcm4708-pinmux"; 78 - offset = <0x1c0>; 78 + reg = <0x1c0 0x24>; 79 + reg-names = "cru_gpio_control"; 79 80 }; 80 81 81 82 thermal@2c0 {
+13 -18
Documentation/devicetree/bindings/pinctrl/brcm,ns-pinmux.yaml
··· 17 17 18 18 A list of pins varies across chipsets so few bindings are available. 19 19 20 - Node of the pinmux must be nested in the CRU (Central Resource Unit) "syscon" 21 - node. 22 - 23 20 properties: 24 21 compatible: 25 22 enum: ··· 24 27 - brcm,bcm4709-pinmux 25 28 - brcm,bcm53012-pinmux 26 29 27 - offset: 28 - description: offset of pin registers in the CRU block 30 + reg: 29 31 maxItems: 1 30 - $ref: /schemas/types.yaml#/definitions/uint32-array 32 + 33 + reg-names: 34 + const: cru_gpio_control 31 35 32 36 patternProperties: 33 37 '-pins$': ··· 70 72 uart1_grp ] 71 73 72 74 required: 73 - - offset 75 + - reg 76 + - reg-names 74 77 75 78 additionalProperties: false 76 79 77 80 examples: 78 81 - | 79 - cru@1800c100 { 80 - compatible = "syscon", "simple-mfd"; 81 - reg = <0x1800c100 0x1a4>; 82 + pin-controller@1800c1c0 { 83 + compatible = "brcm,bcm4708-pinmux"; 84 + reg = <0x1800c1c0 0x24>; 85 + reg-names = "cru_gpio_control"; 82 86 83 - pinctrl { 84 - compatible = "brcm,bcm4708-pinmux"; 85 - offset = <0xc0>; 86 - 87 - spi-pins { 88 - function = "spi"; 89 - groups = "spi_grp"; 90 - }; 87 + spi-pins { 88 + function = "spi"; 89 + groups = "spi_grp"; 91 90 }; 92 91 };
+1
Documentation/userspace-api/ioctl/ioctl-number.rst
··· 104 104 '8' all SNP8023 advanced NIC card 105 105 <mailto:mcr@solidum.com> 106 106 ';' 64-7F linux/vfio.h 107 + '=' 00-3f uapi/linux/ptp_clock.h <mailto:richardcochran@gmail.com> 107 108 '@' 00-0F linux/radeonfb.h conflict! 108 109 '@' 00-0F drivers/video/aty/aty128fb.c conflict! 109 110 'A' 00-1F linux/apm_bios.h conflict!
+14 -1
MAINTAINERS
··· 5464 5464 F: include/uapi/linux/devlink.h 5465 5465 F: net/core/devlink.c 5466 5466 5467 + DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT 5468 + M: Christoph Niedermaier <cniedermaier@dh-electronics.com> 5469 + L: kernel@dh-electronics.com 5470 + S: Maintained 5471 + F: arch/arm/boot/dts/imx6*-dhcom-* 5472 + 5473 + DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT 5474 + M: Marek Vasut <marex@denx.de> 5475 + L: kernel@dh-electronics.com 5476 + S: Maintained 5477 + F: arch/arm/boot/dts/stm32mp1*-dhcom-* 5478 + F: arch/arm/boot/dts/stm32mp1*-dhcor-* 5479 + 5467 5480 DIALOG SEMICONDUCTOR DRIVERS 5468 5481 M: Support Opensource <support.opensource@diasemi.com> 5469 5482 S: Supported ··· 11297 11284 F: drivers/net/ethernet/marvell/octeontx2/af/ 11298 11285 11299 11286 MARVELL PRESTERA ETHERNET SWITCH DRIVER 11300 - M: Vadym Kochan <vkochan@marvell.com> 11301 11287 M: Taras Chornyi <tchornyi@marvell.com> 11302 11288 S: Supported 11303 11289 W: https://github.com/Marvell-switching/switchdev-prestera ··· 20364 20352 M: Thomas Gleixner <tglx@linutronix.de> 20365 20353 M: Ingo Molnar <mingo@redhat.com> 20366 20354 M: Borislav Petkov <bp@alien8.de> 20355 + M: Dave Hansen <dave.hansen@linux.intel.com> 20367 20356 M: x86@kernel.org 20368 20357 R: "H. Peter Anvin" <hpa@zytor.com> 20369 20358 L: linux-kernel@vger.kernel.org
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 15 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc6 5 + EXTRAVERSION = -rc7 6 6 NAME = Opossums on Parade 7 7 8 8 # *DOCUMENTATION*
+1
arch/arm/Kconfig
··· 92 92 select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL 93 93 select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG 94 94 select HAVE_FUNCTION_TRACER if !XIP_KERNEL 95 + select HAVE_FUTEX_CMPXCHG if FUTEX 95 96 select HAVE_GCC_PLUGINS 96 97 select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) 97 98 select HAVE_IRQ_TIME_ACCOUNTING
+3
arch/arm/boot/compressed/decompress.c
··· 47 47 #endif 48 48 49 49 #ifdef CONFIG_KERNEL_XZ 50 + /* Prevent KASAN override of string helpers in decompressor */ 51 + #undef memmove 50 52 #define memmove memmove 53 + #undef memcpy 51 54 #define memcpy memcpy 52 55 #include "../../../../lib/decompress_unxz.c" 53 56 #endif
+1 -1
arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
··· 112 112 pinctrl-names = "default"; 113 113 pinctrl-0 = <&gmac_rgmii_pins>; 114 114 phy-handle = <&phy1>; 115 - phy-mode = "rgmii"; 115 + phy-mode = "rgmii-id"; 116 116 status = "okay"; 117 117 }; 118 118
+3 -1
arch/arm/include/asm/uaccess.h
··· 176 176 register unsigned long __l asm("r1") = __limit; \ 177 177 register int __e asm("r0"); \ 178 178 unsigned int __ua_flags = uaccess_save_and_enable(); \ 179 + int __tmp_e; \ 179 180 switch (sizeof(*(__p))) { \ 180 181 case 1: \ 181 182 if (sizeof((x)) >= 8) \ ··· 204 203 break; \ 205 204 default: __e = __get_user_bad(); break; \ 206 205 } \ 206 + __tmp_e = __e; \ 207 207 uaccess_restore(__ua_flags); \ 208 208 x = (typeof(*(p))) __r2; \ 209 - __e; \ 209 + __tmp_e; \ 210 210 }) 211 211 212 212 #define get_user(x, p) \
+2 -2
arch/arm/kernel/head.S
··· 253 253 add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER) 254 254 ldr r6, =(_end - 1) 255 255 adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) 256 - #ifdef CONFIG_CPU_ENDIAN_BE8 256 + #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 257 257 str r8, [r5, #4] @ Save physical start of kernel (BE) 258 258 #else 259 259 str r8, [r5] @ Save physical start of kernel (LE) ··· 266 266 bls 1b 267 267 eor r3, r3, r7 @ Remove the MMU flags 268 268 adr_l r5, kernel_sec_end @ _pa(kernel_sec_end) 269 - #ifdef CONFIG_CPU_ENDIAN_BE8 269 + #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 270 270 str r3, [r5, #4] @ Save physical end of kernel (BE) 271 271 #else 272 272 str r3, [r5] @ Save physical end of kernel (LE)
+1 -1
arch/arm/kernel/traps.c
··· 136 136 for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { 137 137 if (p >= bottom && p < top) { 138 138 unsigned long val; 139 - if (get_kernel_nofault(val, (unsigned long *)p)) 139 + if (!get_kernel_nofault(val, (unsigned long *)p)) 140 140 sprintf(str + i * 9, " %08lx", val); 141 141 else 142 142 sprintf(str + i * 9, " ????????");
+5 -1
arch/arm/kernel/vmlinux-xip.lds.S
··· 40 40 ARM_DISCARD 41 41 *(.alt.smp.init) 42 42 *(.pv_table) 43 + #ifndef CONFIG_ARM_UNWIND 44 + *(.ARM.exidx) *(.ARM.exidx.*) 45 + *(.ARM.extab) *(.ARM.extab.*) 46 + #endif 43 47 } 44 48 45 49 . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); ··· 176 172 ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA") 177 173 #endif 178 174 179 - #ifdef CONFIG_ARM_MPU 175 + #if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST) 180 176 /* 181 177 * Due to PMSAv7 restriction on base address and size we have to 182 178 * enforce minimal alignment restrictions. It was seen that weaker
+1
arch/arm/mm/proc-macros.S
··· 340 340 341 341 .macro define_tlb_functions name:req, flags_up:req, flags_smp 342 342 .type \name\()_tlb_fns, #object 343 + .align 2 343 344 ENTRY(\name\()_tlb_fns) 344 345 .long \name\()_flush_user_tlb_range 345 346 .long \name\()_flush_kern_tlb_range
+1 -1
arch/arm/probes/kprobes/core.c
··· 439 439 440 440 #endif /* !CONFIG_THUMB2_KERNEL */ 441 441 442 - int __init arch_init_kprobes() 442 + int __init arch_init_kprobes(void) 443 443 { 444 444 arm_probes_decode_init(); 445 445 #ifdef CONFIG_THUMB2_KERNEL
+1 -1
arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
··· 75 75 pinctrl-0 = <&emac_rgmii_pins>; 76 76 phy-supply = <&reg_gmac_3v3>; 77 77 phy-handle = <&ext_rgmii_phy>; 78 - phy-mode = "rgmii"; 78 + phy-mode = "rgmii-id"; 79 79 status = "okay"; 80 80 }; 81 81
+5 -3
arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts
··· 70 70 regulator-name = "rst-usb-eth2"; 71 71 pinctrl-names = "default"; 72 72 pinctrl-0 = <&pinctrl_usb_eth2>; 73 - gpio = <&gpio3 2 GPIO_ACTIVE_LOW>; 73 + gpio = <&gpio3 2 GPIO_ACTIVE_HIGH>; 74 + enable-active-high; 75 + regulator-always-on; 74 76 }; 75 77 76 78 reg_vdd_5v: regulator-5v { ··· 97 95 clocks = <&osc_can>; 98 96 interrupt-parent = <&gpio4>; 99 97 interrupts = <28 IRQ_TYPE_EDGE_FALLING>; 100 - spi-max-frequency = <100000>; 98 + spi-max-frequency = <10000000>; 101 99 vdd-supply = <&reg_vdd_3v3>; 102 100 xceiver-supply = <&reg_vdd_5v>; 103 101 }; ··· 113 111 &fec1 { 114 112 pinctrl-names = "default"; 115 113 pinctrl-0 = <&pinctrl_enet>; 116 - phy-connection-type = "rgmii"; 114 + phy-connection-type = "rgmii-rxid"; 117 115 phy-handle = <&ethphy>; 118 116 status = "okay"; 119 117
+5 -3
arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
··· 91 91 reg_vdd_soc: BUCK1 { 92 92 regulator-name = "buck1"; 93 93 regulator-min-microvolt = <800000>; 94 - regulator-max-microvolt = <900000>; 94 + regulator-max-microvolt = <850000>; 95 95 regulator-boot-on; 96 96 regulator-always-on; 97 97 regulator-ramp-delay = <3125>; 98 + nxp,dvs-run-voltage = <850000>; 99 + nxp,dvs-standby-voltage = <800000>; 98 100 }; 99 101 100 102 reg_vdd_arm: BUCK2 { ··· 113 111 reg_vdd_dram: BUCK3 { 114 112 regulator-name = "buck3"; 115 113 regulator-min-microvolt = <850000>; 116 - regulator-max-microvolt = <900000>; 114 + regulator-max-microvolt = <950000>; 117 115 regulator-boot-on; 118 116 regulator-always-on; 119 117 }; ··· 152 150 153 151 reg_vdd_snvs: LDO2 { 154 152 regulator-name = "ldo2"; 155 - regulator-min-microvolt = <850000>; 153 + regulator-min-microvolt = <800000>; 156 154 regulator-max-microvolt = <900000>; 157 155 regulator-boot-on; 158 156 regulator-always-on;
+2 -1
arch/arm64/boot/dts/qcom/sm8250.dtsi
··· 2590 2590 power-domains = <&dispcc MDSS_GDSC>; 2591 2591 2592 2592 clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, 2593 + <&gcc GCC_DISP_HF_AXI_CLK>, 2593 2594 <&gcc GCC_DISP_SF_AXI_CLK>, 2594 2595 <&dispcc DISP_CC_MDSS_MDP_CLK>; 2595 - clock-names = "iface", "nrt_bus", "core"; 2596 + clock-names = "iface", "bus", "nrt_bus", "core"; 2596 2597 2597 2598 assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>; 2598 2599 assigned-clock-rates = <460000000>;
+5
arch/arm64/net/bpf_jit_comp.c
··· 1136 1136 return prog; 1137 1137 } 1138 1138 1139 + u64 bpf_jit_alloc_exec_limit(void) 1140 + { 1141 + return BPF_JIT_REGION_SIZE; 1142 + } 1143 + 1139 1144 void *bpf_jit_alloc_exec(unsigned long size) 1140 1145 { 1141 1146 return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
+1 -1
arch/nds32/kernel/ftrace.c
··· 6 6 7 7 #ifndef CONFIG_DYNAMIC_FTRACE 8 8 extern void (*ftrace_trace_function)(unsigned long, unsigned long, 9 - struct ftrace_ops*, struct pt_regs*); 9 + struct ftrace_ops*, struct ftrace_regs*); 10 10 extern void ftrace_graph_caller(void); 11 11 12 12 noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+1
arch/nios2/platform/Kconfig.platform
··· 37 37 38 38 config NIOS2_DTB_SOURCE_BOOL 39 39 bool "Compile and link device tree into kernel image" 40 + depends on !COMPILE_TEST 40 41 help 41 42 This allows you to specify a dts (device tree source) file 42 43 which will be compiled and linked into the kernel image.
+7 -1
arch/riscv/net/bpf_jit_core.c
··· 125 125 126 126 if (i == NR_JIT_ITERATIONS) { 127 127 pr_err("bpf-jit: image did not converge in <%d passes!\n", i); 128 - bpf_jit_binary_free(jit_data->header); 128 + if (jit_data->header) 129 + bpf_jit_binary_free(jit_data->header); 129 130 prog = orig_prog; 130 131 goto out_offset; 131 132 } ··· 165 164 bpf_jit_prog_release_other(prog, prog == orig_prog ? 166 165 tmp : orig_prog); 167 166 return prog; 167 + } 168 + 169 + u64 bpf_jit_alloc_exec_limit(void) 170 + { 171 + return BPF_JIT_REGION_SIZE; 168 172 } 169 173 170 174 void *bpf_jit_alloc_exec(unsigned long size)
+2 -1
arch/x86/include/asm/kvm_host.h
··· 702 702 703 703 struct kvm_pio_request pio; 704 704 void *pio_data; 705 - void *guest_ins_data; 705 + void *sev_pio_data; 706 + unsigned sev_pio_count; 706 707 707 708 u8 event_exit_inst_len; 708 709
+3 -3
arch/x86/kvm/mmu/mmu.c
··· 4596 4596 unsigned bit; 4597 4597 bool wp; 4598 4598 4599 - if (!is_cr4_pke(mmu)) { 4600 - mmu->pkru_mask = 0; 4599 + mmu->pkru_mask = 0; 4600 + 4601 + if (!is_cr4_pke(mmu)) 4601 4602 return; 4602 - } 4603 4603 4604 4604 wp = is_cr0_wp(mmu); 4605 4605
+7
arch/x86/kvm/svm/sev.c
··· 1484 1484 goto e_free_trans; 1485 1485 } 1486 1486 1487 + /* 1488 + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP 1489 + * encrypts the written data with the guest's key, and the cache may 1490 + * contain dirty, unencrypted data. 1491 + */ 1492 + sev_clflush_pages(guest_page, n); 1493 + 1487 1494 /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ 1488 1495 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; 1489 1496 data.guest_address |= sev_me_mask;
+6 -11
arch/x86/kvm/vmx/vmx.c
··· 6305 6305 6306 6306 /* 6307 6307 * If we are running L2 and L1 has a new pending interrupt 6308 - * which can be injected, we should re-evaluate 6309 - * what should be done with this new L1 interrupt. 6310 - * If L1 intercepts external-interrupts, we should 6311 - * exit from L2 to L1. Otherwise, interrupt should be 6312 - * delivered directly to L2. 6308 + * which can be injected, this may cause a vmexit or it may 6309 + * be injected into L2. Either way, this interrupt will be 6310 + * processed via KVM_REQ_EVENT, not RVI, because we do not use 6311 + * virtual interrupt delivery to inject L1 interrupts into L2. 6313 6312 */ 6314 - if (is_guest_mode(vcpu) && max_irr_updated) { 6315 - if (nested_exit_on_intr(vcpu)) 6316 - kvm_vcpu_exiting_guest_mode(vcpu); 6317 - else 6318 - kvm_make_request(KVM_REQ_EVENT, vcpu); 6319 - } 6313 + if (is_guest_mode(vcpu) && max_irr_updated) 6314 + kvm_make_request(KVM_REQ_EVENT, vcpu); 6320 6315 } else { 6321 6316 max_irr = kvm_lapic_find_highest_irr(vcpu); 6322 6317 }
+104 -48
arch/x86/kvm/x86.c
··· 6906 6906 } 6907 6907 6908 6908 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 6909 - unsigned short port, void *val, 6909 + unsigned short port, 6910 6910 unsigned int count, bool in) 6911 6911 { 6912 6912 vcpu->arch.pio.port = port; ··· 6914 6914 vcpu->arch.pio.count = count; 6915 6915 vcpu->arch.pio.size = size; 6916 6916 6917 - if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { 6918 - vcpu->arch.pio.count = 0; 6917 + if (!kernel_pio(vcpu, vcpu->arch.pio_data)) 6919 6918 return 1; 6920 - } 6921 6919 6922 6920 vcpu->run->exit_reason = KVM_EXIT_IO; 6923 6921 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; ··· 6927 6929 return 0; 6928 6930 } 6929 6931 6932 + static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size, 6933 + unsigned short port, unsigned int count) 6934 + { 6935 + WARN_ON(vcpu->arch.pio.count); 6936 + memset(vcpu->arch.pio_data, 0, size * count); 6937 + return emulator_pio_in_out(vcpu, size, port, count, true); 6938 + } 6939 + 6940 + static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) 6941 + { 6942 + int size = vcpu->arch.pio.size; 6943 + unsigned count = vcpu->arch.pio.count; 6944 + memcpy(val, vcpu->arch.pio_data, size * count); 6945 + trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); 6946 + vcpu->arch.pio.count = 0; 6947 + } 6948 + 6930 6949 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, 6931 6950 unsigned short port, void *val, unsigned int count) 6932 6951 { 6933 - int ret; 6952 + if (vcpu->arch.pio.count) { 6953 + /* Complete previous iteration.
*/ 6954 + } else { 6955 + int r = __emulator_pio_in(vcpu, size, port, count); 6956 + if (!r) 6957 + return r; 6934 6958 6935 - if (vcpu->arch.pio.count) 6936 - goto data_avail; 6937 - 6938 - memset(vcpu->arch.pio_data, 0, size * count); 6939 - 6940 - ret = emulator_pio_in_out(vcpu, size, port, val, count, true); 6941 - if (ret) { 6942 - data_avail: 6943 - memcpy(val, vcpu->arch.pio_data, size * count); 6944 - trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); 6945 - vcpu->arch.pio.count = 0; 6946 - return 1; 6959 + /* Results already available, fall through. */ 6947 6960 } 6948 6961 6949 - return 0; 6962 + WARN_ON(count != vcpu->arch.pio.count); 6963 + complete_emulator_pio_in(vcpu, val); 6964 + return 1; 6950 6965 } 6951 6966 6952 6967 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, ··· 6974 6963 unsigned short port, const void *val, 6975 6964 unsigned int count) 6976 6965 { 6966 + int ret; 6967 + 6977 6968 memcpy(vcpu->arch.pio_data, val, size * count); 6978 6969 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); 6979 - return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 6970 + ret = emulator_pio_in_out(vcpu, size, port, count, false); 6971 + if (ret) 6972 + vcpu->arch.pio.count = 0; 6973 + 6974 + return ret; 6980 6975 } 6981 6976 6982 6977 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, ··· 9660 9643 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) 9661 9644 break; 9662 9645 9663 - if (unlikely(kvm_vcpu_exit_request(vcpu))) { 9646 + if (vcpu->arch.apicv_active) 9647 + static_call(kvm_x86_sync_pir_to_irr)(vcpu); 9648 + 9649 + if (unlikely(kvm_vcpu_exit_request(vcpu))) { 9664 9650 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; 9665 9651 break; 9666 9652 } 9667 - 9668 - if (vcpu->arch.apicv_active) 9669 - static_call(kvm_x86_sync_pir_to_irr)(vcpu); 9670 - } 9653 + } 9671 9654 9672 9655 /* 9673 9656 * Do this here before restoring debug registers on the host.
And ··· 12385 12368 } 12386 12369 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read); 12387 12370 12388 - static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) 12389 - { 12390 - memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data, 12391 - vcpu->arch.pio.count * vcpu->arch.pio.size); 12392 - vcpu->arch.pio.count = 0; 12371 + static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 12372 + unsigned int port); 12393 12373 12374 + static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu) 12375 + { 12376 + int size = vcpu->arch.pio.size; 12377 + int port = vcpu->arch.pio.port; 12378 + 12379 + vcpu->arch.pio.count = 0; 12380 + if (vcpu->arch.sev_pio_count) 12381 + return kvm_sev_es_outs(vcpu, size, port); 12394 12382 return 1; 12395 12383 } 12396 12384 12397 12385 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 12398 - unsigned int port, void *data, unsigned int count) 12386 + unsigned int port) 12399 12387 { 12400 - int ret; 12388 + for (;;) { 12389 + unsigned int count = 12390 + min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 12391 + int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); 12401 12392 12402 - ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port, 12403 - data, count); 12404 - if (ret) 12405 - return ret; 12393 + /* memcpy done already by emulator_pio_out. */ 12394 + vcpu->arch.sev_pio_count -= count; 12395 + vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size; 12396 + if (!ret) 12397 + break; 12406 12398 12407 - vcpu->arch.pio.count = 0; 12399 + /* Emulation done by the kernel.
*/ 12400 + if (!vcpu->arch.sev_pio_count) 12401 + return 1; 12402 + } 12408 12403 12404 + vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; 12409 12405 return 0; 12410 12406 } 12411 12407 12412 12408 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 12413 - unsigned int port, void *data, unsigned int count) 12414 - { 12415 - int ret; 12409 + unsigned int port); 12416 12410 12417 - ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port, 12418 - data, count); 12419 - if (ret) { 12420 - vcpu->arch.pio.count = 0; 12421 - } else { 12422 - vcpu->arch.guest_ins_data = data; 12423 - vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; 12411 + static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu) 12412 + { 12413 + unsigned count = vcpu->arch.pio.count; 12414 + complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); 12415 + vcpu->arch.sev_pio_count -= count; 12416 + vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size; 12417 + } 12418 + 12419 + static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) 12420 + { 12421 + int size = vcpu->arch.pio.size; 12422 + int port = vcpu->arch.pio.port; 12423 + 12424 + advance_sev_es_emulated_ins(vcpu); 12425 + if (vcpu->arch.sev_pio_count) 12426 + return kvm_sev_es_ins(vcpu, size, port); 12427 + return 1; 12428 + } 12429 + 12430 + static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 12431 + unsigned int port) 12432 + { 12433 + for (;;) { 12434 + unsigned int count = 12435 + min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 12436 + if (!__emulator_pio_in(vcpu, size, port, count)) 12437 + break; 12438 + 12439 + /* Emulation done by the kernel.
*/ 12440 + advance_sev_es_emulated_ins(vcpu); 12441 + if (!vcpu->arch.sev_pio_count) 12442 + return 1; 12424 12443 } 12425 12444 12445 + vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; 12426 12446 return 0; 12427 12447 } 12428 12448 ··· 12467 12413 unsigned int port, void *data, unsigned int count, 12468 12414 int in) 12469 12415 { 12470 - return in ? kvm_sev_es_ins(vcpu, size, port, data, count) 12471 - : kvm_sev_es_outs(vcpu, size, port, data, count); 12416 + vcpu->arch.sev_pio_data = data; 12417 + vcpu->arch.sev_pio_count = count; 12418 + return in ? kvm_sev_es_ins(vcpu, size, port) 12419 + : kvm_sev_es_outs(vcpu, size, port); 12472 12420 } 12473 12421 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io); 12474 12422
+3 -2
block/blk-cgroup.c
··· 1897 1897 { 1898 1898 int rwd = blk_cgroup_io_type(bio), cpu; 1899 1899 struct blkg_iostat_set *bis; 1900 + unsigned long flags; 1900 1901 1901 1902 cpu = get_cpu(); 1902 1903 bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); 1903 - u64_stats_update_begin(&bis->sync); 1904 + flags = u64_stats_update_begin_irqsave(&bis->sync); 1904 1905 1905 1906 /* 1906 1907 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split ··· 1913 1912 } 1914 1913 bis->cur.ios[rwd]++; 1915 1914 1916 - u64_stats_update_end(&bis->sync); 1915 + u64_stats_update_end_irqrestore(&bis->sync, flags); 1917 1916 if (cgroup_subsys_on_dfl(io_cgrp_subsys)) 1918 1917 cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); 1919 1918 put_cpu();
+1
block/partitions/core.c
··· 423 423 device_del(pdev); 424 424 out_put: 425 425 put_device(pdev); 426 + return ERR_PTR(err); 426 427 out_put_disk: 427 428 put_disk(disk); 428 429 return ERR_PTR(err);
+1 -6
drivers/acpi/power.c
··· 1035 1035 list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) { 1036 1036 mutex_lock(&resource->resource_lock); 1037 1037 1038 - /* 1039 - * Turn off power resources in an unknown state too, because the 1040 - * platform firmware on some system expects the OS to turn off 1041 - * power resources without any users unconditionally. 1042 - */ 1043 1038 if (!resource->ref_count && 1044 - resource->state != ACPI_POWER_RESOURCE_STATE_OFF) { 1039 + resource->state == ACPI_POWER_RESOURCE_STATE_ON) { 1045 1040 acpi_handle_debug(resource->device.handle, "Turning OFF\n"); 1046 1041 __acpi_power_off(resource); 1047 1042 }
+2 -2
drivers/ata/sata_mv.c
··· 3896 3896 break; 3897 3897 3898 3898 default: 3899 - dev_err(host->dev, "BUG: invalid board index %u\n", board_idx); 3900 - return 1; 3899 + dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx); 3900 + return -EINVAL; 3901 3901 } 3902 3902 3903 3903 hpriv->hp_flags = hp_flags;
+3 -4
drivers/base/regmap/regcache-rbtree.c
··· 281 281 if (!blk) 282 282 return -ENOMEM; 283 283 284 + rbnode->block = blk; 285 + 284 286 if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { 285 287 present = krealloc(rbnode->cache_present, 286 288 BITS_TO_LONGS(blklen) * sizeof(*present), 287 289 GFP_KERNEL); 288 - if (!present) { 289 - kfree(blk); 290 + if (!present) 290 291 return -ENOMEM; 291 - } 292 292 293 293 memset(present + BITS_TO_LONGS(rbnode->blklen), 0, 294 294 (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) ··· 305 305 } 306 306 307 307 /* update the rbnode block, its size and the base register */ 308 - rbnode->block = blk; 309 308 rbnode->blklen = blklen; 310 309 rbnode->base_reg = base_reg; 311 310 rbnode->cache_present = present;
+1
drivers/hv/hyperv_vmbus.h
··· 13 13 #define _HYPERV_VMBUS_H 14 14 15 15 #include <linux/list.h> 16 + #include <linux/bitops.h> 16 17 #include <asm/sync_bitops.h> 17 18 #include <asm/hyperv-tlfs.h> 18 19 #include <linux/atomic.h>
+3 -2
drivers/infiniband/core/sa_query.c
··· 706 706 707 707 /* Construct the family header first */ 708 708 header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); 709 - memcpy(header->device_name, dev_name(&query->port->agent->device->dev), 710 - LS_DEVICE_NAME_MAX); 709 + strscpy_pad(header->device_name, 710 + dev_name(&query->port->agent->device->dev), 711 + LS_DEVICE_NAME_MAX); 711 712 header->port_num = query->port->port_num; 712 713 713 714 if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
+6 -3
drivers/infiniband/hw/hfi1/pio.c
··· 878 878 { 879 879 u64 reg; 880 880 struct pio_buf *pbuf; 881 + LIST_HEAD(wake_list); 881 882 882 883 if (!sc) 883 884 return; ··· 913 912 spin_unlock(&sc->release_lock); 914 913 915 914 write_seqlock(&sc->waitlock); 916 - while (!list_empty(&sc->piowait)) { 915 + if (!list_empty(&sc->piowait)) 916 + list_move(&sc->piowait, &wake_list); 917 + write_sequnlock(&sc->waitlock); 918 + while (!list_empty(&wake_list)) { 917 919 struct iowait *wait; 918 920 struct rvt_qp *qp; 919 921 struct hfi1_qp_priv *priv; 920 922 921 - wait = list_first_entry(&sc->piowait, struct iowait, list); 923 + wait = list_first_entry(&wake_list, struct iowait, list); 922 924 qp = iowait_to_qp(wait); 923 925 priv = qp->priv; 924 926 list_del_init(&priv->s_iowait.list); 925 927 priv->s_iowait.lock = NULL; 926 928 hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN); 927 929 } 928 - write_sequnlock(&sc->waitlock); 929 930 930 931 spin_unlock_irq(&sc->alloc_lock); 931 932 }
+2 -2
drivers/infiniband/hw/irdma/uk.c
··· 1092 1092 if (cq->avoid_mem_cflct) { 1093 1093 ext_cqe = (__le64 *)((u8 *)cqe + 32); 1094 1094 get_64bit_val(ext_cqe, 24, &qword7); 1095 - polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); 1095 + polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7); 1096 1096 } else { 1097 1097 peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size; 1098 1098 ext_cqe = cq->cq_base[peek_head].buf; 1099 1099 get_64bit_val(ext_cqe, 24, &qword7); 1100 - polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); 1100 + polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7); 1101 1101 if (!peek_head) 1102 1102 polarity ^= 1; 1103 1103 }
+6 -2
drivers/infiniband/hw/irdma/verbs.c
··· 3399 3399 } 3400 3400 3401 3401 if (cq_poll_info->ud_vlan_valid) { 3402 - entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK; 3403 - entry->wc_flags |= IB_WC_WITH_VLAN; 3402 + u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK; 3403 + 3404 3404 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; 3405 + if (vlan) { 3406 + entry->vlan_id = vlan; 3407 + entry->wc_flags |= IB_WC_WITH_VLAN; 3408 + } 3405 3409 } else { 3406 3410 entry->sl = 0; 3407 3411 }
+7 -6
drivers/infiniband/hw/irdma/ws.c
··· 330 330 331 331 tc_node->enable = true; 332 332 ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE); 333 - if (ret) 333 + if (ret) { 334 + vsi->unregister_qset(vsi, tc_node); 334 335 goto reg_err; 336 + } 335 337 } 336 338 ibdev_dbg(to_ibdev(vsi->dev), 337 339 "WS: Using node %d which represents VSI %d TC %d\n", ··· 352 350 } 353 351 goto exit; 354 352 353 + reg_err: 354 + irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE); 355 + list_del(&tc_node->siblings); 356 + irdma_free_node(vsi, tc_node); 355 357 leaf_add_err: 356 358 if (list_empty(&vsi_node->child_list_head)) { 357 359 if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE)) ··· 374 368 375 369 exit: 376 370 mutex_unlock(&vsi->dev->ws_mutex); 377 - return ret; 378 - 379 - reg_err: 380 - mutex_unlock(&vsi->dev->ws_mutex); 381 - irdma_ws_remove(vsi, user_pri); 382 371 return ret; 383 372 } 384 373
+1 -1
drivers/infiniband/hw/mlx5/mr.c
··· 1342 1342 goto err_2; 1343 1343 } 1344 1344 mr->mmkey.type = MLX5_MKEY_MR; 1345 - mr->desc_size = sizeof(struct mlx5_mtt); 1346 1345 mr->umem = umem; 1347 1346 set_mr_fields(dev, mr, umem->length, access_flags, iova); 1348 1347 kvfree(in); ··· 1535 1536 ib_umem_release(&odp->umem); 1536 1537 return ERR_CAST(mr); 1537 1538 } 1539 + xa_init(&mr->implicit_children); 1538 1540 1539 1541 odp->private = mr; 1540 1542 err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+2
drivers/infiniband/hw/mlx5/qp.c
··· 4458 4458 MLX5_SET(dctc, dctc, mtu, attr->path_mtu); 4459 4459 MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index); 4460 4460 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); 4461 + if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) 4462 + MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7); 4461 4463 4462 4464 err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in, 4463 4465 MLX5_ST_SZ_BYTES(create_dct_in), out,
+1
drivers/infiniband/hw/qedr/qedr.h
··· 455 455 /* synchronization objects used with iwarp ep */ 456 456 struct kref refcnt; 457 457 struct completion iwarp_cm_comp; 458 + struct completion qp_rel_comp; 458 459 unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */ 459 460 }; 460 461
+1 -1
drivers/infiniband/hw/qedr/qedr_iw_cm.c
··· 83 83 { 84 84 struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt); 85 85 86 - kfree(qp); 86 + complete(&qp->qp_rel_comp); 87 87 } 88 88 89 89 static void
+4 -1
drivers/infiniband/hw/qedr/verbs.c
··· 1357 1357 if (rdma_protocol_iwarp(&dev->ibdev, 1)) { 1358 1358 kref_init(&qp->refcnt); 1359 1359 init_completion(&qp->iwarp_cm_comp); 1360 + init_completion(&qp->qp_rel_comp); 1360 1361 } 1361 1362 1362 1363 qp->pd = pd; ··· 2858 2857 2859 2858 qedr_free_qp_resources(dev, qp, udata); 2860 2859 2861 - if (rdma_protocol_iwarp(&dev->ibdev, 1)) 2860 + if (rdma_protocol_iwarp(&dev->ibdev, 1)) { 2862 2861 qedr_iw_qp_rem_ref(&qp->ibqp); 2862 + wait_for_completion(&qp->qp_rel_comp); 2863 + } 2863 2864 2864 2865 return 0; 2865 2866 }
+23 -10
drivers/infiniband/hw/qib/qib_user_sdma.c
··· 602 602 /* 603 603 * How many pages in this iovec element? 604 604 */ 605 - static int qib_user_sdma_num_pages(const struct iovec *iov) 605 + static size_t qib_user_sdma_num_pages(const struct iovec *iov) 606 606 { 607 607 const unsigned long addr = (unsigned long) iov->iov_base; 608 608 const unsigned long len = iov->iov_len; ··· 658 658 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd, 659 659 struct qib_user_sdma_queue *pq, 660 660 struct qib_user_sdma_pkt *pkt, 661 - unsigned long addr, int tlen, int npages) 661 + unsigned long addr, int tlen, size_t npages) 662 662 { 663 663 struct page *pages[8]; 664 664 int i, j; ··· 722 722 unsigned long idx; 723 723 724 724 for (idx = 0; idx < niov; idx++) { 725 - const int npages = qib_user_sdma_num_pages(iov + idx); 725 + const size_t npages = qib_user_sdma_num_pages(iov + idx); 726 726 const unsigned long addr = (unsigned long) iov[idx].iov_base; 727 727 728 728 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, ··· 824 824 unsigned pktnw; 825 825 unsigned pktnwc; 826 826 int nfrags = 0; 827 - int npages = 0; 828 - int bytes_togo = 0; 827 + size_t npages = 0; 828 + size_t bytes_togo = 0; 829 829 int tiddma = 0; 830 830 int cfur; 831 831 ··· 885 885 886 886 npages += qib_user_sdma_num_pages(&iov[idx]); 887 887 888 - bytes_togo += slen; 888 + if (check_add_overflow(bytes_togo, slen, &bytes_togo) || 889 + bytes_togo > type_max(typeof(pkt->bytes_togo))) { 890 + ret = -EINVAL; 891 + goto free_pbc; 892 + } 889 893 pktnwc += slen >> 2; 890 894 idx++; 891 895 nfrags++; ··· 908 904 } 909 905 910 906 if (frag_size) { 911 - int tidsmsize, n; 912 - size_t pktsize; 907 + size_t tidsmsize, n, pktsize, sz, addrlimit; 913 908 914 909 n = npages*((2*PAGE_SIZE/frag_size)+1); 915 910 pktsize = struct_size(pkt, addr, n); ··· 926 923 else 927 924 tidsmsize = 0; 928 925 929 - pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL); 926 + if (check_add_overflow(pktsize, tidsmsize, &sz)) { 927 + ret = -EINVAL; 928 + goto free_pbc; 929 +
} 930 + pkt = kmalloc(sz, GFP_KERNEL); 930 931 if (!pkt) { 931 932 ret = -ENOMEM; 932 933 goto free_pbc; 933 934 } 934 935 pkt->largepkt = 1; 935 936 pkt->frag_size = frag_size; 936 - pkt->addrlimit = n + ARRAY_SIZE(pkt->addr); 937 + if (check_add_overflow(n, ARRAY_SIZE(pkt->addr), 938 + &addrlimit) || 939 + addrlimit > type_max(typeof(pkt->addrlimit))) { 940 + ret = -EINVAL; 941 + goto free_pbc; 942 + } 943 + pkt->addrlimit = addrlimit; 937 944 938 945 if (tiddma) { 939 946 char *tidsm = (char *)pkt + pktsize;
+1 -1
drivers/infiniband/sw/rdmavt/qp.c
··· 1223 1223 spin_lock(&rdi->n_qps_lock); 1224 1224 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) { 1225 1225 spin_unlock(&rdi->n_qps_lock); 1226 - ret = ENOMEM; 1226 + ret = -ENOMEM; 1227 1227 goto bail_ip; 1228 1228 } 1229 1229
+8 -8
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
··· 137 137 .name = "uc", 138 138 .cmd = HNAE3_DBG_CMD_MAC_UC, 139 139 .dentry = HNS3_DBG_DENTRY_MAC, 140 - .buf_len = HNS3_DBG_READ_LEN, 140 + .buf_len = HNS3_DBG_READ_LEN_128KB, 141 141 .init = hns3_dbg_common_file_init, 142 142 }, 143 143 { ··· 256 256 .name = "tqp", 257 257 .cmd = HNAE3_DBG_CMD_REG_TQP, 258 258 .dentry = HNS3_DBG_DENTRY_REG, 259 - .buf_len = HNS3_DBG_READ_LEN, 259 + .buf_len = HNS3_DBG_READ_LEN_128KB, 260 260 .init = hns3_dbg_common_file_init, 261 261 }, 262 262 { ··· 298 298 .name = "fd_tcam", 299 299 .cmd = HNAE3_DBG_CMD_FD_TCAM, 300 300 .dentry = HNS3_DBG_DENTRY_FD, 301 - .buf_len = HNS3_DBG_READ_LEN, 301 + .buf_len = HNS3_DBG_READ_LEN_1MB, 302 302 .init = hns3_dbg_common_file_init, 303 303 }, 304 304 { ··· 584 584 { "TAIL", 2 }, 585 585 { "HEAD", 2 }, 586 586 { "FBDNUM", 2 }, 587 - { "PKTNUM", 2 }, 587 + { "PKTNUM", 5 }, 588 588 { "COPYBREAK", 2 }, 589 589 { "RING_EN", 2 }, 590 590 { "RX_RING_EN", 2 }, ··· 687 687 { "HEAD", 2 }, 688 688 { "FBDNUM", 2 }, 689 689 { "OFFSET", 2 }, 690 - { "PKTNUM", 2 }, 690 + { "PKTNUM", 5 }, 691 691 { "RING_EN", 2 }, 692 692 { "TX_RING_EN", 2 }, 693 693 { "BASE_ADDR", 10 }, ··· 912 912 } 913 913 914 914 static const struct hns3_dbg_item tx_bd_info_items[] = { 915 - { "BD_IDX", 5 }, 916 - { "ADDRESS", 2 }, 915 + { "BD_IDX", 2 }, 916 + { "ADDRESS", 13 }, 917 917 { "VLAN_TAG", 2 }, 918 918 { "SIZE", 2 }, 919 919 { "T_CS_VLAN_TSO", 2 }, 920 920 { "OT_VLAN_TAG", 3 }, 921 - { "TV", 2 }, 921 + { "TV", 5 }, 922 922 { "OLT_VLAN_LEN", 2 }, 923 923 { "PAYLEN_OL4CS", 2 }, 924 924 { "BD_FE_SC_VLD", 2 },
+14 -16
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
··· 391 391 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len, 392 392 int *pos) 393 393 { 394 - struct hclge_dbg_bitmap_cmd *bitmap; 394 + struct hclge_dbg_bitmap_cmd req; 395 395 struct hclge_desc desc; 396 396 u16 qset_id, qset_num; 397 397 int ret; ··· 408 408 if (ret) 409 409 return ret; 410 410 411 - bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; 411 + req.bitmap = (u8)le32_to_cpu(desc.data[1]); 412 412 413 413 *pos += scnprintf(buf + *pos, len - *pos, 414 414 "%04u %#x %#x %#x %#x\n", 415 - qset_id, bitmap->bit0, bitmap->bit1, 416 - bitmap->bit2, bitmap->bit3); 415 + qset_id, req.bit0, req.bit1, req.bit2, 416 + req.bit3); 417 417 } 418 418 419 419 return 0; ··· 422 422 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len, 423 423 int *pos) 424 424 { 425 - struct hclge_dbg_bitmap_cmd *bitmap; 425 + struct hclge_dbg_bitmap_cmd req; 426 426 struct hclge_desc desc; 427 427 u8 pri_id, pri_num; 428 428 int ret; ··· 439 439 if (ret) 440 440 return ret; 441 441 442 - bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; 442 + req.bitmap = (u8)le32_to_cpu(desc.data[1]); 443 443 444 444 *pos += scnprintf(buf + *pos, len - *pos, 445 445 "%03u %#x %#x %#x\n", 446 - pri_id, bitmap->bit0, bitmap->bit1, 447 - bitmap->bit2); 446 + pri_id, req.bit0, req.bit1, req.bit2); 448 447 } 449 448 450 449 return 0; ··· 452 453 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len, 453 454 int *pos) 454 455 { 455 - struct hclge_dbg_bitmap_cmd *bitmap; 456 + struct hclge_dbg_bitmap_cmd req; 456 457 struct hclge_desc desc; 457 458 u8 pg_id; 458 459 int ret; ··· 465 466 if (ret) 466 467 return ret; 467 468 468 - bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; 469 + req.bitmap = (u8)le32_to_cpu(desc.data[1]); 469 470 470 471 *pos += scnprintf(buf + *pos, len - *pos, 471 472 "%03u %#x %#x %#x\n", 472 - pg_id, bitmap->bit0, bitmap->bit1, 473 - bitmap->bit2); 473 + pg_id, req.bit0, req.bit1, req.bit2); 
474 474 } 475 475 476 476 return 0; ··· 509 511 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len, 510 512 int *pos) 511 513 { 512 - struct hclge_dbg_bitmap_cmd *bitmap; 514 + struct hclge_dbg_bitmap_cmd req; 513 515 struct hclge_desc desc; 514 516 u8 port_id = 0; 515 517 int ret; ··· 519 521 if (ret) 520 522 return ret; 521 523 522 - bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; 524 + req.bitmap = (u8)le32_to_cpu(desc.data[1]); 523 525 524 526 *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n", 525 - bitmap->bit0); 527 + req.bit0); 526 528 *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n", 527 - bitmap->bit1); 529 + req.bit1); 528 530 529 531 return 0; 530 532 }
+6 -29
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 2930 2930 { 2931 2931 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2932 2932 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2933 - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), 2934 - hclge_wq, &hdev->service_task, 0); 2933 + mod_delayed_work(hclge_wq, &hdev->service_task, 0); 2935 2934 } 2936 2935 2937 2936 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2938 2937 { 2939 2938 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2939 + test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && 2940 2940 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2941 - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), 2942 - hclge_wq, &hdev->service_task, 0); 2941 + mod_delayed_work(hclge_wq, &hdev->service_task, 0); 2943 2942 } 2944 2943 2945 2944 static void hclge_errhand_task_schedule(struct hclge_dev *hdev) 2946 2945 { 2947 2946 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2948 2947 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) 2949 - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), 2950 - hclge_wq, &hdev->service_task, 0); 2948 + mod_delayed_work(hclge_wq, &hdev->service_task, 0); 2951 2949 } 2952 2950 2953 2951 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) 2954 2952 { 2955 2953 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2956 2954 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 2957 - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), 2958 - hclge_wq, &hdev->service_task, 2959 - delay_time); 2955 + mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); 2960 2956 } 2961 2957 2962 2958 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) ··· 3646 3650 hdev->num_msi_used += 1; 3647 3651 } 3648 3652 3649 - static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify, 3650 - const cpumask_t *mask) 3651 - { 3652 - struct hclge_dev *hdev = container_of(notify, struct hclge_dev, 3653 - 
affinity_notify); 3654 - 3655 - cpumask_copy(&hdev->affinity_mask, mask); 3656 - } 3657 - 3658 - static void hclge_irq_affinity_release(struct kref *ref) 3659 - { 3660 - } 3661 - 3662 3653 static void hclge_misc_affinity_setup(struct hclge_dev *hdev) 3663 3654 { 3664 3655 irq_set_affinity_hint(hdev->misc_vector.vector_irq, 3665 3656 &hdev->affinity_mask); 3666 - 3667 - hdev->affinity_notify.notify = hclge_irq_affinity_notify; 3668 - hdev->affinity_notify.release = hclge_irq_affinity_release; 3669 - irq_set_affinity_notifier(hdev->misc_vector.vector_irq, 3670 - &hdev->affinity_notify); 3671 3657 } 3672 3658 3673 3659 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) 3674 3660 { 3675 - irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL); 3676 3661 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); 3677 3662 } 3678 3663 ··· 13210 13233 { 13211 13234 pr_info("%s is initializing\n", HCLGE_NAME); 13212 13235 13213 - hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME); 13236 + hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); 13214 13237 if (!hclge_wq) { 13215 13238 pr_err("%s: failed to create workqueue\n", HCLGE_NAME); 13216 13239 return -ENOMEM;
-1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
··· 974 974 975 975 /* affinity mask and notify for misc interrupt */ 976 976 cpumask_t affinity_mask; 977 - struct irq_affinity_notify affinity_notify; 978 977 struct hclge_ptp *ptp; 979 978 struct devlink *devlink; 980 979 };
+4 -1
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 2232 2232 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 2233 2233 { 2234 2234 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2235 + test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && 2235 2236 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2236 2237 &hdev->state)) 2237 2238 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); ··· 3450 3449 3451 3450 hclgevf_init_rxd_adv_layout(hdev); 3452 3451 3452 + set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); 3453 + 3453 3454 hdev->last_reset_time = jiffies; 3454 3455 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3455 3456 HCLGEVF_DRIVER_NAME); ··· 3902 3899 { 3903 3900 pr_info("%s is initializing\n", HCLGEVF_NAME); 3904 3901 3905 - hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 3902 + hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 3906 3903 if (!hclgevf_wq) { 3907 3904 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3908 3905 return -ENOMEM;
+1
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
··· 146 146 HCLGEVF_STATE_REMOVING, 147 147 HCLGEVF_STATE_NIC_REGISTERED, 148 148 HCLGEVF_STATE_ROCE_REGISTERED, 149 + HCLGEVF_STATE_SERVICE_INITED, 149 150 /* task states */ 150 151 HCLGEVF_STATE_RST_SERVICE_SCHED, 151 152 HCLGEVF_STATE_RST_HANDLING,
+4 -14
drivers/net/ethernet/intel/ice/ice_lag.c
··· 100 100 */ 101 101 static void ice_lag_info_event(struct ice_lag *lag, void *ptr) 102 102 { 103 - struct net_device *event_netdev, *netdev_tmp; 104 103 struct netdev_notifier_bonding_info *info; 105 104 struct netdev_bonding_info *bonding_info; 105 + struct net_device *event_netdev; 106 106 const char *lag_netdev_name; 107 107 108 108 event_netdev = netdev_notifier_info_to_dev(ptr); ··· 122 122 netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n"); 123 123 goto lag_out; 124 124 } 125 - 126 - rcu_read_lock(); 127 - for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) { 128 - if (!netif_is_ice(netdev_tmp)) 129 - continue; 130 - 131 - if (netdev_tmp && netdev_tmp != lag->netdev && 132 - lag->peer_netdev != netdev_tmp) { 133 - dev_hold(netdev_tmp); 134 - lag->peer_netdev = netdev_tmp; 135 - } 136 - } 137 - rcu_read_unlock(); 138 125 139 126 if (bonding_info->slave.state) 140 127 ice_lag_set_backup(lag); ··· 305 318 break; 306 319 case NETDEV_BONDING_INFO: 307 320 ice_lag_info_event(lag, ptr); 321 + break; 322 + case NETDEV_UNREGISTER: 323 + ice_lag_unlink(lag, ptr); 308 324 break; 309 325 default: 310 326 break;
+3
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 1929 1929 */ 1930 1930 void ice_ptp_release(struct ice_pf *pf) 1931 1931 { 1932 + if (!test_bit(ICE_FLAG_PTP, pf->flags)) 1933 + return; 1934 + 1932 1935 /* Disable timestamping for both Tx and Rx */ 1933 1936 ice_ptp_cfg_timestamp(pf, false); 1934 1937
+115 -33
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
··· 316 316 317 317 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); 318 318 319 + static void get_lf_str_list(struct rvu_block block, int pcifunc, 320 + char *lfs) 321 + { 322 + int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max; 323 + 324 + for_each_set_bit(lf, block.lf.bmap, block.lf.max) { 325 + if (lf >= block.lf.max) 326 + break; 327 + 328 + if (block.fn_map[lf] != pcifunc) 329 + continue; 330 + 331 + if (lf == prev_lf + 1) { 332 + prev_lf = lf; 333 + seq = 1; 334 + continue; 335 + } 336 + 337 + if (seq) 338 + len += sprintf(lfs + len, "-%d,%d", prev_lf, lf); 339 + else 340 + len += (len ? sprintf(lfs + len, ",%d", lf) : 341 + sprintf(lfs + len, "%d", lf)); 342 + 343 + prev_lf = lf; 344 + seq = 0; 345 + } 346 + 347 + if (seq) 348 + len += sprintf(lfs + len, "-%d", prev_lf); 349 + 350 + lfs[len] = '\0'; 351 + } 352 + 353 + static int get_max_column_width(struct rvu *rvu) 354 + { 355 + int index, pf, vf, lf_str_size = 12, buf_size = 256; 356 + struct rvu_block block; 357 + u16 pcifunc; 358 + char *buf; 359 + 360 + buf = kzalloc(buf_size, GFP_KERNEL); 361 + if (!buf) 362 + return -ENOMEM; 363 + 364 + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 365 + for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { 366 + pcifunc = pf << 10 | vf; 367 + if (!pcifunc) 368 + continue; 369 + 370 + for (index = 0; index < BLK_COUNT; index++) { 371 + block = rvu->hw->block[index]; 372 + if (!strlen(block.name)) 373 + continue; 374 + 375 + get_lf_str_list(block, pcifunc, buf); 376 + if (lf_str_size <= strlen(buf)) 377 + lf_str_size = strlen(buf) + 1; 378 + } 379 + } 380 + } 381 + 382 + kfree(buf); 383 + return lf_str_size; 384 + } 385 + 319 386 /* Dumps current provisioning status of all RVU block LFs */ 320 387 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, 321 388 char __user *buffer, 322 389 size_t count, loff_t *ppos) 323 390 { 324 - int index, off = 0, flag = 0, go_back = 0, len = 0; 391 + int index, off = 0, flag = 0, len = 0, i = 0; 325 392 struct 
rvu *rvu = filp->private_data; 326 - int lf, pf, vf, pcifunc; 393 + int bytes_not_copied = 0; 327 394 struct rvu_block block; 328 - int bytes_not_copied; 329 - int lf_str_size = 12; 395 + int pf, vf, pcifunc; 330 396 int buf_size = 2048; 397 + int lf_str_size; 331 398 char *lfs; 332 399 char *buf; 333 400 ··· 405 338 buf = kzalloc(buf_size, GFP_KERNEL); 406 339 if (!buf) 407 340 return -ENOSPC; 341 + 342 + /* Get the maximum width of a column */ 343 + lf_str_size = get_max_column_width(rvu); 408 344 409 345 lfs = kzalloc(lf_str_size, GFP_KERNEL); 410 346 if (!lfs) { ··· 422 352 "%-*s", lf_str_size, 423 353 rvu->hw->block[index].name); 424 354 } 355 + 425 356 off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); 357 + bytes_not_copied = copy_to_user(buffer + (i * off), buf, off); 358 + if (bytes_not_copied) 359 + goto out; 360 + 361 + i++; 362 + *ppos += off; 426 363 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 427 364 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { 365 + off = 0; 366 + flag = 0; 428 367 pcifunc = pf << 10 | vf; 429 368 if (!pcifunc) 430 369 continue; 431 370 432 371 if (vf) { 433 372 sprintf(lfs, "PF%d:VF%d", pf, vf - 1); 434 - go_back = scnprintf(&buf[off], 435 - buf_size - 1 - off, 436 - "%-*s", lf_str_size, lfs); 373 + off = scnprintf(&buf[off], 374 + buf_size - 1 - off, 375 + "%-*s", lf_str_size, lfs); 437 376 } else { 438 377 sprintf(lfs, "PF%d", pf); 439 - go_back = scnprintf(&buf[off], 440 - buf_size - 1 - off, 441 - "%-*s", lf_str_size, lfs); 378 + off = scnprintf(&buf[off], 379 + buf_size - 1 - off, 380 + "%-*s", lf_str_size, lfs); 442 381 } 443 382 444 - off += go_back; 445 - for (index = 0; index < BLKTYPE_MAX; index++) { 383 + for (index = 0; index < BLK_COUNT; index++) { 446 384 block = rvu->hw->block[index]; 447 385 if (!strlen(block.name)) 448 386 continue; 449 387 len = 0; 450 388 lfs[len] = '\0'; 451 - for (lf = 0; lf < block.lf.max; lf++) { 452 - if (block.fn_map[lf] != pcifunc) 453 - continue; 389 + get_lf_str_list(block, 
pcifunc, lfs); 390 + if (strlen(lfs)) 454 391 flag = 1; 455 - len += sprintf(&lfs[len], "%d,", lf); 456 - } 457 392 458 - if (flag) 459 - len--; 460 - lfs[len] = '\0'; 461 393 off += scnprintf(&buf[off], buf_size - 1 - off, 462 394 "%-*s", lf_str_size, lfs); 463 - if (!strlen(lfs)) 464 - go_back += lf_str_size; 465 395 } 466 - if (!flag) 467 - off -= go_back; 468 - else 469 - flag = 0; 470 - off--; 471 - off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); 396 + if (flag) { 397 + off += scnprintf(&buf[off], 398 + buf_size - 1 - off, "\n"); 399 + bytes_not_copied = copy_to_user(buffer + 400 + (i * off), 401 + buf, off); 402 + if (bytes_not_copied) 403 + goto out; 404 + 405 + i++; 406 + *ppos += off; 407 + } 472 408 } 473 409 } 474 410 475 - bytes_not_copied = copy_to_user(buffer, buf, off); 411 + out: 476 412 kfree(lfs); 477 413 kfree(buf); 478 - 479 414 if (bytes_not_copied) 480 415 return -EFAULT; 481 416 482 - *ppos = off; 483 - return off; 417 + return *ppos; 484 418 } 485 419 486 420 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); ··· 668 594 if (cmd_buf) 669 595 ret = -EINVAL; 670 596 671 - if (!strncmp(subtoken, "help", 4) || ret < 0) { 597 + if (ret < 0 || !strncmp(subtoken, "help", 4)) { 672 598 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string); 673 599 goto qsize_write_done; 674 600 } ··· 1883 1809 u16 pcifunc; 1884 1810 char *str; 1885 1811 1812 + /* Ingress policers do not exist on all platforms */ 1813 + if (!nix_hw->ipolicer) 1814 + return 0; 1815 + 1886 1816 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 1887 1817 if (layer == BAND_PROF_INVAL_LAYER) 1888 1818 continue; ··· 1935 1857 struct nix_ipolicer *ipolicer; 1936 1858 int layer; 1937 1859 char *str; 1860 + 1861 + /* Ingress policers do not exist on all platforms */ 1862 + if (!nix_hw->ipolicer) 1863 + return 0; 1938 1864 1939 1865 seq_puts(m, "\nBandwidth profile resource free count\n"); 1940 1866 seq_puts(m, "=====================================\n");
+3
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 2583 2583 return; 2584 2584 2585 2585 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2586 + if (!nix_hw) 2587 + return; 2588 + 2586 2589 vlan = &nix_hw->txvlan; 2587 2590 2588 2591 mutex_lock(&vlan->rsrc_lock);
+12 -13
drivers/net/ethernet/mellanox/mlxsw/pci.c
··· 353 353 struct sk_buff *skb; 354 354 int err; 355 355 356 - elem_info->u.rdq.skb = NULL; 357 356 skb = netdev_alloc_skb_ip_align(NULL, buf_len); 358 357 if (!skb) 359 358 return -ENOMEM; 360 - 361 - /* Assume that wqe was previously zeroed. */ 362 359 363 360 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, 364 361 buf_len, DMA_FROM_DEVICE); ··· 594 597 struct pci_dev *pdev = mlxsw_pci->pdev; 595 598 struct mlxsw_pci_queue_elem_info *elem_info; 596 599 struct mlxsw_rx_info rx_info = {}; 597 - char *wqe; 600 + char wqe[MLXSW_PCI_WQE_SIZE]; 598 601 struct sk_buff *skb; 599 602 u16 byte_count; 600 603 int err; 601 604 602 605 elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); 603 - skb = elem_info->u.sdq.skb; 604 - if (!skb) 605 - return; 606 - wqe = elem_info->elem; 607 - mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); 606 + skb = elem_info->u.rdq.skb; 607 + memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE); 608 608 609 609 if (q->consumer_counter++ != consumer_counter_limit) 610 610 dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); 611 + 612 + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); 613 + if (err) { 614 + dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); 615 + goto out; 616 + } 617 + 618 + mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); 611 619 612 620 if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) { 613 621 rx_info.is_lag = true; ··· 649 647 skb_put(skb, byte_count); 650 648 mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); 651 649 652 - memset(wqe, 0, q->elem_size); 653 - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); 654 - if (err) 655 - dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); 650 + out: 656 651 /* Everything is set up, ring doorbell to pass elem to HW */ 657 652 q->producer_counter++; 658 653 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+31 -4
drivers/net/ethernet/microchip/lan743x_main.c
··· 1743 1743 ret = -EINVAL; 1744 1744 goto cleanup; 1745 1745 } 1746 + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, 1747 + DMA_BIT_MASK(64))) { 1748 + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, 1749 + DMA_BIT_MASK(32))) { 1750 + dev_warn(&tx->adapter->pdev->dev, 1751 + "lan743x_: No suitable DMA available\n"); 1752 + ret = -ENOMEM; 1753 + goto cleanup; 1754 + } 1755 + } 1746 1756 ring_allocation_size = ALIGN(tx->ring_size * 1747 1757 sizeof(struct lan743x_tx_descriptor), 1748 1758 PAGE_SIZE); ··· 1944 1934 index); 1945 1935 } 1946 1936 1947 - static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index) 1937 + static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, 1938 + gfp_t gfp) 1948 1939 { 1949 1940 struct net_device *netdev = rx->adapter->netdev; 1950 1941 struct device *dev = &rx->adapter->pdev->dev; ··· 1959 1948 1960 1949 descriptor = &rx->ring_cpu_ptr[index]; 1961 1950 buffer_info = &rx->buffer_info[index]; 1962 - skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA); 1951 + skb = __netdev_alloc_skb(netdev, buffer_length, gfp); 1963 1952 if (!skb) 1964 1953 return -ENOMEM; 1965 1954 dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE); ··· 2121 2110 2122 2111 /* save existing skb, allocate new skb and map to dma */ 2123 2112 skb = buffer_info->skb; 2124 - if (lan743x_rx_init_ring_element(rx, rx->last_head)) { 2113 + if (lan743x_rx_init_ring_element(rx, rx->last_head, 2114 + GFP_ATOMIC | GFP_DMA)) { 2125 2115 /* failed to allocate next skb. 2126 2116 * Memory is very low. 2127 2117 * Drop this packet and reuse buffer. 
··· 2288 2276 ret = -EINVAL; 2289 2277 goto cleanup; 2290 2278 } 2279 + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, 2280 + DMA_BIT_MASK(64))) { 2281 + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, 2282 + DMA_BIT_MASK(32))) { 2283 + dev_warn(&rx->adapter->pdev->dev, 2284 + "lan743x_: No suitable DMA available\n"); 2285 + ret = -ENOMEM; 2286 + goto cleanup; 2287 + } 2288 + } 2291 2289 ring_allocation_size = ALIGN(rx->ring_size * 2292 2290 sizeof(struct lan743x_rx_descriptor), 2293 2291 PAGE_SIZE); ··· 2337 2315 2338 2316 rx->last_head = 0; 2339 2317 for (index = 0; index < rx->ring_size; index++) { 2340 - ret = lan743x_rx_init_ring_element(rx, index); 2318 + ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL); 2341 2319 if (ret) 2342 2320 goto cleanup; 2343 2321 } 2344 2322 return 0; 2345 2323 2346 2324 cleanup: 2325 + netif_warn(rx->adapter, ifup, rx->adapter->netdev, 2326 + "Error allocating memory for LAN743x\n"); 2327 + 2347 2328 lan743x_rx_ring_cleanup(rx); 2348 2329 return ret; 2349 2330 } ··· 3044 3019 if (ret) { 3045 3020 netif_err(adapter, probe, adapter->netdev, 3046 3021 "lan743x_hardware_init returned %d\n", ret); 3022 + lan743x_pci_cleanup(adapter); 3023 + return ret; 3047 3024 } 3048 3025 3049 3026 /* open netdev when netdev is at running state while resume.
+11 -5
drivers/net/ethernet/netronome/nfp/bpf/main.c
··· 182 182 nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) 183 183 { 184 184 struct nfp_net *nn = netdev_priv(netdev); 185 - unsigned int max_mtu; 185 + struct nfp_bpf_vnic *bv; 186 + struct bpf_prog *prog; 186 187 187 188 if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) 188 189 return 0; 189 190 190 - max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; 191 - if (new_mtu > max_mtu) { 192 - nn_info(nn, "BPF offload active, MTU over %u not supported\n", 193 - max_mtu); 191 + if (nn->xdp_hw.prog) { 192 + prog = nn->xdp_hw.prog; 193 + } else { 194 + bv = nn->app_priv; 195 + prog = bv->tc_prog; 196 + } 197 + 198 + if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) { 199 + nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary"); 194 200 return -EBUSY; 195 201 } 196 202 return 0;
+2
drivers/net/ethernet/netronome/nfp/bpf/main.h
··· 560 560 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog); 561 561 int nfp_bpf_jit(struct nfp_prog *prog); 562 562 bool nfp_bpf_supported_opcode(u8 code); 563 + bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog, 564 + unsigned int mtu); 563 565 564 566 int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, 565 567 int prev_insn_idx);
+13 -4
drivers/net/ethernet/netronome/nfp/bpf/offload.c
··· 481 481 return 0; 482 482 } 483 483 484 + bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog, 485 + unsigned int mtu) 486 + { 487 + unsigned int fw_mtu, pkt_off; 488 + 489 + fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; 490 + pkt_off = min(prog->aux->max_pkt_offset, mtu); 491 + 492 + return fw_mtu < pkt_off; 493 + } 494 + 484 495 static int 485 496 nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog, 486 497 struct netlink_ext_ack *extack) 487 498 { 488 499 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; 489 - unsigned int fw_mtu, pkt_off, max_stack, max_prog_len; 500 + unsigned int max_stack, max_prog_len; 490 501 dma_addr_t dma_addr; 491 502 void *img; 492 503 int err; 493 504 494 - fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; 495 - pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu); 496 - if (fw_mtu < pkt_off) { 505 + if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) { 497 506 NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary"); 498 507 return -EOPNOTSUPP; 499 508 }
+2 -3
drivers/net/ethernet/nxp/lpc_eth.c
··· 1015 1015 napi_disable(&pldat->napi); 1016 1016 netif_stop_queue(ndev); 1017 1017 1018 - if (ndev->phydev) 1019 - phy_stop(ndev->phydev); 1020 - 1021 1018 spin_lock_irqsave(&pldat->lock, flags); 1022 1019 __lpc_eth_reset(pldat); 1023 1020 netif_carrier_off(ndev); ··· 1022 1025 writel(0, LPC_ENET_MAC2(pldat->net_base)); 1023 1026 spin_unlock_irqrestore(&pldat->lock, flags); 1024 1027 1028 + if (ndev->phydev) 1029 + phy_stop(ndev->phydev); 1025 1030 clk_disable_unprepare(pldat->clk); 1026 1031 1027 1032 return 0;
+1
drivers/net/ethernet/realtek/r8169_main.c
··· 156 156 { PCI_VDEVICE(REALTEK, 0x8129) }, 157 157 { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, 158 158 { PCI_VDEVICE(REALTEK, 0x8161) }, 159 + { PCI_VDEVICE(REALTEK, 0x8162) }, 159 160 { PCI_VDEVICE(REALTEK, 0x8167) }, 160 161 { PCI_VDEVICE(REALTEK, 0x8168) }, 161 162 { PCI_VDEVICE(NCUBE, 0x8168) },
+89 -67
drivers/net/phy/phy.c
··· 243 243 } 244 244 } 245 245 246 - int phy_ethtool_ksettings_set(struct phy_device *phydev, 247 - const struct ethtool_link_ksettings *cmd) 248 - { 249 - __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 250 - u8 autoneg = cmd->base.autoneg; 251 - u8 duplex = cmd->base.duplex; 252 - u32 speed = cmd->base.speed; 253 - 254 - if (cmd->base.phy_address != phydev->mdio.addr) 255 - return -EINVAL; 256 - 257 - linkmode_copy(advertising, cmd->link_modes.advertising); 258 - 259 - /* We make sure that we don't pass unsupported values in to the PHY */ 260 - linkmode_and(advertising, advertising, phydev->supported); 261 - 262 - /* Verify the settings we care about. */ 263 - if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE) 264 - return -EINVAL; 265 - 266 - if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising)) 267 - return -EINVAL; 268 - 269 - if (autoneg == AUTONEG_DISABLE && 270 - ((speed != SPEED_1000 && 271 - speed != SPEED_100 && 272 - speed != SPEED_10) || 273 - (duplex != DUPLEX_HALF && 274 - duplex != DUPLEX_FULL))) 275 - return -EINVAL; 276 - 277 - phydev->autoneg = autoneg; 278 - 279 - if (autoneg == AUTONEG_DISABLE) { 280 - phydev->speed = speed; 281 - phydev->duplex = duplex; 282 - } 283 - 284 - linkmode_copy(phydev->advertising, advertising); 285 - 286 - linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 287 - phydev->advertising, autoneg == AUTONEG_ENABLE); 288 - 289 - phydev->master_slave_set = cmd->base.master_slave_cfg; 290 - phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; 291 - 292 - /* Restart the PHY */ 293 - phy_start_aneg(phydev); 294 - 295 - return 0; 296 - } 297 - EXPORT_SYMBOL(phy_ethtool_ksettings_set); 298 - 299 246 void phy_ethtool_ksettings_get(struct phy_device *phydev, 300 247 struct ethtool_link_ksettings *cmd) 301 248 { 249 + mutex_lock(&phydev->lock); 302 250 linkmode_copy(cmd->link_modes.supported, phydev->supported); 303 251 linkmode_copy(cmd->link_modes.advertising, phydev->advertising); 304 252 
linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising); ··· 265 317 cmd->base.autoneg = phydev->autoneg; 266 318 cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl; 267 319 cmd->base.eth_tp_mdix = phydev->mdix; 320 + mutex_unlock(&phydev->lock); 268 321 } 269 322 EXPORT_SYMBOL(phy_ethtool_ksettings_get); 270 323 ··· 700 751 } 701 752 702 753 /** 754 + * _phy_start_aneg - start auto-negotiation for this PHY device 755 + * @phydev: the phy_device struct 756 + * 757 + * Description: Sanitizes the settings (if we're not autonegotiating 758 + * them), and then calls the driver's config_aneg function. 759 + * If the PHYCONTROL Layer is operating, we change the state to 760 + * reflect the beginning of Auto-negotiation or forcing. 761 + */ 762 + static int _phy_start_aneg(struct phy_device *phydev) 763 + { 764 + int err; 765 + 766 + lockdep_assert_held(&phydev->lock); 767 + 768 + if (!phydev->drv) 769 + return -EIO; 770 + 771 + if (AUTONEG_DISABLE == phydev->autoneg) 772 + phy_sanitize_settings(phydev); 773 + 774 + err = phy_config_aneg(phydev); 775 + if (err < 0) 776 + return err; 777 + 778 + if (phy_is_started(phydev)) 779 + err = phy_check_link_status(phydev); 780 + 781 + return err; 782 + } 783 + 784 + /** 703 785 * phy_start_aneg - start auto-negotiation for this PHY device 704 786 * @phydev: the phy_device struct 705 787 * ··· 743 763 { 744 764 int err; 745 765 746 - if (!phydev->drv) 747 - return -EIO; 748 - 749 766 mutex_lock(&phydev->lock); 750 - 751 - if (AUTONEG_DISABLE == phydev->autoneg) 752 - phy_sanitize_settings(phydev); 753 - 754 - err = phy_config_aneg(phydev); 755 - if (err < 0) 756 - goto out_unlock; 757 - 758 - if (phy_is_started(phydev)) 759 - err = phy_check_link_status(phydev); 760 - out_unlock: 767 + err = _phy_start_aneg(phydev); 761 768 mutex_unlock(&phydev->lock); 762 769 763 770 return err; ··· 766 799 767 800 return ret < 0 ? 
ret : 0; 768 801 } 802 + 803 + int phy_ethtool_ksettings_set(struct phy_device *phydev, 804 + const struct ethtool_link_ksettings *cmd) 805 + { 806 + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 807 + u8 autoneg = cmd->base.autoneg; 808 + u8 duplex = cmd->base.duplex; 809 + u32 speed = cmd->base.speed; 810 + 811 + if (cmd->base.phy_address != phydev->mdio.addr) 812 + return -EINVAL; 813 + 814 + linkmode_copy(advertising, cmd->link_modes.advertising); 815 + 816 + /* We make sure that we don't pass unsupported values in to the PHY */ 817 + linkmode_and(advertising, advertising, phydev->supported); 818 + 819 + /* Verify the settings we care about. */ 820 + if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE) 821 + return -EINVAL; 822 + 823 + if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising)) 824 + return -EINVAL; 825 + 826 + if (autoneg == AUTONEG_DISABLE && 827 + ((speed != SPEED_1000 && 828 + speed != SPEED_100 && 829 + speed != SPEED_10) || 830 + (duplex != DUPLEX_HALF && 831 + duplex != DUPLEX_FULL))) 832 + return -EINVAL; 833 + 834 + mutex_lock(&phydev->lock); 835 + phydev->autoneg = autoneg; 836 + 837 + if (autoneg == AUTONEG_DISABLE) { 838 + phydev->speed = speed; 839 + phydev->duplex = duplex; 840 + } 841 + 842 + linkmode_copy(phydev->advertising, advertising); 843 + 844 + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 845 + phydev->advertising, autoneg == AUTONEG_ENABLE); 846 + 847 + phydev->master_slave_set = cmd->base.master_slave_cfg; 848 + phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; 849 + 850 + /* Restart the PHY */ 851 + _phy_start_aneg(phydev); 852 + 853 + mutex_unlock(&phydev->lock); 854 + return 0; 855 + } 856 + EXPORT_SYMBOL(phy_ethtool_ksettings_set); 769 857 770 858 /** 771 859 * phy_speed_down - set speed to lowest speed supported by both link partners
+6
drivers/net/usb/lan78xx.c
··· 4122 4122 4123 4123 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1); 4124 4124 4125 + /* Reject broken descriptors. */ 4126 + if (dev->maxpacket == 0) { 4127 + ret = -ENODEV; 4128 + goto out4; 4129 + } 4130 + 4125 4131 /* driver requires remote-wakeup capability during autosuspend. */ 4126 4132 intf->needs_remote_wakeup = 1; 4127 4133
+1
drivers/net/usb/usbnet.c
··· 1792 1792 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); 1793 1793 if (dev->maxpacket == 0) { 1794 1794 /* that is a broken device */ 1795 + status = -ENODEV; 1795 1796 goto out4; 1796 1797 } 1797 1798
-1
drivers/net/vmxnet3/vmxnet3_drv.c
··· 3833 3833 vmxnet3_free_intr_resources(adapter); 3834 3834 3835 3835 netif_device_detach(netdev); 3836 - netif_tx_stop_all_queues(netdev); 3837 3836 3838 3837 /* Create wake-up filters. */ 3839 3838 pmConf = adapter->pm_conf;
+8
drivers/net/xen-netfront.c
··· 1730 1730 1731 1731 dev_dbg(&dev->dev, "%s\n", dev->nodename); 1732 1732 1733 + netif_tx_lock_bh(info->netdev); 1734 + netif_device_detach(info->netdev); 1735 + netif_tx_unlock_bh(info->netdev); 1736 + 1733 1737 xennet_disconnect_backend(info); 1734 1738 return 0; 1735 1739 } ··· 2355 2351 * domain a kick because we've probably just requeued some 2356 2352 * packets. 2357 2353 */ 2354 + netif_tx_lock_bh(np->netdev); 2355 + netif_device_attach(np->netdev); 2356 + netif_tx_unlock_bh(np->netdev); 2357 + 2358 2358 netif_carrier_on(np->netdev); 2359 2359 for (j = 0; j < num_queues; ++j) { 2360 2360 queue = &np->queues[j];
+2 -2
drivers/nfc/port100.c
··· 1006 1006 1007 1007 skb = port100_alloc_skb(dev, 0); 1008 1008 if (!skb) 1009 - return -ENOMEM; 1009 + return 0; 1010 1010 1011 1011 resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb); 1012 1012 if (IS_ERR(resp)) 1013 - return PTR_ERR(resp); 1013 + return 0; 1014 1014 1015 1015 if (resp->len < 8) 1016 1016 mask = 0;
+10 -19
drivers/pinctrl/bcm/pinctrl-ns.c
··· 5 5 6 6 #include <linux/err.h> 7 7 #include <linux/io.h> 8 - #include <linux/mfd/syscon.h> 9 8 #include <linux/module.h> 10 9 #include <linux/of.h> 11 10 #include <linux/of_device.h> ··· 12 13 #include <linux/pinctrl/pinctrl.h> 13 14 #include <linux/pinctrl/pinmux.h> 14 15 #include <linux/platform_device.h> 15 - #include <linux/regmap.h> 16 16 #include <linux/slab.h> 17 17 18 18 #define FLAG_BCM4708 BIT(1) ··· 22 24 struct device *dev; 23 25 unsigned int chipset_flag; 24 26 struct pinctrl_dev *pctldev; 25 - struct regmap *regmap; 26 - u32 offset; 27 + void __iomem *base; 27 28 28 29 struct pinctrl_desc pctldesc; 29 30 struct ns_pinctrl_group *groups; ··· 229 232 unset |= BIT(pin_number); 230 233 } 231 234 232 - regmap_read(ns_pinctrl->regmap, ns_pinctrl->offset, &tmp); 235 + tmp = readl(ns_pinctrl->base); 233 236 tmp &= ~unset; 234 - regmap_write(ns_pinctrl->regmap, ns_pinctrl->offset, tmp); 237 + writel(tmp, ns_pinctrl->base); 235 238 236 239 return 0; 237 240 } ··· 263 266 static int ns_pinctrl_probe(struct platform_device *pdev) 264 267 { 265 268 struct device *dev = &pdev->dev; 266 - struct device_node *np = dev->of_node; 267 269 const struct of_device_id *of_id; 268 270 struct ns_pinctrl *ns_pinctrl; 269 271 struct pinctrl_desc *pctldesc; 270 272 struct pinctrl_pin_desc *pin; 271 273 struct ns_pinctrl_group *group; 272 274 struct ns_pinctrl_function *function; 275 + struct resource *res; 273 276 int i; 274 277 275 278 ns_pinctrl = devm_kzalloc(dev, sizeof(*ns_pinctrl), GFP_KERNEL); ··· 287 290 return -EINVAL; 288 291 ns_pinctrl->chipset_flag = (uintptr_t)of_id->data; 289 292 290 - ns_pinctrl->regmap = syscon_node_to_regmap(of_get_parent(np)); 291 - if (IS_ERR(ns_pinctrl->regmap)) { 292 - int err = PTR_ERR(ns_pinctrl->regmap); 293 - 294 - dev_err(dev, "Failed to map pinctrl regs: %d\n", err); 295 - 296 - return err; 297 - } 298 - 299 - if (of_property_read_u32(np, "offset", &ns_pinctrl->offset)) { 300 - dev_err(dev, "Failed to get register offset\n"); 301 - 
return -ENOENT; 293 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 294 + "cru_gpio_control"); 295 + ns_pinctrl->base = devm_ioremap_resource(dev, res); 296 + if (IS_ERR(ns_pinctrl->base)) { 297 + dev_err(dev, "Failed to map pinctrl regs\n"); 298 + return PTR_ERR(ns_pinctrl->base); 302 299 } 303 300 304 301 memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
+31
drivers/pinctrl/pinctrl-amd.c
··· 840 840 .pin_config_group_set = amd_pinconf_group_set, 841 841 }; 842 842 843 + static void amd_gpio_irq_init(struct amd_gpio *gpio_dev) 844 + { 845 + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; 846 + unsigned long flags; 847 + u32 pin_reg, mask; 848 + int i; 849 + 850 + mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) | 851 + BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) | 852 + BIT(WAKE_CNTRL_OFF_S4); 853 + 854 + for (i = 0; i < desc->npins; i++) { 855 + int pin = desc->pins[i].number; 856 + const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin); 857 + 858 + if (!pd) 859 + continue; 860 + 861 + raw_spin_lock_irqsave(&gpio_dev->lock, flags); 862 + 863 + pin_reg = readl(gpio_dev->base + i * 4); 864 + pin_reg &= ~mask; 865 + writel(pin_reg, gpio_dev->base + i * 4); 866 + 867 + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 868 + } 869 + } 870 + 843 871 #ifdef CONFIG_PM_SLEEP 844 872 static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin) 845 873 { ··· 1003 975 dev_err(&pdev->dev, "Couldn't register pinctrl driver\n"); 1004 976 return PTR_ERR(gpio_dev->pctrl); 1005 977 } 978 + 979 + /* Disable and mask interrupts */ 980 + amd_gpio_irq_init(gpio_dev); 1006 981 1007 982 girq = &gpio_dev->gc.irq; 1008 983 girq->chip = &amd_gpio_irqchip;
+2 -2
drivers/pinctrl/stm32/pinctrl-stm32.c
··· 1644 1644 struct stm32_pinctrl_group *g = pctl->groups; 1645 1645 int i; 1646 1646 1647 - for (i = g->pin; i < g->pin + pctl->ngroups; i++) 1648 - stm32_pinctrl_restore_gpio_regs(pctl, i); 1647 + for (i = 0; i < pctl->ngroups; i++, g++) 1648 + stm32_pinctrl_restore_gpio_regs(pctl, g->pin); 1649 1649 1650 1650 return 0; 1651 1651 }
+2 -2
drivers/reset/Kconfig
··· 147 147 bool 148 148 149 149 config RESET_PISTACHIO 150 - bool "Pistachio Reset Driver" if COMPILE_TEST 151 - default MACH_PISTACHIO 150 + bool "Pistachio Reset Driver" 151 + depends on MIPS || COMPILE_TEST 152 152 help 153 153 This enables the reset driver for ImgTec Pistachio SoCs. 154 154
+1 -1
drivers/reset/reset-brcmstb-rescal.c
··· 38 38 } 39 39 40 40 ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg, 41 - !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000); 41 + (reg & BRCM_RESCAL_STATUS_BIT), 100, 1000); 42 42 if (ret) { 43 43 dev_err(data->dev, "time out on SATA/PCIe rescal\n"); 44 44 return ret;
+26
drivers/reset/reset-socfpga.c
··· 92 92 for_each_matching_node(np, socfpga_early_reset_dt_ids) 93 93 a10_reset_init(np); 94 94 } 95 + 96 + /* 97 + * The early driver is problematic, because it doesn't register 98 + * itself as a driver. This causes certain device links to prevent 99 + * consumer devices from probing. The hacky solution is to register 100 + * an empty driver, whose only job is to attach itself to the reset 101 + * manager and call probe. 102 + */ 103 + static const struct of_device_id socfpga_reset_dt_ids[] = { 104 + { .compatible = "altr,rst-mgr", }, 105 + { /* sentinel */ }, 106 + }; 107 + 108 + static int reset_simple_probe(struct platform_device *pdev) 109 + { 110 + return 0; 111 + } 112 + 113 + static struct platform_driver reset_socfpga_driver = { 114 + .probe = reset_simple_probe, 115 + .driver = { 116 + .name = "socfpga-reset", 117 + .of_match_table = socfpga_reset_dt_ids, 118 + }, 119 + }; 120 + builtin_platform_driver(reset_socfpga_driver);
+8 -1
drivers/reset/tegra/reset-bpmp.c
··· 20 20 struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc); 21 21 struct mrq_reset_request request; 22 22 struct tegra_bpmp_message msg; 23 + int err; 23 24 24 25 memset(&request, 0, sizeof(request)); 25 26 request.cmd = command; ··· 31 30 msg.tx.data = &request; 32 31 msg.tx.size = sizeof(request); 33 32 34 - return tegra_bpmp_transfer(bpmp, &msg); 33 + err = tegra_bpmp_transfer(bpmp, &msg); 34 + if (err) 35 + return err; 36 + if (msg.rx.ret) 37 + return -EINVAL; 38 + 39 + return 0; 35 40 } 36 41 37 42 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
+2 -1
drivers/scsi/hosts.c
··· 220 220 goto fail; 221 221 } 222 222 223 - shost->cmd_per_lun = min_t(short, shost->cmd_per_lun, 223 + /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */ 224 + shost->cmd_per_lun = min_t(int, shost->cmd_per_lun, 224 225 shost->can_queue); 225 226 226 227 error = scsi_init_sense_cache(shost);
+1 -1
drivers/scsi/mpi3mr/mpi3mr_os.c
··· 3736 3736 shost->max_lun = -1; 3737 3737 shost->unique_id = mrioc->id; 3738 3738 3739 - shost->max_channel = 1; 3739 + shost->max_channel = 0; 3740 3740 shost->max_id = 0xFFFFFFFF; 3741 3741 3742 3742 if (prot_mask >= 0)
+1 -1
drivers/scsi/qla2xxx/qla_bsg.c
··· 431 431 goto done_free_fcport; 432 432 433 433 done_free_fcport: 434 - if (bsg_request->msgcode == FC_BSG_RPT_ELS) 434 + if (bsg_request->msgcode != FC_BSG_RPT_ELS) 435 435 qla2x00_free_fcport(fcport); 436 436 done: 437 437 return rval;
+1 -1
drivers/scsi/qla2xxx/qla_os.c
··· 4157 4157 ql_dbg_pci(ql_dbg_init, ha->pdev, 4158 4158 0xe0ee, "%s: failed alloc dsd\n", 4159 4159 __func__); 4160 - return 1; 4160 + return -ENOMEM; 4161 4161 } 4162 4162 ha->dif_bundle_kallocs++; 4163 4163
+5 -9
drivers/scsi/qla2xxx/qla_target.c
··· 3319 3319 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", 3320 3320 vha->flags.online, qla2x00_reset_active(vha), 3321 3321 cmd->reset_count, qpair->chip_reset); 3322 - spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3323 - return 0; 3322 + goto out_unmap_unlock; 3324 3323 } 3325 3324 3326 3325 /* Does F/W have an IOCBs for this request */ ··· 3444 3445 prm.sg = NULL; 3445 3446 prm.req_cnt = 1; 3446 3447 3447 - /* Calculate number of entries and segments required */ 3448 - if (qlt_pci_map_calc_cnt(&prm) != 0) 3449 - return -EAGAIN; 3450 - 3451 3448 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3452 3449 (cmd->sess && cmd->sess->deleted)) { 3453 3450 /* ··· 3460 3465 cmd->reset_count, qpair->chip_reset); 3461 3466 return 0; 3462 3467 } 3468 + 3469 + /* Calculate number of entries and segments required */ 3470 + if (qlt_pci_map_calc_cnt(&prm) != 0) 3471 + return -EAGAIN; 3463 3472 3464 3473 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3465 3474 /* Does F/W have an IOCBs for this request */ ··· 3868 3869 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3869 3870 3870 3871 BUG_ON(cmd->cmd_in_wq); 3871 - 3872 - if (cmd->sg_mapped) 3873 - qlt_unmap_sg(cmd->vha, cmd); 3874 3872 3875 3873 if (!cmd->q_full) 3876 3874 qlt_decr_num_pend_cmds(cmd->vha);
+3 -1
drivers/scsi/scsi.c
··· 553 553 */ 554 554 void scsi_device_put(struct scsi_device *sdev) 555 555 { 556 - module_put(sdev->host->hostt->module); 556 + struct module *mod = sdev->host->hostt->module; 557 + 557 558 put_device(&sdev->sdev_gendev); 559 + module_put(mod); 558 560 } 559 561 EXPORT_SYMBOL(scsi_device_put); 560 562
+9
drivers/scsi/scsi_sysfs.c
··· 449 449 struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; 450 450 struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL; 451 451 unsigned long flags; 452 + struct module *mod; 452 453 453 454 sdev = container_of(work, struct scsi_device, ew.work); 455 + 456 + mod = sdev->host->hostt->module; 454 457 455 458 scsi_dh_release_device(sdev); 456 459 ··· 505 502 506 503 if (parent) 507 504 put_device(parent); 505 + module_put(mod); 508 506 } 509 507 510 508 static void scsi_device_dev_release(struct device *dev) 511 509 { 512 510 struct scsi_device *sdp = to_scsi_device(dev); 511 + 512 + /* Set module pointer as NULL in case of module unloading */ 513 + if (!try_module_get(sdp->host->hostt->module)) 514 + sdp->host->hostt->module = NULL; 515 + 513 516 execute_in_process_context(scsi_device_dev_release_usercontext, 514 517 &sdp->ew); 515 518 }
-2
drivers/scsi/scsi_transport_iscsi.c
··· 2930 2930 session->recovery_tmo = value; 2931 2931 break; 2932 2932 default: 2933 - err = transport->set_param(conn, ev->u.set_param.param, 2934 - data, ev->u.set_param.len); 2935 2933 if ((conn->state == ISCSI_CONN_BOUND) || 2936 2934 (conn->state == ISCSI_CONN_UP)) { 2937 2935 err = transport->set_param(conn, ev->u.set_param.param,
+6 -1
drivers/scsi/sd.c
··· 3683 3683 static int sd_resume_runtime(struct device *dev) 3684 3684 { 3685 3685 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3686 - struct scsi_device *sdp = sdkp->device; 3686 + struct scsi_device *sdp; 3687 + 3688 + if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3689 + return 0; 3690 + 3691 + sdp = sdkp->device; 3687 3692 3688 3693 if (sdp->ignore_media_change) { 3689 3694 /* clear the device's sense data */
+23 -9
drivers/scsi/storvsc_drv.c
··· 1285 1285 foreach_vmbus_pkt(desc, channel) { 1286 1286 struct vstor_packet *packet = hv_pkt_data(desc); 1287 1287 struct storvsc_cmd_request *request = NULL; 1288 + u32 pktlen = hv_pkt_datalen(desc); 1288 1289 u64 rqst_id = desc->trans_id; 1290 + u32 minlen = rqst_id ? sizeof(struct vstor_packet) - 1291 + stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation); 1289 1292 1290 - if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) - 1291 - stor_device->vmscsi_size_delta) { 1292 - dev_err(&device->device, "Invalid packet len\n"); 1293 + if (pktlen < minlen) { 1294 + dev_err(&device->device, 1295 + "Invalid pkt: id=%llu, len=%u, minlen=%u\n", 1296 + rqst_id, pktlen, minlen); 1293 1297 continue; 1294 1298 } 1295 1299 ··· 1306 1302 if (rqst_id == 0) { 1307 1303 /* 1308 1304 * storvsc_on_receive() looks at the vstor_packet in the message 1309 - * from the ring buffer. If the operation in the vstor_packet is 1310 - * COMPLETE_IO, then we call storvsc_on_io_completion(), and 1311 - * dereference the guest memory address. Make sure we don't call 1312 - * storvsc_on_io_completion() with a guest memory address that is 1313 - * zero if Hyper-V were to construct and send such a bogus packet. 1305 + * from the ring buffer. 1306 + * 1307 + * - If the operation in the vstor_packet is COMPLETE_IO, then 1308 + * we call storvsc_on_io_completion(), and dereference the 1309 + * guest memory address. Make sure we don't call 1310 + * storvsc_on_io_completion() with a guest memory address 1311 + * that is zero if Hyper-V were to construct and send such 1312 + * a bogus packet. 1313 + * 1314 + * - If the operation in the vstor_packet is FCHBA_DATA, then 1315 + * we call cache_wwn(), and access the data payload area of 1316 + * the packet (wwn_packet); however, there is no guarantee 1317 + * that the packet is big enough to contain such area. 1318 + * Future-proof the code by rejecting such a bogus packet. 
1314 1319 */ 1315 - if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) { 1320 + if (packet->operation == VSTOR_OPERATION_COMPLETE_IO || 1321 + packet->operation == VSTOR_OPERATION_FCHBA_DATA) { 1316 1322 dev_err(&device->device, "Invalid packet with ID of 0\n"); 1317 1323 continue; 1318 1324 }
+18 -15
drivers/scsi/ufs/ufshcd-pci.c
··· 370 370 371 371 static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op) 372 372 { 373 - /* 374 - * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base 375 - * address registers must be restored because the restore kernel can 376 - * have used different addresses. 377 - */ 378 - ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 379 - REG_UTP_TRANSFER_REQ_LIST_BASE_L); 380 - ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 381 - REG_UTP_TRANSFER_REQ_LIST_BASE_H); 382 - ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 383 - REG_UTP_TASK_REQ_LIST_BASE_L); 384 - ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 385 - REG_UTP_TASK_REQ_LIST_BASE_H); 386 - 387 373 if (ufshcd_is_link_hibern8(hba)) { 388 374 int ret = ufshcd_uic_hibern8_exit(hba); 389 375 ··· 448 462 .resume = ufs_intel_resume, 449 463 .device_reset = ufs_intel_device_reset, 450 464 }; 465 + 466 + #ifdef CONFIG_PM_SLEEP 467 + static int ufshcd_pci_restore(struct device *dev) 468 + { 469 + struct ufs_hba *hba = dev_get_drvdata(dev); 470 + 471 + /* Force a full reset and restore */ 472 + ufshcd_set_link_off(hba); 473 + 474 + return ufshcd_system_resume(dev); 475 + } 476 + #endif 451 477 452 478 /** 453 479 * ufshcd_pci_shutdown - main function to put the controller in reset state ··· 544 546 } 545 547 546 548 static const struct dev_pm_ops ufshcd_pci_pm_ops = { 547 - SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) 548 549 SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) 549 550 #ifdef CONFIG_PM_SLEEP 551 + .suspend = ufshcd_system_suspend, 552 + .resume = ufshcd_system_resume, 553 + .freeze = ufshcd_system_suspend, 554 + .thaw = ufshcd_system_resume, 555 + .poweroff = ufshcd_system_suspend, 556 + .restore = ufshcd_pci_restore, 550 557 .prepare = ufshcd_suspend_prepare, 551 558 .complete = ufshcd_resume_complete, 552 559 #endif
+1 -1
drivers/spi/spi-altera-dfl.c
··· 134 134 if (!master) 135 135 return -ENOMEM; 136 136 137 - master->bus_num = dfl_dev->id; 137 + master->bus_num = -1; 138 138 139 139 hw = spi_master_get_devdata(master); 140 140
+1 -1
drivers/spi/spi-altera-platform.c
··· 48 48 return err; 49 49 50 50 /* setup the master state. */ 51 - master->bus_num = pdev->id; 51 + master->bus_num = -1; 52 52 53 53 if (pdata) { 54 54 if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
+3 -2
drivers/spi/spi-pl022.c
··· 1716 1716 return -EINVAL; 1717 1717 } 1718 1718 } else { 1719 - if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) 1719 + if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) { 1720 1720 dev_err(&pl022->adev->dev, 1721 1721 "Microwire half duplex mode requested," 1722 1722 " but this is only available in the" 1723 1723 " ST version of PL022\n"); 1724 - return -EINVAL; 1724 + return -EINVAL; 1725 + } 1725 1726 } 1726 1727 } 1727 1728 return 0;
+1 -1
drivers/spi/spi-tegra20-slink.c
··· 1194 1194 return 0; 1195 1195 } 1196 1196 1197 - static int tegra_slink_runtime_resume(struct device *dev) 1197 + static int __maybe_unused tegra_slink_runtime_resume(struct device *dev) 1198 1198 { 1199 1199 struct spi_master *master = dev_get_drvdata(dev); 1200 1200 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+25 -4
drivers/vdpa/vdpa_user/vduse_dev.c
··· 80 80 struct vdpa_callback config_cb; 81 81 struct work_struct inject; 82 82 spinlock_t irq_lock; 83 + struct rw_semaphore rwsem; 83 84 int minor; 84 85 bool broken; 85 86 bool connected; ··· 411 410 if (domain->bounce_map) 412 411 vduse_domain_reset_bounce_map(domain); 413 412 413 + down_write(&dev->rwsem); 414 + 414 415 dev->status = 0; 415 416 dev->driver_features = 0; 416 417 dev->generation++; ··· 446 443 flush_work(&vq->inject); 447 444 flush_work(&vq->kick); 448 445 } 446 + 447 + up_write(&dev->rwsem); 449 448 } 450 449 451 450 static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx, ··· 890 885 spin_unlock_irq(&vq->irq_lock); 891 886 } 892 887 888 + static int vduse_dev_queue_irq_work(struct vduse_dev *dev, 889 + struct work_struct *irq_work) 890 + { 891 + int ret = -EINVAL; 892 + 893 + down_read(&dev->rwsem); 894 + if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK)) 895 + goto unlock; 896 + 897 + ret = 0; 898 + queue_work(vduse_irq_wq, irq_work); 899 + unlock: 900 + up_read(&dev->rwsem); 901 + 902 + return ret; 903 + } 904 + 893 905 static long vduse_dev_ioctl(struct file *file, unsigned int cmd, 894 906 unsigned long arg) 895 907 { ··· 988 966 break; 989 967 } 990 968 case VDUSE_DEV_INJECT_CONFIG_IRQ: 991 - ret = 0; 992 - queue_work(vduse_irq_wq, &dev->inject); 969 + ret = vduse_dev_queue_irq_work(dev, &dev->inject); 993 970 break; 994 971 case VDUSE_VQ_SETUP: { 995 972 struct vduse_vq_config config; ··· 1074 1053 if (index >= dev->vq_num) 1075 1054 break; 1076 1055 1077 - ret = 0; 1078 1056 index = array_index_nospec(index, dev->vq_num); 1079 - queue_work(vduse_irq_wq, &dev->vqs[index].inject); 1057 + ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject); 1080 1058 break; 1081 1059 } 1082 1060 default: ··· 1156 1136 INIT_LIST_HEAD(&dev->send_list); 1157 1137 INIT_LIST_HEAD(&dev->recv_list); 1158 1138 spin_lock_init(&dev->irq_lock); 1139 + init_rwsem(&dev->rwsem); 1159 1140 1160 1141 INIT_WORK(&dev->inject, vduse_dev_irq_inject); 
1161 1142 init_waitqueue_head(&dev->waitq);
+1 -1
drivers/virtio/virtio_ring.c
··· 576 576 /* Last one doesn't continue. */ 577 577 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); 578 578 if (!indirect && vq->use_dma_api) 579 - vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags = 579 + vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= 580 580 ~VRING_DESC_F_NEXT; 581 581 582 582 if (indirect) {
+3 -9
drivers/watchdog/iTCO_wdt.c
··· 71 71 #define TCOBASE(p) ((p)->tco_res->start) 72 72 /* SMI Control and Enable Register */ 73 73 #define SMI_EN(p) ((p)->smi_res->start) 74 - #define TCO_EN (1 << 13) 75 - #define GBL_SMI_EN (1 << 0) 76 74 77 75 #define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */ 78 76 #define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/ ··· 355 357 356 358 tmrval = seconds_to_ticks(p, t); 357 359 358 - /* 359 - * If TCO SMIs are off, the timer counts down twice before rebooting. 360 - * Otherwise, the BIOS generally reboots when the SMI triggers. 361 - */ 362 - if (p->smi_res && 363 - (inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN)) 360 + /* For TCO v1 the timer counts down twice before rebooting */ 361 + if (p->iTCO_version == 1) 364 362 tmrval /= 2; 365 363 366 364 /* from the specs: */ ··· 521 527 * Disables TCO logic generating an SMI# 522 528 */ 523 529 val32 = inl(SMI_EN(p)); 524 - val32 &= ~TCO_EN; /* Turn off SMI clearing watchdog */ 530 + val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */ 525 531 outl(val32, SMI_EN(p)); 526 532 } 527 533
+1 -1
drivers/watchdog/ixp4xx_wdt.c
··· 119 119 iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL); 120 120 if (!iwdt) 121 121 return -ENOMEM; 122 - iwdt->base = dev->platform_data; 122 + iwdt->base = (void __iomem *)dev->platform_data; 123 123 124 124 /* 125 125 * Retrieve rate from a fixed clock from the device tree if
+5 -1
drivers/watchdog/omap_wdt.c
··· 268 268 wdev->wdog.bootstatus = WDIOF_CARDRESET; 269 269 } 270 270 271 - if (!early_enable) 271 + if (early_enable) { 272 + omap_wdt_start(&wdev->wdog); 273 + set_bit(WDOG_HW_RUNNING, &wdev->wdog.status); 274 + } else { 272 275 omap_wdt_disable(wdev); 276 + } 273 277 274 278 ret = watchdog_register_device(&wdev->wdog); 275 279 if (ret) {
+2 -3
drivers/watchdog/sbsa_gwdt.c
··· 130 130 if (gwdt->version == 0) 131 131 return readl(gwdt->control_base + SBSA_GWDT_WOR); 132 132 else 133 - return readq(gwdt->control_base + SBSA_GWDT_WOR); 133 + return lo_hi_readq(gwdt->control_base + SBSA_GWDT_WOR); 134 134 } 135 135 136 136 static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt) ··· 138 138 if (gwdt->version == 0) 139 139 writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR); 140 140 else 141 - writeq(val, gwdt->control_base + SBSA_GWDT_WOR); 141 + lo_hi_writeq(val, gwdt->control_base + SBSA_GWDT_WOR); 142 142 } 143 143 144 144 /* ··· 411 411 MODULE_AUTHOR("Al Stone <al.stone@linaro.org>"); 412 412 MODULE_AUTHOR("Timur Tabi <timur@codeaurora.org>"); 413 413 MODULE_LICENSE("GPL v2"); 414 - MODULE_ALIAS("platform:" DRV_NAME);
+1 -1
fs/autofs/waitq.c
··· 358 358 qstr.len = strlen(p); 359 359 offset = p - name; 360 360 } 361 - qstr.hash = full_name_hash(dentry, name, qstr.len); 361 + qstr.hash = full_name_hash(dentry, qstr.name, qstr.len); 362 362 363 363 if (mutex_lock_interruptible(&sbi->wq_mutex)) { 364 364 kfree(name);
+3
fs/fuse/fuse_i.h
··· 1121 1121 */ 1122 1122 void fuse_conn_destroy(struct fuse_mount *fm); 1123 1123 1124 + /* Drop the connection and free the fuse mount */ 1125 + void fuse_mount_destroy(struct fuse_mount *fm); 1126 + 1124 1127 /** 1125 1128 * Add connection to control filesystem 1126 1129 */
+39 -48
fs/fuse/inode.c
··· 457 457 } 458 458 } 459 459 460 - static void fuse_put_super(struct super_block *sb) 461 - { 462 - struct fuse_mount *fm = get_fuse_mount_super(sb); 463 - 464 - fuse_conn_put(fm->fc); 465 - kfree(fm); 466 - } 467 - 468 460 static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) 469 461 { 470 462 stbuf->f_type = FUSE_SUPER_MAGIC; ··· 995 1003 .evict_inode = fuse_evict_inode, 996 1004 .write_inode = fuse_write_inode, 997 1005 .drop_inode = generic_delete_inode, 998 - .put_super = fuse_put_super, 999 1006 .umount_begin = fuse_umount_begin, 1000 1007 .statfs = fuse_statfs, 1001 1008 .sync_fs = fuse_sync_fs, ··· 1415 1424 if (!fm) 1416 1425 return -ENOMEM; 1417 1426 1427 + fm->fc = fuse_conn_get(fc); 1418 1428 fsc->s_fs_info = fm; 1419 1429 sb = sget_fc(fsc, NULL, set_anon_super_fc); 1420 - if (IS_ERR(sb)) { 1421 - kfree(fm); 1430 + if (fsc->s_fs_info) 1431 + fuse_mount_destroy(fm); 1432 + if (IS_ERR(sb)) 1422 1433 return PTR_ERR(sb); 1423 - } 1424 - fm->fc = fuse_conn_get(fc); 1425 1434 1426 1435 /* Initialize superblock, making @mp_fi its root */ 1427 1436 err = fuse_fill_super_submount(sb, mp_fi); 1428 1437 if (err) { 1429 - fuse_conn_put(fc); 1430 - kfree(fm); 1431 - sb->s_fs_info = NULL; 1432 1438 deactivate_locked_super(sb); 1433 1439 return err; 1434 1440 } ··· 1557 1569 { 1558 1570 struct fuse_fs_context *ctx = fsc->fs_private; 1559 1571 int err; 1560 - struct fuse_conn *fc; 1561 - struct fuse_mount *fm; 1562 1572 1563 1573 if (!ctx->file || !ctx->rootmode_present || 1564 1574 !ctx->user_id_present || !ctx->group_id_present) ··· 1566 1580 * Require mount to happen from the same user namespace which 1567 1581 * opened /dev/fuse to prevent potential attacks. 
1568 1582 */ 1569 - err = -EINVAL; 1570 1583 if ((ctx->file->f_op != &fuse_dev_operations) || 1571 1584 (ctx->file->f_cred->user_ns != sb->s_user_ns)) 1572 - goto err; 1585 + return -EINVAL; 1573 1586 ctx->fudptr = &ctx->file->private_data; 1574 - 1575 - fc = kmalloc(sizeof(*fc), GFP_KERNEL); 1576 - err = -ENOMEM; 1577 - if (!fc) 1578 - goto err; 1579 - 1580 - fm = kzalloc(sizeof(*fm), GFP_KERNEL); 1581 - if (!fm) { 1582 - kfree(fc); 1583 - goto err; 1584 - } 1585 - 1586 - fuse_conn_init(fc, fm, sb->s_user_ns, &fuse_dev_fiq_ops, NULL); 1587 - fc->release = fuse_free_conn; 1588 - 1589 - sb->s_fs_info = fm; 1590 1587 1591 1588 err = fuse_fill_super_common(sb, ctx); 1592 1589 if (err) 1593 - goto err_put_conn; 1590 + return err; 1594 1591 /* file->private_data shall be visible on all CPUs after this */ 1595 1592 smp_mb(); 1596 1593 fuse_send_init(get_fuse_mount_super(sb)); 1597 1594 return 0; 1598 - 1599 - err_put_conn: 1600 - fuse_conn_put(fc); 1601 - kfree(fm); 1602 - sb->s_fs_info = NULL; 1603 - err: 1604 - return err; 1605 1595 } 1606 1596 1607 1597 /* ··· 1599 1637 { 1600 1638 struct fuse_fs_context *ctx = fsc->fs_private; 1601 1639 struct fuse_dev *fud; 1640 + struct fuse_conn *fc; 1641 + struct fuse_mount *fm; 1602 1642 struct super_block *sb; 1603 1643 int err; 1644 + 1645 + fc = kmalloc(sizeof(*fc), GFP_KERNEL); 1646 + if (!fc) 1647 + return -ENOMEM; 1648 + 1649 + fm = kzalloc(sizeof(*fm), GFP_KERNEL); 1650 + if (!fm) { 1651 + kfree(fc); 1652 + return -ENOMEM; 1653 + } 1654 + 1655 + fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL); 1656 + fc->release = fuse_free_conn; 1657 + 1658 + fsc->s_fs_info = fm; 1604 1659 1605 1660 if (ctx->fd_present) 1606 1661 ctx->file = fget(ctx->fd); 1607 1662 1608 1663 if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) { 1609 1664 err = get_tree_bdev(fsc, fuse_fill_super); 1610 - goto out_fput; 1665 + goto out; 1611 1666 } 1612 1667 /* 1613 1668 * While block dev mount can be initialized with a dummy device fd 1614 1669 * 
(found by device name), normal fuse mounts can't 1615 1670 */ 1671 + err = -EINVAL; 1616 1672 if (!ctx->file) 1617 - return -EINVAL; 1673 + goto out; 1618 1674 1619 1675 /* 1620 1676 * Allow creating a fuse mount with an already initialized fuse ··· 1648 1668 } else { 1649 1669 err = get_tree_nodev(fsc, fuse_fill_super); 1650 1670 } 1651 - out_fput: 1671 + out: 1672 + if (fsc->s_fs_info) 1673 + fuse_mount_destroy(fm); 1652 1674 if (ctx->file) 1653 1675 fput(ctx->file); 1654 1676 return err; ··· 1729 1747 struct fuse_mount *fm = get_fuse_mount_super(sb); 1730 1748 bool last; 1731 1749 1732 - if (fm) { 1750 + if (sb->s_root) { 1733 1751 last = fuse_mount_remove(fm); 1734 1752 if (last) 1735 1753 fuse_conn_destroy(fm); 1736 1754 } 1737 1755 } 1738 1756 1757 + void fuse_mount_destroy(struct fuse_mount *fm) 1758 + { 1759 + fuse_conn_put(fm->fc); 1760 + kfree(fm); 1761 + } 1762 + EXPORT_SYMBOL(fuse_mount_destroy); 1763 + 1739 1764 static void fuse_kill_sb_anon(struct super_block *sb) 1740 1765 { 1741 1766 fuse_sb_destroy(sb); 1742 1767 kill_anon_super(sb); 1768 + fuse_mount_destroy(get_fuse_mount_super(sb)); 1743 1769 } 1744 1770 1745 1771 static struct file_system_type fuse_fs_type = { ··· 1765 1775 { 1766 1776 fuse_sb_destroy(sb); 1767 1777 kill_block_super(sb); 1778 + fuse_mount_destroy(get_fuse_mount_super(sb)); 1768 1779 } 1769 1780 1770 1781 static struct file_system_type fuseblk_fs_type = {
+4 -8
fs/fuse/virtio_fs.c
··· 1394 1394 bool last; 1395 1395 1396 1396 /* If mount failed, we can still be called without any fc */ 1397 - if (fm) { 1397 + if (sb->s_root) { 1398 1398 last = fuse_mount_remove(fm); 1399 1399 if (last) 1400 1400 virtio_fs_conn_destroy(fm); 1401 1401 } 1402 1402 kill_anon_super(sb); 1403 + fuse_mount_destroy(fm); 1403 1404 } 1404 1405 1405 1406 static int virtio_fs_test_super(struct super_block *sb, ··· 1456 1455 1457 1456 fsc->s_fs_info = fm; 1458 1457 sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc); 1459 - if (fsc->s_fs_info) { 1460 - fuse_conn_put(fc); 1461 - kfree(fm); 1462 - } 1458 + if (fsc->s_fs_info) 1459 + fuse_mount_destroy(fm); 1463 1460 if (IS_ERR(sb)) 1464 1461 return PTR_ERR(sb); 1465 1462 1466 1463 if (!sb->s_root) { 1467 1464 err = virtio_fs_fill_super(sb, fsc); 1468 1465 if (err) { 1469 - fuse_conn_put(fc); 1470 - kfree(fm); 1471 - sb->s_fs_info = NULL; 1472 1466 deactivate_locked_super(sb); 1473 1467 return err; 1474 1468 }
+5 -2
fs/io-wq.c
··· 253 253 pr_warn_once("io-wq is not configured for unbound workers"); 254 254 255 255 raw_spin_lock(&wqe->lock); 256 - if (acct->nr_workers == acct->max_workers) { 256 + if (acct->nr_workers >= acct->max_workers) { 257 257 raw_spin_unlock(&wqe->lock); 258 258 return true; 259 259 } ··· 1291 1291 1292 1292 rcu_read_lock(); 1293 1293 for_each_node(node) { 1294 + struct io_wqe *wqe = wq->wqes[node]; 1294 1295 struct io_wqe_acct *acct; 1295 1296 1297 + raw_spin_lock(&wqe->lock); 1296 1298 for (i = 0; i < IO_WQ_ACCT_NR; i++) { 1297 - acct = &wq->wqes[node]->acct[i]; 1299 + acct = &wqe->acct[i]; 1298 1300 prev = max_t(int, acct->max_workers, prev); 1299 1301 if (new_count[i]) 1300 1302 acct->max_workers = new_count[i]; 1301 1303 new_count[i] = prev; 1302 1304 } 1305 + raw_spin_unlock(&wqe->lock); 1303 1306 } 1304 1307 rcu_read_unlock(); 1305 1308 return 0;
+42 -12
fs/io_uring.c
··· 456 456 struct work_struct exit_work; 457 457 struct list_head tctx_list; 458 458 struct completion ref_comp; 459 + u32 iowq_limits[2]; 460 + bool iowq_limits_set; 459 461 }; 460 462 }; 461 463 ··· 1368 1366 req->flags |= REQ_F_INFLIGHT; 1369 1367 atomic_inc(&current->io_uring->inflight_tracked); 1370 1368 } 1371 - } 1372 - 1373 - static inline void io_unprep_linked_timeout(struct io_kiocb *req) 1374 - { 1375 - req->flags &= ~REQ_F_LINK_TIMEOUT; 1376 1369 } 1377 1370 1378 1371 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) ··· 6980 6983 switch (io_arm_poll_handler(req)) { 6981 6984 case IO_APOLL_READY: 6982 6985 if (linked_timeout) 6983 - io_unprep_linked_timeout(req); 6986 + io_queue_linked_timeout(linked_timeout); 6984 6987 goto issue_sqe; 6985 6988 case IO_APOLL_ABORTED: 6986 6989 /* ··· 9635 9638 ret = io_uring_alloc_task_context(current, ctx); 9636 9639 if (unlikely(ret)) 9637 9640 return ret; 9641 + 9638 9642 tctx = current->io_uring; 9643 + if (ctx->iowq_limits_set) { 9644 + unsigned int limits[2] = { ctx->iowq_limits[0], 9645 + ctx->iowq_limits[1], }; 9646 + 9647 + ret = io_wq_max_workers(tctx->io_wq, limits); 9648 + if (ret) 9649 + return ret; 9650 + } 9639 9651 } 9640 9652 if (!xa_load(&tctx->xa, (unsigned long)ctx)) { 9641 9653 node = kmalloc(sizeof(*node), GFP_KERNEL); ··· 10649 10643 10650 10644 static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, 10651 10645 void __user *arg) 10646 + __must_hold(&ctx->uring_lock) 10652 10647 { 10648 + struct io_tctx_node *node; 10653 10649 struct io_uring_task *tctx = NULL; 10654 10650 struct io_sq_data *sqd = NULL; 10655 10651 __u32 new_count[2]; ··· 10682 10674 tctx = current->io_uring; 10683 10675 } 10684 10676 10685 - ret = -EINVAL; 10686 - if (!tctx || !tctx->io_wq) 10687 - goto err; 10677 + BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits)); 10688 10678 10689 - ret = io_wq_max_workers(tctx->io_wq, new_count); 10690 - if (ret) 10691 - goto err; 10679 + 
memcpy(ctx->iowq_limits, new_count, sizeof(new_count)); 10680 + ctx->iowq_limits_set = true; 10681 + 10682 + ret = -EINVAL; 10683 + if (tctx && tctx->io_wq) { 10684 + ret = io_wq_max_workers(tctx->io_wq, new_count); 10685 + if (ret) 10686 + goto err; 10687 + } else { 10688 + memset(new_count, 0, sizeof(new_count)); 10689 + } 10692 10690 10693 10691 if (sqd) { 10694 10692 mutex_unlock(&sqd->lock); ··· 10704 10690 if (copy_to_user(arg, new_count, sizeof(new_count))) 10705 10691 return -EFAULT; 10706 10692 10693 + /* that's it for SQPOLL, only the SQPOLL task creates requests */ 10694 + if (sqd) 10695 + return 0; 10696 + 10697 + /* now propagate the restriction to all registered users */ 10698 + list_for_each_entry(node, &ctx->tctx_list, ctx_node) { 10699 + struct io_uring_task *tctx = node->task->io_uring; 10700 + 10701 + if (WARN_ON_ONCE(!tctx->io_wq)) 10702 + continue; 10703 + 10704 + for (i = 0; i < ARRAY_SIZE(new_count); i++) 10705 + new_count[i] = ctx->iowq_limits[i]; 10706 + /* ignore errors, it always returns zero anyway */ 10707 + (void)io_wq_max_workers(tctx->io_wq, new_count); 10708 + } 10707 10709 return 0; 10708 10710 err: 10709 10711 if (sqd) {
+9 -7
fs/ksmbd/auth.c
··· 298 298 int blob_len, struct ksmbd_session *sess) 299 299 { 300 300 char *domain_name; 301 - unsigned int lm_off, nt_off; 302 - unsigned short nt_len; 301 + unsigned int nt_off, dn_off; 302 + unsigned short nt_len, dn_len; 303 303 int ret; 304 304 305 305 if (blob_len < sizeof(struct authenticate_message)) { ··· 314 314 return -EINVAL; 315 315 } 316 316 317 - lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset); 318 317 nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset); 319 318 nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length); 319 + dn_off = le32_to_cpu(authblob->DomainName.BufferOffset); 320 + dn_len = le16_to_cpu(authblob->DomainName.Length); 321 + 322 + if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len) 323 + return -EINVAL; 320 324 321 325 /* TODO : use domain name that imported from configuration file */ 322 - domain_name = smb_strndup_from_utf16((const char *)authblob + 323 - le32_to_cpu(authblob->DomainName.BufferOffset), 324 - le16_to_cpu(authblob->DomainName.Length), true, 325 - sess->conn->local_nls); 326 + domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off, 327 + dn_len, true, sess->conn->local_nls); 326 328 if (IS_ERR(domain_name)) 327 329 return PTR_ERR(domain_name); 328 330
+2
fs/ksmbd/connection.c
··· 61 61 conn->local_nls = load_nls_default(); 62 62 atomic_set(&conn->req_running, 0); 63 63 atomic_set(&conn->r_count, 0); 64 + conn->total_credits = 1; 65 + 64 66 init_waitqueue_head(&conn->req_running_q); 65 67 INIT_LIST_HEAD(&conn->conns_list); 66 68 INIT_LIST_HEAD(&conn->sessions);
+2
fs/ksmbd/ksmbd_netlink.h
··· 211 211 */ 212 212 struct ksmbd_logout_request { 213 213 __s8 account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */ 214 + __u32 account_flags; 214 215 }; 215 216 216 217 /* ··· 318 317 #define KSMBD_USER_FLAG_BAD_UID BIT(2) 319 318 #define KSMBD_USER_FLAG_BAD_USER BIT(3) 320 319 #define KSMBD_USER_FLAG_GUEST_ACCOUNT BIT(4) 320 + #define KSMBD_USER_FLAG_DELAY_SESSION BIT(5) 321 321 322 322 /* 323 323 * Share config flags.
+1 -1
fs/ksmbd/mgmt/user_config.c
··· 55 55 56 56 void ksmbd_free_user(struct ksmbd_user *user) 57 57 { 58 - ksmbd_ipc_logout_request(user->name); 58 + ksmbd_ipc_logout_request(user->name, user->flags); 59 59 kfree(user->name); 60 60 kfree(user->passkey); 61 61 kfree(user);
+1
fs/ksmbd/mgmt/user_config.h
··· 18 18 19 19 size_t passkey_sz; 20 20 char *passkey; 21 + unsigned int failed_login_count; 21 22 }; 22 23 23 24 static inline bool user_guest(struct ksmbd_user *user)
+37 -18
fs/ksmbd/smb2misc.c
··· 284 284 le32_to_cpu(h->MaxOutputResponse); 285 285 } 286 286 287 - static int smb2_validate_credit_charge(struct smb2_hdr *hdr) 287 + static int smb2_validate_credit_charge(struct ksmbd_conn *conn, 288 + struct smb2_hdr *hdr) 288 289 { 289 - int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len; 290 - int credit_charge = le16_to_cpu(hdr->CreditCharge); 290 + unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len; 291 + unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge); 291 292 void *__hdr = hdr; 293 + int ret; 292 294 293 295 switch (hdr->Command) { 294 296 case SMB2_QUERY_INFO: ··· 312 310 req_len = smb2_ioctl_req_len(__hdr); 313 311 expect_resp_len = smb2_ioctl_resp_len(__hdr); 314 312 break; 315 - default: 313 + case SMB2_CANCEL: 316 314 return 0; 315 + default: 316 + req_len = 1; 317 + break; 317 318 } 318 319 319 - credit_charge = max(1, credit_charge); 320 - max_len = max(req_len, expect_resp_len); 320 + credit_charge = max_t(unsigned short, credit_charge, 1); 321 + max_len = max_t(unsigned int, req_len, expect_resp_len); 321 322 calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE); 322 323 323 324 if (credit_charge < calc_credit_num) { 324 - pr_err("Insufficient credit charge, given: %d, needed: %d\n", 325 - credit_charge, calc_credit_num); 325 + ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n", 326 + credit_charge, calc_credit_num); 327 + return 1; 328 + } else if (credit_charge > conn->max_credits) { 329 + ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge); 326 330 return 1; 327 331 } 328 332 329 - return 0; 333 + spin_lock(&conn->credits_lock); 334 + if (credit_charge <= conn->total_credits) { 335 + conn->total_credits -= credit_charge; 336 + ret = 0; 337 + } else { 338 + ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n", 339 + credit_charge, conn->total_credits); 340 + ret = 1; 341 + } 342 + spin_unlock(&conn->credits_lock); 343 + return ret; 
330 344 } 331 345 332 346 int ksmbd_smb2_check_message(struct ksmbd_work *work) ··· 400 382 } 401 383 } 402 384 403 - if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) && 404 - smb2_validate_credit_charge(hdr)) { 405 - work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); 406 - return 1; 407 - } 408 - 409 385 if (smb2_calc_size(hdr, &clc_len)) 410 386 return 1; 411 387 412 388 if (len != clc_len) { 413 389 /* client can return one byte more due to implied bcc[0] */ 414 390 if (clc_len == len + 1) 415 - return 0; 391 + goto validate_credit; 416 392 417 393 /* 418 394 * Some windows servers (win2016) will pad also the final 419 395 * PDU in a compound to 8 bytes. 420 396 */ 421 397 if (ALIGN(clc_len, 8) == len) 422 - return 0; 398 + goto validate_credit; 423 399 424 400 /* 425 401 * windows client also pad up to 8 bytes when compounding. ··· 426 414 "cli req padded more than expected. Length %d not %d for cmd:%d mid:%llu\n", 427 415 len, clc_len, command, 428 416 le64_to_cpu(hdr->MessageId)); 429 - return 0; 417 + goto validate_credit; 430 418 } 431 419 432 420 ksmbd_debug(SMB, ··· 434 422 len, clc_len, command, 435 423 le64_to_cpu(hdr->MessageId)); 436 424 425 + return 1; 426 + } 427 + 428 + validate_credit: 429 + if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) && 430 + smb2_validate_credit_charge(work->conn, hdr)) { 431 + work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); 437 432 return 1; 438 433 } 439 434
+3
fs/ksmbd/smb2ops.c
··· 284 284 285 285 void init_smb2_max_read_size(unsigned int sz) 286 286 { 287 + sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE); 287 288 smb21_server_values.max_read_size = sz; 288 289 smb30_server_values.max_read_size = sz; 289 290 smb302_server_values.max_read_size = sz; ··· 293 292 294 293 void init_smb2_max_write_size(unsigned int sz) 295 294 { 295 + sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE); 296 296 smb21_server_values.max_write_size = sz; 297 297 smb30_server_values.max_write_size = sz; 298 298 smb302_server_values.max_write_size = sz; ··· 302 300 303 301 void init_smb2_max_trans_size(unsigned int sz) 304 302 { 303 + sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE); 305 304 smb21_server_values.max_trans_size = sz; 306 305 smb30_server_values.max_trans_size = sz; 307 306 smb302_server_values.max_trans_size = sz;
+228 -124
fs/ksmbd/smb2pdu.c
··· 292 292 return 0; 293 293 } 294 294 295 - static int smb2_consume_credit_charge(struct ksmbd_work *work, 296 - unsigned short credit_charge) 297 - { 298 - struct ksmbd_conn *conn = work->conn; 299 - unsigned int rsp_credits = 1; 300 - 301 - if (!conn->total_credits) 302 - return 0; 303 - 304 - if (credit_charge > 0) 305 - rsp_credits = credit_charge; 306 - 307 - conn->total_credits -= rsp_credits; 308 - return rsp_credits; 309 - } 310 - 311 295 /** 312 296 * smb2_set_rsp_credits() - set number of credits in response buffer 313 297 * @work: smb work containing smb response buffer ··· 301 317 struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); 302 318 struct smb2_hdr *hdr = ksmbd_resp_buf_next(work); 303 319 struct ksmbd_conn *conn = work->conn; 304 - unsigned short credits_requested = le16_to_cpu(req_hdr->CreditRequest); 305 - unsigned short credit_charge = 1, credits_granted = 0; 306 - unsigned short aux_max, aux_credits, min_credits; 307 - int rsp_credit_charge; 320 + unsigned short credits_requested; 321 + unsigned short credit_charge, credits_granted = 0; 322 + unsigned short aux_max, aux_credits; 308 323 309 - if (hdr->Command == SMB2_CANCEL) 310 - goto out; 324 + if (work->send_no_response) 325 + return 0; 311 326 312 - /* get default minimum credits by shifting maximum credits by 4 */ 313 - min_credits = conn->max_credits >> 4; 327 + hdr->CreditCharge = req_hdr->CreditCharge; 314 328 315 - if (conn->total_credits >= conn->max_credits) { 329 + if (conn->total_credits > conn->max_credits) { 330 + hdr->CreditRequest = 0; 316 331 pr_err("Total credits overflow: %d\n", conn->total_credits); 317 - conn->total_credits = min_credits; 318 - } 319 - 320 - rsp_credit_charge = 321 - smb2_consume_credit_charge(work, le16_to_cpu(req_hdr->CreditCharge)); 322 - if (rsp_credit_charge < 0) 323 332 return -EINVAL; 324 - 325 - hdr->CreditCharge = cpu_to_le16(rsp_credit_charge); 326 - 327 - if (credits_requested > 0) { 328 - aux_credits = credits_requested - 1; 329 - aux_max 
= 32; 330 - if (hdr->Command == SMB2_NEGOTIATE) 331 - aux_max = 0; 332 - aux_credits = (aux_credits < aux_max) ? aux_credits : aux_max; 333 - credits_granted = aux_credits + credit_charge; 334 - 335 - /* if credits granted per client is getting bigger than default 336 - * minimum credits then we should wrap it up within the limits. 337 - */ 338 - if ((conn->total_credits + credits_granted) > min_credits) 339 - credits_granted = min_credits - conn->total_credits; 340 - /* 341 - * TODO: Need to adjuct CreditRequest value according to 342 - * current cpu load 343 - */ 344 - } else if (conn->total_credits == 0) { 345 - credits_granted = 1; 346 333 } 334 + 335 + credit_charge = max_t(unsigned short, 336 + le16_to_cpu(req_hdr->CreditCharge), 1); 337 + credits_requested = max_t(unsigned short, 338 + le16_to_cpu(req_hdr->CreditRequest), 1); 339 + 340 + /* according to smb2.credits smbtorture, Windows server 341 + * 2016 or later grant up to 8192 credits at once. 342 + * 343 + * TODO: Need to adjuct CreditRequest value according to 344 + * current cpu load 345 + */ 346 + aux_credits = credits_requested - 1; 347 + if (hdr->Command == SMB2_NEGOTIATE) 348 + aux_max = 0; 349 + else 350 + aux_max = conn->max_credits - credit_charge; 351 + aux_credits = min_t(unsigned short, aux_credits, aux_max); 352 + credits_granted = credit_charge + aux_credits; 353 + 354 + if (conn->max_credits - conn->total_credits < credits_granted) 355 + credits_granted = conn->max_credits - 356 + conn->total_credits; 347 357 348 358 conn->total_credits += credits_granted; 349 359 work->credits_granted += credits_granted; ··· 346 368 /* Update CreditRequest in last request */ 347 369 hdr->CreditRequest = cpu_to_le16(work->credits_granted); 348 370 } 349 - out: 350 371 ksmbd_debug(SMB, 351 372 "credits: requested[%d] granted[%d] total_granted[%d]\n", 352 373 credits_requested, credits_granted, ··· 449 472 return false; 450 473 } 451 474 475 + if ((u64)get_rfc1002_len(work->response_buf) + 
MAX_CIFS_SMALL_BUFFER_SIZE > 476 + work->response_sz) { 477 + pr_err("next response offset exceeds response buffer size\n"); 478 + return false; 479 + } 480 + 452 481 ksmbd_debug(SMB, "got SMB2 chained command\n"); 453 482 init_chained_smb2_rsp(work); 454 483 return true; ··· 524 541 { 525 542 struct smb2_hdr *hdr = work->request_buf; 526 543 size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE; 527 - size_t large_sz = work->conn->vals->max_trans_size + MAX_SMB2_HDR_SIZE; 544 + size_t large_sz = small_sz + work->conn->vals->max_trans_size; 528 545 size_t sz = small_sz; 529 546 int cmd = le16_to_cpu(hdr->Command); 530 547 ··· 1257 1274 return 0; 1258 1275 } 1259 1276 1260 - static int decode_negotiation_token(struct ksmbd_work *work, 1261 - struct negotiate_message *negblob) 1277 + static int decode_negotiation_token(struct ksmbd_conn *conn, 1278 + struct negotiate_message *negblob, 1279 + size_t sz) 1262 1280 { 1263 - struct ksmbd_conn *conn = work->conn; 1264 - struct smb2_sess_setup_req *req; 1265 - int sz; 1266 - 1267 1281 if (!conn->use_spnego) 1268 1282 return -EINVAL; 1269 - 1270 - req = work->request_buf; 1271 - sz = le16_to_cpu(req->SecurityBufferLength); 1272 1283 1273 1284 if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) { 1274 1285 if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) { ··· 1275 1298 } 1276 1299 1277 1300 static int ntlm_negotiate(struct ksmbd_work *work, 1278 - struct negotiate_message *negblob) 1301 + struct negotiate_message *negblob, 1302 + size_t negblob_len) 1279 1303 { 1280 - struct smb2_sess_setup_req *req = work->request_buf; 1281 1304 struct smb2_sess_setup_rsp *rsp = work->response_buf; 1282 1305 struct challenge_message *chgblob; 1283 1306 unsigned char *spnego_blob = NULL; ··· 1286 1309 int sz, rc; 1287 1310 1288 1311 ksmbd_debug(SMB, "negotiate phase\n"); 1289 - sz = le16_to_cpu(req->SecurityBufferLength); 1290 - rc = ksmbd_decode_ntlmssp_neg_blob(negblob, sz, work->sess); 1312 + rc = 
ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->sess); 1291 1313 if (rc) 1292 1314 return rc; 1293 1315 ··· 1354 1378 struct authenticate_message *authblob; 1355 1379 struct ksmbd_user *user; 1356 1380 char *name; 1357 - int sz; 1381 + unsigned int auth_msg_len, name_off, name_len, secbuf_len; 1358 1382 1383 + secbuf_len = le16_to_cpu(req->SecurityBufferLength); 1384 + if (secbuf_len < sizeof(struct authenticate_message)) { 1385 + ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len); 1386 + return NULL; 1387 + } 1359 1388 authblob = user_authblob(conn, req); 1360 - sz = le32_to_cpu(authblob->UserName.BufferOffset); 1361 - name = smb_strndup_from_utf16((const char *)authblob + sz, 1362 - le16_to_cpu(authblob->UserName.Length), 1389 + name_off = le32_to_cpu(authblob->UserName.BufferOffset); 1390 + name_len = le16_to_cpu(authblob->UserName.Length); 1391 + auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len; 1392 + 1393 + if (auth_msg_len < (u64)name_off + name_len) 1394 + return NULL; 1395 + 1396 + name = smb_strndup_from_utf16((const char *)authblob + name_off, 1397 + name_len, 1363 1398 true, 1364 1399 conn->local_nls); 1365 1400 if (IS_ERR(name)) { ··· 1616 1629 struct smb2_sess_setup_rsp *rsp = work->response_buf; 1617 1630 struct ksmbd_session *sess; 1618 1631 struct negotiate_message *negblob; 1632 + unsigned int negblob_len, negblob_off; 1619 1633 int rc = 0; 1620 1634 1621 1635 ksmbd_debug(SMB, "Received request for session setup\n"); ··· 1697 1709 if (sess->state == SMB2_SESSION_EXPIRED) 1698 1710 sess->state = SMB2_SESSION_IN_PROGRESS; 1699 1711 1700 - negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId + 1701 - le16_to_cpu(req->SecurityBufferOffset)); 1712 + negblob_off = le16_to_cpu(req->SecurityBufferOffset); 1713 + negblob_len = le16_to_cpu(req->SecurityBufferLength); 1714 + if (negblob_off < (offsetof(struct smb2_sess_setup_req, Buffer) - 4) || 1715 + negblob_len < offsetof(struct negotiate_message, 
NegotiateFlags)) 1716 + return -EINVAL; 1702 1717 1703 - if (decode_negotiation_token(work, negblob) == 0) { 1718 + negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId + 1719 + negblob_off); 1720 + 1721 + if (decode_negotiation_token(conn, negblob, negblob_len) == 0) { 1704 1722 if (conn->mechToken) 1705 1723 negblob = (struct negotiate_message *)conn->mechToken; 1706 1724 } ··· 1730 1736 sess->Preauth_HashValue = NULL; 1731 1737 } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) { 1732 1738 if (negblob->MessageType == NtLmNegotiate) { 1733 - rc = ntlm_negotiate(work, negblob); 1739 + rc = ntlm_negotiate(work, negblob, negblob_len); 1734 1740 if (rc) 1735 1741 goto out_err; 1736 1742 rsp->hdr.Status = ··· 1790 1796 conn->mechToken = NULL; 1791 1797 } 1792 1798 1793 - if (rc < 0 && sess) { 1794 - ksmbd_session_destroy(sess); 1795 - work->sess = NULL; 1799 + if (rc < 0) { 1800 + /* 1801 + * SecurityBufferOffset should be set to zero 1802 + * in session setup error response. 1803 + */ 1804 + rsp->SecurityBufferOffset = 0; 1805 + 1806 + if (sess) { 1807 + bool try_delay = false; 1808 + 1809 + /* 1810 + * To avoid dictionary attacks (repeated session setups rapidly sent) to 1811 + * connect to server, ksmbd make a delay of a 5 seconds on session setup 1812 + * failure to make it harder to send enough random connection requests 1813 + * to break into a server. 
1814 + */ 1815 + if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION) 1816 + try_delay = true; 1817 + 1818 + ksmbd_session_destroy(sess); 1819 + work->sess = NULL; 1820 + if (try_delay) 1821 + ssleep(5); 1822 + } 1796 1823 } 1797 1824 1798 1825 return rc; ··· 3794 3779 return 0; 3795 3780 } 3796 3781 3782 + static int smb2_calc_max_out_buf_len(struct ksmbd_work *work, 3783 + unsigned short hdr2_len, 3784 + unsigned int out_buf_len) 3785 + { 3786 + int free_len; 3787 + 3788 + if (out_buf_len > work->conn->vals->max_trans_size) 3789 + return -EINVAL; 3790 + 3791 + free_len = (int)(work->response_sz - 3792 + (get_rfc1002_len(work->response_buf) + 4)) - 3793 + hdr2_len; 3794 + if (free_len < 0) 3795 + return -EINVAL; 3796 + 3797 + return min_t(int, out_buf_len, free_len); 3798 + } 3799 + 3797 3800 int smb2_query_dir(struct ksmbd_work *work) 3798 3801 { 3799 3802 struct ksmbd_conn *conn = work->conn; ··· 3888 3855 memset(&d_info, 0, sizeof(struct ksmbd_dir_info)); 3889 3856 d_info.wptr = (char *)rsp->Buffer; 3890 3857 d_info.rptr = (char *)rsp->Buffer; 3891 - d_info.out_buf_len = (work->response_sz - (get_rfc1002_len(rsp_org) + 4)); 3892 - d_info.out_buf_len = min_t(int, d_info.out_buf_len, le32_to_cpu(req->OutputBufferLength)) - 3893 - sizeof(struct smb2_query_directory_rsp); 3858 + d_info.out_buf_len = 3859 + smb2_calc_max_out_buf_len(work, 8, 3860 + le32_to_cpu(req->OutputBufferLength)); 3861 + if (d_info.out_buf_len < 0) { 3862 + rc = -EINVAL; 3863 + goto err_out; 3864 + } 3894 3865 d_info.flags = srch_flag; 3895 3866 3896 3867 /* ··· 4128 4091 le32_to_cpu(req->Flags)); 4129 4092 } 4130 4093 4131 - buf_free_len = work->response_sz - 4132 - (get_rfc1002_len(rsp_org) + 4) - 4133 - sizeof(struct smb2_query_info_rsp); 4134 - 4135 - if (le32_to_cpu(req->OutputBufferLength) < buf_free_len) 4136 - buf_free_len = le32_to_cpu(req->OutputBufferLength); 4094 + buf_free_len = 4095 + smb2_calc_max_out_buf_len(work, 8, 4096 + le32_to_cpu(req->OutputBufferLength)); 
4097 + if (buf_free_len < 0) 4098 + return -EINVAL; 4137 4099 4138 4100 rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list); 4139 4101 if (rc < 0) { ··· 4443 4407 struct path *path = &fp->filp->f_path; 4444 4408 ssize_t xattr_list_len; 4445 4409 int nbytes = 0, streamlen, stream_name_len, next, idx = 0; 4410 + int buf_free_len; 4411 + struct smb2_query_info_req *req = ksmbd_req_buf_next(work); 4446 4412 4447 4413 generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp), 4448 4414 &stat); ··· 4457 4419 ksmbd_debug(SMB, "empty xattr in the file\n"); 4458 4420 goto out; 4459 4421 } 4422 + 4423 + buf_free_len = 4424 + smb2_calc_max_out_buf_len(work, 8, 4425 + le32_to_cpu(req->OutputBufferLength)); 4426 + if (buf_free_len < 0) 4427 + goto out; 4460 4428 4461 4429 while (idx < xattr_list_len) { 4462 4430 stream_name = xattr_list + idx; ··· 4488 4444 streamlen = snprintf(stream_buf, streamlen + 1, 4489 4445 ":%s", &stream_name[XATTR_NAME_STREAM_LEN]); 4490 4446 4447 + next = sizeof(struct smb2_file_stream_info) + streamlen * 2; 4448 + if (next > buf_free_len) 4449 + break; 4450 + 4491 4451 file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes]; 4492 4452 streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName, 4493 4453 stream_buf, streamlen, ··· 4502 4454 file_info->StreamSize = cpu_to_le64(stream_name_len); 4503 4455 file_info->StreamAllocationSize = cpu_to_le64(stream_name_len); 4504 4456 4505 - next = sizeof(struct smb2_file_stream_info) + streamlen; 4506 4457 nbytes += next; 4458 + buf_free_len -= next; 4507 4459 file_info->NextEntryOffset = cpu_to_le32(next); 4508 4460 } 4509 4461 4510 - if (!S_ISDIR(stat.mode)) { 4462 + if (!S_ISDIR(stat.mode) && 4463 + buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) { 4511 4464 file_info = (struct smb2_file_stream_info *) 4512 4465 &rsp->Buffer[nbytes]; 4513 4466 streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName, ··· 6269 6220 (offsetof(struct smb2_write_req, Buffer) - 4)) { 6270 
6221 data_buf = (char *)&req->Buffer[0]; 6271 6222 } else { 6272 - if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) || 6273 - (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) { 6223 + if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) { 6274 6224 pr_err("invalid write data offset %u, smb_len %u\n", 6275 6225 le16_to_cpu(req->DataOffset), 6276 6226 get_rfc1002_len(req)); ··· 6427 6379 (offsetof(struct smb2_write_req, Buffer) - 4)) { 6428 6380 data_buf = (char *)&req->Buffer[0]; 6429 6381 } else { 6430 - if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) || 6431 - (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) { 6382 + if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) { 6432 6383 pr_err("invalid write data offset %u, smb_len %u\n", 6433 6384 le16_to_cpu(req->DataOffset), 6434 6385 get_rfc1002_len(req)); ··· 7070 7023 return err; 7071 7024 } 7072 7025 7073 - static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req, 7026 + static int fsctl_copychunk(struct ksmbd_work *work, 7027 + struct copychunk_ioctl_req *ci_req, 7028 + unsigned int cnt_code, 7029 + unsigned int input_count, 7030 + unsigned long long volatile_id, 7031 + unsigned long long persistent_id, 7074 7032 struct smb2_ioctl_rsp *rsp) 7075 7033 { 7076 - struct copychunk_ioctl_req *ci_req; 7077 7034 struct copychunk_ioctl_rsp *ci_rsp; 7078 7035 struct ksmbd_file *src_fp = NULL, *dst_fp = NULL; 7079 7036 struct srv_copychunk *chunks; 7080 7037 unsigned int i, chunk_count, chunk_count_written = 0; 7081 7038 unsigned int chunk_size_written = 0; 7082 7039 loff_t total_size_written = 0; 7083 - int ret, cnt_code; 7040 + int ret = 0; 7084 7041 7085 - cnt_code = le32_to_cpu(req->CntCode); 7086 - ci_req = (struct copychunk_ioctl_req *)&req->Buffer[0]; 7087 7042 ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0]; 7088 7043 7089 - rsp->VolatileFileId = req->VolatileFileId; 7090 - rsp->PersistentFileId = 
req->PersistentFileId; 7044 + rsp->VolatileFileId = cpu_to_le64(volatile_id); 7045 + rsp->PersistentFileId = cpu_to_le64(persistent_id); 7091 7046 ci_rsp->ChunksWritten = 7092 7047 cpu_to_le32(ksmbd_server_side_copy_max_chunk_count()); 7093 7048 ci_rsp->ChunkBytesWritten = ··· 7099 7050 7100 7051 chunks = (struct srv_copychunk *)&ci_req->Chunks[0]; 7101 7052 chunk_count = le32_to_cpu(ci_req->ChunkCount); 7053 + if (chunk_count == 0) 7054 + goto out; 7102 7055 total_size_written = 0; 7103 7056 7104 7057 /* verify the SRV_COPYCHUNK_COPY packet */ 7105 7058 if (chunk_count > ksmbd_server_side_copy_max_chunk_count() || 7106 - le32_to_cpu(req->InputCount) < 7107 - offsetof(struct copychunk_ioctl_req, Chunks) + 7059 + input_count < offsetof(struct copychunk_ioctl_req, Chunks) + 7108 7060 chunk_count * sizeof(struct srv_copychunk)) { 7109 7061 rsp->hdr.Status = STATUS_INVALID_PARAMETER; 7110 7062 return -EINVAL; ··· 7126 7076 7127 7077 src_fp = ksmbd_lookup_foreign_fd(work, 7128 7078 le64_to_cpu(ci_req->ResumeKey[0])); 7129 - dst_fp = ksmbd_lookup_fd_slow(work, 7130 - le64_to_cpu(req->VolatileFileId), 7131 - le64_to_cpu(req->PersistentFileId)); 7079 + dst_fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id); 7132 7080 ret = -EINVAL; 7133 7081 if (!src_fp || 7134 7082 src_fp->persistent_id != le64_to_cpu(ci_req->ResumeKey[1])) { ··· 7201 7153 } 7202 7154 7203 7155 static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn, 7204 - struct smb2_ioctl_req *req, 7205 - struct smb2_ioctl_rsp *rsp) 7156 + struct smb2_ioctl_rsp *rsp, 7157 + unsigned int out_buf_len) 7206 7158 { 7207 7159 struct network_interface_info_ioctl_rsp *nii_rsp = NULL; 7208 7160 int nbytes = 0; ··· 7214 7166 7215 7167 rtnl_lock(); 7216 7168 for_each_netdev(&init_net, netdev) { 7169 + if (out_buf_len < 7170 + nbytes + sizeof(struct network_interface_info_ioctl_rsp)) { 7171 + rtnl_unlock(); 7172 + return -ENOSPC; 7173 + } 7174 + 7217 7175 if (netdev->type == ARPHRD_LOOPBACK) 7218 7176 continue; 
7219 7177 ··· 7299 7245 if (nii_rsp) 7300 7246 nii_rsp->Next = 0; 7301 7247 7302 - if (!nbytes) { 7303 - rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL; 7304 - return -EINVAL; 7305 - } 7306 - 7307 7248 rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID); 7308 7249 rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID); 7309 7250 return nbytes; ··· 7306 7257 7307 7258 static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn, 7308 7259 struct validate_negotiate_info_req *neg_req, 7309 - struct validate_negotiate_info_rsp *neg_rsp) 7260 + struct validate_negotiate_info_rsp *neg_rsp, 7261 + unsigned int in_buf_len) 7310 7262 { 7311 7263 int ret = 0; 7312 7264 int dialect; 7265 + 7266 + if (in_buf_len < sizeof(struct validate_negotiate_info_req) + 7267 + le16_to_cpu(neg_req->DialectCount) * sizeof(__le16)) 7268 + return -EINVAL; 7313 7269 7314 7270 dialect = ksmbd_lookup_dialect_by_id(neg_req->Dialects, 7315 7271 neg_req->DialectCount); ··· 7349 7295 static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id, 7350 7296 struct file_allocated_range_buffer *qar_req, 7351 7297 struct file_allocated_range_buffer *qar_rsp, 7352 - int in_count, int *out_count) 7298 + unsigned int in_count, unsigned int *out_count) 7353 7299 { 7354 7300 struct ksmbd_file *fp; 7355 7301 loff_t start, length; ··· 7376 7322 } 7377 7323 7378 7324 static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id, 7379 - int out_buf_len, struct smb2_ioctl_req *req, 7325 + unsigned int out_buf_len, 7326 + struct smb2_ioctl_req *req, 7380 7327 struct smb2_ioctl_rsp *rsp) 7381 7328 { 7382 7329 struct ksmbd_rpc_command *rpc_resp; ··· 7491 7436 { 7492 7437 struct smb2_ioctl_req *req; 7493 7438 struct smb2_ioctl_rsp *rsp, *rsp_org; 7494 - int cnt_code, nbytes = 0; 7495 - int out_buf_len; 7439 + unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len; 7496 7440 u64 id = KSMBD_NO_FID; 7497 7441 struct ksmbd_conn *conn = work->conn; 7498 7442 int ret = 0; ··· 7519 7465 } 7520 7466 7521 7467 cnt_code = 
le32_to_cpu(req->CntCode); 7522 - out_buf_len = le32_to_cpu(req->MaxOutputResponse); 7523 - out_buf_len = min(KSMBD_IPC_MAX_PAYLOAD, out_buf_len); 7468 + ret = smb2_calc_max_out_buf_len(work, 48, 7469 + le32_to_cpu(req->MaxOutputResponse)); 7470 + if (ret < 0) { 7471 + rsp->hdr.Status = STATUS_INVALID_PARAMETER; 7472 + goto out; 7473 + } 7474 + out_buf_len = (unsigned int)ret; 7475 + in_buf_len = le32_to_cpu(req->InputCount); 7524 7476 7525 7477 switch (cnt_code) { 7526 7478 case FSCTL_DFS_GET_REFERRALS: ··· 7554 7494 break; 7555 7495 } 7556 7496 case FSCTL_PIPE_TRANSCEIVE: 7497 + out_buf_len = min_t(u32, KSMBD_IPC_MAX_PAYLOAD, out_buf_len); 7557 7498 nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp); 7558 7499 break; 7559 7500 case FSCTL_VALIDATE_NEGOTIATE_INFO: ··· 7563 7502 goto out; 7564 7503 } 7565 7504 7505 + if (in_buf_len < sizeof(struct validate_negotiate_info_req)) 7506 + return -EINVAL; 7507 + 7508 + if (out_buf_len < sizeof(struct validate_negotiate_info_rsp)) 7509 + return -EINVAL; 7510 + 7566 7511 ret = fsctl_validate_negotiate_info(conn, 7567 7512 (struct validate_negotiate_info_req *)&req->Buffer[0], 7568 - (struct validate_negotiate_info_rsp *)&rsp->Buffer[0]); 7513 + (struct validate_negotiate_info_rsp *)&rsp->Buffer[0], 7514 + in_buf_len); 7569 7515 if (ret < 0) 7570 7516 goto out; 7571 7517 ··· 7581 7513 rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID); 7582 7514 break; 7583 7515 case FSCTL_QUERY_NETWORK_INTERFACE_INFO: 7584 - nbytes = fsctl_query_iface_info_ioctl(conn, req, rsp); 7585 - if (nbytes < 0) 7516 + ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len); 7517 + if (ret < 0) 7586 7518 goto out; 7519 + nbytes = ret; 7587 7520 break; 7588 7521 case FSCTL_REQUEST_RESUME_KEY: 7589 7522 if (out_buf_len < sizeof(struct resume_key_ioctl_rsp)) { ··· 7609 7540 goto out; 7610 7541 } 7611 7542 7543 + if (in_buf_len < sizeof(struct copychunk_ioctl_req)) { 7544 + ret = -EINVAL; 7545 + goto out; 7546 + } 7547 + 7612 7548 if 
(out_buf_len < sizeof(struct copychunk_ioctl_rsp)) { 7613 7549 ret = -EINVAL; 7614 7550 goto out; 7615 7551 } 7616 7552 7617 7553 nbytes = sizeof(struct copychunk_ioctl_rsp); 7618 - fsctl_copychunk(work, req, rsp); 7554 + rsp->VolatileFileId = req->VolatileFileId; 7555 + rsp->PersistentFileId = req->PersistentFileId; 7556 + fsctl_copychunk(work, 7557 + (struct copychunk_ioctl_req *)&req->Buffer[0], 7558 + le32_to_cpu(req->CntCode), 7559 + le32_to_cpu(req->InputCount), 7560 + le64_to_cpu(req->VolatileFileId), 7561 + le64_to_cpu(req->PersistentFileId), 7562 + rsp); 7619 7563 break; 7620 7564 case FSCTL_SET_SPARSE: 7565 + if (in_buf_len < sizeof(struct file_sparse)) { 7566 + ret = -EINVAL; 7567 + goto out; 7568 + } 7569 + 7621 7570 ret = fsctl_set_sparse(work, id, 7622 7571 (struct file_sparse *)&req->Buffer[0]); 7623 7572 if (ret < 0) ··· 7651 7564 ksmbd_debug(SMB, 7652 7565 "User does not have write permission\n"); 7653 7566 ret = -EACCES; 7567 + goto out; 7568 + } 7569 + 7570 + if (in_buf_len < sizeof(struct file_zero_data_information)) { 7571 + ret = -EINVAL; 7654 7572 goto out; 7655 7573 } 7656 7574 ··· 7678 7586 break; 7679 7587 } 7680 7588 case FSCTL_QUERY_ALLOCATED_RANGES: 7589 + if (in_buf_len < sizeof(struct file_allocated_range_buffer)) { 7590 + ret = -EINVAL; 7591 + goto out; 7592 + } 7593 + 7681 7594 ret = fsctl_query_allocated_ranges(work, id, 7682 7595 (struct file_allocated_range_buffer *)&req->Buffer[0], 7683 7596 (struct file_allocated_range_buffer *)&rsp->Buffer[0], ··· 7722 7625 struct ksmbd_file *fp_in, *fp_out = NULL; 7723 7626 struct duplicate_extents_to_file *dup_ext; 7724 7627 loff_t src_off, dst_off, length, cloned; 7628 + 7629 + if (in_buf_len < sizeof(struct duplicate_extents_to_file)) { 7630 + ret = -EINVAL; 7631 + goto out; 7632 + } 7725 7633 7726 7634 dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0]; 7727 7635 ··· 7798 7696 rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND; 7799 7697 else if (ret == -EOPNOTSUPP) 7800 7698 
rsp->hdr.Status = STATUS_NOT_SUPPORTED; 7699 + else if (ret == -ENOSPC) 7700 + rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL; 7801 7701 else if (ret < 0 || rsp->hdr.Status == 0) 7802 7702 rsp->hdr.Status = STATUS_INVALID_PARAMETER; 7803 7703 smb2_set_err_rsp(work);
+2
fs/ksmbd/smb2pdu.h
··· 113 113 #define SMB21_DEFAULT_IOSIZE (1024 * 1024) 114 114 #define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024) 115 115 #define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024) 116 + #define SMB3_MIN_IOSIZE (64 * 1024) 117 + #define SMB3_MAX_IOSIZE (8 * 1024 * 1024) 116 118 117 119 /* 118 120 * SMB2 Header Definition
+2 -1
fs/ksmbd/transport_ipc.c
··· 601 601 return ret; 602 602 } 603 603 604 - int ksmbd_ipc_logout_request(const char *account) 604 + int ksmbd_ipc_logout_request(const char *account, int flags) 605 605 { 606 606 struct ksmbd_ipc_msg *msg; 607 607 struct ksmbd_logout_request *req; ··· 616 616 617 617 msg->type = KSMBD_EVENT_LOGOUT_REQUEST; 618 618 req = (struct ksmbd_logout_request *)msg->payload; 619 + req->account_flags = flags; 619 620 strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ); 620 621 621 622 ret = ipc_msg_send(msg);
+1 -1
fs/ksmbd/transport_ipc.h
··· 25 25 struct sockaddr *peer_addr); 26 26 int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id, 27 27 unsigned long long connect_id); 28 - int ksmbd_ipc_logout_request(const char *account); 28 + int ksmbd_ipc_logout_request(const char *account, int flags); 29 29 struct ksmbd_share_config_response * 30 30 ksmbd_ipc_share_config_request(const char *name); 31 31 struct ksmbd_spnego_authen_response *
+19 -2
fs/ksmbd/transport_rdma.c
··· 549 549 550 550 switch (recvmsg->type) { 551 551 case SMB_DIRECT_MSG_NEGOTIATE_REQ: 552 + if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) { 553 + put_empty_recvmsg(t, recvmsg); 554 + return; 555 + } 552 556 t->negotiation_requested = true; 553 557 t->full_packet_received = true; 554 558 wake_up_interruptible(&t->wait_status); ··· 560 556 case SMB_DIRECT_MSG_DATA_TRANSFER: { 561 557 struct smb_direct_data_transfer *data_transfer = 562 558 (struct smb_direct_data_transfer *)recvmsg->packet; 563 - int data_length = le32_to_cpu(data_transfer->data_length); 559 + unsigned int data_length; 564 560 int avail_recvmsg_count, receive_credits; 565 561 562 + if (wc->byte_len < 563 + offsetof(struct smb_direct_data_transfer, padding)) { 564 + put_empty_recvmsg(t, recvmsg); 565 + return; 566 + } 567 + 568 + data_length = le32_to_cpu(data_transfer->data_length); 566 569 if (data_length) { 570 + if (wc->byte_len < sizeof(struct smb_direct_data_transfer) + 571 + (u64)data_length) { 572 + put_empty_recvmsg(t, recvmsg); 573 + return; 574 + } 575 + 567 576 if (t->full_packet_received) 568 577 recvmsg->first_segment = true; 569 578 ··· 585 568 else 586 569 t->full_packet_received = true; 587 570 588 - enqueue_reassembly(t, recvmsg, data_length); 571 + enqueue_reassembly(t, recvmsg, (int)data_length); 589 572 wake_up_interruptible(&t->wait_reassembly_queue); 590 573 591 574 spin_lock(&t->receive_credit_lock);
+1 -1
fs/ksmbd/vfs.c
··· 1023 1023 1024 1024 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length, 1025 1025 struct file_allocated_range_buffer *ranges, 1026 - int in_count, int *out_count) 1026 + unsigned int in_count, unsigned int *out_count) 1027 1027 { 1028 1028 struct file *f = fp->filp; 1029 1029 struct inode *inode = file_inode(fp->filp);
+1 -1
fs/ksmbd/vfs.h
··· 166 166 struct file_allocated_range_buffer; 167 167 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length, 168 168 struct file_allocated_range_buffer *ranges, 169 - int in_count, int *out_count); 169 + unsigned int in_count, unsigned int *out_count); 170 170 int ksmbd_vfs_unlink(struct user_namespace *user_ns, 171 171 struct dentry *dir, struct dentry *dentry); 172 172 void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
+7 -2
include/acpi/platform/acgcc.h
··· 22 22 #define va_arg(v, l) __builtin_va_arg(v, l) 23 23 #define va_copy(d, s) __builtin_va_copy(d, s) 24 24 #else 25 + #ifdef __KERNEL__ 25 26 #include <linux/stdarg.h> 26 - #endif 27 - #endif 27 + #else 28 + /* Used to build acpi tools */ 29 + #include <stdarg.h> 30 + #endif /* __KERNEL__ */ 31 + #endif /* ACPI_USE_BUILTIN_STDARG */ 32 + #endif /* ! va_arg */ 28 33 29 34 #define ACPI_INLINE __inline__ 30 35
+5 -2
include/linux/bpf.h
··· 931 931 * stored in the map to make sure that all callers and callees have 932 932 * the same prog type and JITed flag. 933 933 */ 934 - enum bpf_prog_type type; 935 - bool jited; 934 + struct { 935 + spinlock_t lock; 936 + enum bpf_prog_type type; 937 + bool jited; 938 + } owner; 936 939 /* Programs with direct jumps into programs part of this array. */ 937 940 struct list_head poke_progs; 938 941 struct bpf_map *map;
+4 -4
include/linux/bpf_types.h
··· 101 101 #endif 102 102 BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) 103 103 BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) 104 - #ifdef CONFIG_NET 105 - BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) 106 - BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops) 107 - BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) 108 104 #ifdef CONFIG_BPF_LSM 109 105 BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops) 110 106 #endif 111 107 BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops) 108 + #ifdef CONFIG_NET 109 + BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) 110 + BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops) 111 + BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) 112 112 BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) 113 113 #if defined(CONFIG_XDP_SOCKETS) 114 114 BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
+1
include/linux/filter.h
··· 1050 1050 extern int bpf_jit_harden; 1051 1051 extern int bpf_jit_kallsyms; 1052 1052 extern long bpf_jit_limit; 1053 + extern long bpf_jit_limit_max; 1053 1054 1054 1055 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); 1055 1056
+1
include/linux/skmsg.h
··· 128 128 struct sk_msg *msg, u32 bytes); 129 129 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, 130 130 int len, int flags); 131 + bool sk_msg_is_readable(struct sock *sk); 131 132 132 133 static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes) 133 134 {
-2
include/net/cfg80211.h
··· 5442 5442 * netdev and may otherwise be used by driver read-only, will be update 5443 5443 * by cfg80211 on change_interface 5444 5444 * @mgmt_registrations: list of registrations for management frames 5445 - * @mgmt_registrations_lock: lock for the list 5446 5445 * @mgmt_registrations_need_update: mgmt registrations were updated, 5447 5446 * need to propagate the update to the driver 5448 5447 * @mtx: mutex used to lock data in this struct, may be used by drivers ··· 5488 5489 u32 identifier; 5489 5490 5490 5491 struct list_head mgmt_registrations; 5491 - spinlock_t mgmt_registrations_lock; 5492 5492 u8 mgmt_registrations_need_update:1; 5493 5493 5494 5494 struct mutex mtx;
+4
include/net/mptcp.h
··· 71 71 struct { 72 72 u64 sndr_key; 73 73 u64 rcvr_key; 74 + u64 data_seq; 75 + u32 subflow_seq; 76 + u16 data_len; 77 + __sum16 csum; 74 78 }; 75 79 struct { 76 80 struct mptcp_addr_info addr;
+7 -1
include/net/sock.h
··· 1213 1213 int (*forward_alloc_get)(const struct sock *sk); 1214 1214 1215 1215 bool (*stream_memory_free)(const struct sock *sk, int wake); 1216 - bool (*stream_memory_read)(const struct sock *sk); 1216 + bool (*sock_is_readable)(struct sock *sk); 1217 1217 /* Memory pressure */ 1218 1218 void (*enter_memory_pressure)(struct sock *sk); 1219 1219 void (*leave_memory_pressure)(struct sock *sk); ··· 2854 2854 int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, 2855 2855 sockptr_t optval, int optlen, bool old_timeval); 2856 2856 2857 + static inline bool sk_is_readable(struct sock *sk) 2858 + { 2859 + if (sk->sk_prot->sock_is_readable) 2860 + return sk->sk_prot->sock_is_readable(sk); 2861 + return false; 2862 + } 2857 2863 #endif /* _SOCK_H */
+3 -8
include/net/tls.h
··· 361 361 int __user *optlen); 362 362 int tls_sk_attach(struct sock *sk, int optname, char __user *optval, 363 363 unsigned int optlen); 364 + void tls_err_abort(struct sock *sk, int err); 364 365 365 366 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); 366 367 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); ··· 379 378 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx); 380 379 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 381 380 int nonblock, int flags, int *addr_len); 382 - bool tls_sw_stream_read(const struct sock *sk); 381 + bool tls_sw_sock_is_readable(struct sock *sk); 383 382 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, 384 383 struct pipe_inode_info *pipe, 385 384 size_t len, unsigned int flags); ··· 470 469 #endif 471 470 } 472 471 473 - static inline void tls_err_abort(struct sock *sk, int err) 474 - { 475 - sk->sk_err = err; 476 - sk_error_report(sk); 477 - } 478 - 479 472 static inline bool tls_bigint_increment(unsigned char *seq, int len) 480 473 { 481 474 int i; ··· 510 515 struct cipher_context *ctx) 511 516 { 512 517 if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size)) 513 - tls_err_abort(sk, EBADMSG); 518 + tls_err_abort(sk, -EBADMSG); 514 519 515 520 if (prot->version != TLS_1_3_VERSION && 516 521 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
+3 -2
include/net/udp.h
··· 494 494 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial 495 495 * packets in udp_gro_complete_segment. As does UDP GSO, verified by 496 496 * udp_send_skb. But when those packets are looped in dev_loopback_xmit 497 - * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this 498 - * specific case, where PARTIAL is both correct and required. 497 + * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY. 498 + * Reset in this specific case, where PARTIAL is both correct and 499 + * required. 499 500 */ 500 501 if (skb->pkt_type == PACKET_LOOPBACK) 501 502 skb->ip_summed = CHECKSUM_PARTIAL;
+1
kernel/bpf/arraymap.c
··· 1071 1071 INIT_WORK(&aux->work, prog_array_map_clear_deferred); 1072 1072 INIT_LIST_HEAD(&aux->poke_progs); 1073 1073 mutex_init(&aux->poke_mutex); 1074 + spin_lock_init(&aux->owner.lock); 1074 1075 1075 1076 map = array_map_alloc(attr); 1076 1077 if (IS_ERR(map)) {
+16 -8
kernel/bpf/core.c
··· 524 524 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 525 525 int bpf_jit_harden __read_mostly; 526 526 long bpf_jit_limit __read_mostly; 527 + long bpf_jit_limit_max __read_mostly; 527 528 528 529 static void 529 530 bpf_prog_ksym_set_addr(struct bpf_prog *prog) ··· 818 817 static int __init bpf_jit_charge_init(void) 819 818 { 820 819 /* Only used as heuristic here to derive limit. */ 821 - bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2, 820 + bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); 821 + bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, 822 822 PAGE_SIZE), LONG_MAX); 823 823 return 0; 824 824 } ··· 1823 1821 bool bpf_prog_array_compatible(struct bpf_array *array, 1824 1822 const struct bpf_prog *fp) 1825 1823 { 1824 + bool ret; 1825 + 1826 1826 if (fp->kprobe_override) 1827 1827 return false; 1828 1828 1829 - if (!array->aux->type) { 1829 + spin_lock(&array->aux->owner.lock); 1830 + 1831 + if (!array->aux->owner.type) { 1830 1832 /* There's no owner yet where we could check for 1831 1833 * compatibility. 1832 1834 */ 1833 - array->aux->type = fp->type; 1834 - array->aux->jited = fp->jited; 1835 - return true; 1835 + array->aux->owner.type = fp->type; 1836 + array->aux->owner.jited = fp->jited; 1837 + ret = true; 1838 + } else { 1839 + ret = array->aux->owner.type == fp->type && 1840 + array->aux->owner.jited == fp->jited; 1836 1841 } 1837 - 1838 - return array->aux->type == fp->type && 1839 - array->aux->jited == fp->jited; 1842 + spin_unlock(&array->aux->owner.lock); 1843 + return ret; 1840 1844 } 1841 1845 1842 1846 static int bpf_check_tail_call(const struct bpf_prog *fp)
+7 -4
kernel/bpf/syscall.c
··· 543 543 544 544 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { 545 545 array = container_of(map, struct bpf_array, map); 546 - type = array->aux->type; 547 - jited = array->aux->jited; 546 + spin_lock(&array->aux->owner.lock); 547 + type = array->aux->owner.type; 548 + jited = array->aux->owner.jited; 549 + spin_unlock(&array->aux->owner.lock); 548 550 } 549 551 550 552 seq_printf(m, ··· 1339 1337 void __user *values = u64_to_user_ptr(attr->batch.values); 1340 1338 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1341 1339 u32 value_size, cp, max_count; 1342 - int ufd = attr->map_fd; 1340 + int ufd = attr->batch.map_fd; 1343 1341 void *key, *value; 1344 1342 struct fd f; 1345 1343 int err = 0; 1346 1344 1347 - f = fdget(ufd); 1348 1345 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1349 1346 return -EINVAL; 1350 1347 ··· 1368 1367 return -ENOMEM; 1369 1368 } 1370 1369 1370 + f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */ 1371 1371 for (cp = 0; cp < max_count; cp++) { 1372 1372 err = -EFAULT; 1373 1373 if (copy_from_user(key, keys + cp * map->key_size, ··· 1388 1386 1389 1387 kvfree(value); 1390 1388 kvfree(key); 1389 + fdput(f); 1391 1390 return err; 1392 1391 } 1393 1392
+3 -1
kernel/cgroup/cgroup.c
··· 2187 2187 * And don't kill the default root. 2188 2188 */ 2189 2189 if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root && 2190 - !percpu_ref_is_dying(&root->cgrp.self.refcnt)) 2190 + !percpu_ref_is_dying(&root->cgrp.self.refcnt)) { 2191 + cgroup_bpf_offline(&root->cgrp); 2191 2192 percpu_ref_kill(&root->cgrp.self.refcnt); 2193 + } 2192 2194 cgroup_put(&root->cgrp); 2193 2195 kernfs_kill_sb(sb); 2194 2196 }
+1
kernel/sched/core.c
··· 8795 8795 finish_arch_post_lock_switch(); 8796 8796 } 8797 8797 8798 + scs_task_reset(current); 8798 8799 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 8799 8800 } 8800 8801
+2 -2
kernel/trace/trace_eprobe.c
··· 904 904 905 905 if (IS_ERR(ep)) { 906 906 ret = PTR_ERR(ep); 907 - /* This must return -ENOMEM, else there is a bug */ 908 - WARN_ON_ONCE(ret != -ENOMEM); 907 + /* This must return -ENOMEM or misssing event, else there is a bug */ 908 + WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV); 909 909 ep = NULL; 910 910 goto error; 911 911 }
+6 -5
mm/secretmem.c
··· 18 18 #include <linux/secretmem.h> 19 19 #include <linux/set_memory.h> 20 20 #include <linux/sched/signal.h> 21 - #include <linux/refcount.h> 22 21 23 22 #include <uapi/linux/magic.h> 24 23 ··· 40 41 MODULE_PARM_DESC(secretmem_enable, 41 42 "Enable secretmem and memfd_secret(2) system call"); 42 43 43 - static refcount_t secretmem_users; 44 + static atomic_t secretmem_users; 44 45 45 46 bool secretmem_active(void) 46 47 { 47 - return !!refcount_read(&secretmem_users); 48 + return !!atomic_read(&secretmem_users); 48 49 } 49 50 50 51 static vm_fault_t secretmem_fault(struct vm_fault *vmf) ··· 103 104 104 105 static int secretmem_release(struct inode *inode, struct file *file) 105 106 { 106 - refcount_dec(&secretmem_users); 107 + atomic_dec(&secretmem_users); 107 108 return 0; 108 109 } 109 110 ··· 203 204 204 205 if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC)) 205 206 return -EINVAL; 207 + if (atomic_read(&secretmem_users) < 0) 208 + return -ENFILE; 206 209 207 210 fd = get_unused_fd_flags(flags & O_CLOEXEC); 208 211 if (fd < 0) ··· 219 218 file->f_flags |= O_LARGEFILE; 220 219 221 220 fd_install(fd, file); 222 - refcount_inc(&secretmem_users); 221 + atomic_inc(&secretmem_users); 223 222 return fd; 224 223 225 224 err_put_fd:
+7 -3
net/batman-adv/bridge_loop_avoidance.c
··· 1560 1560 return 0; 1561 1561 1562 1562 bat_priv->bla.claim_hash = batadv_hash_new(128); 1563 - bat_priv->bla.backbone_hash = batadv_hash_new(32); 1564 - 1565 - if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash) 1563 + if (!bat_priv->bla.claim_hash) 1566 1564 return -ENOMEM; 1565 + 1566 + bat_priv->bla.backbone_hash = batadv_hash_new(32); 1567 + if (!bat_priv->bla.backbone_hash) { 1568 + batadv_hash_destroy(bat_priv->bla.claim_hash); 1569 + return -ENOMEM; 1570 + } 1567 1571 1568 1572 batadv_hash_set_lock_class(bat_priv->bla.claim_hash, 1569 1573 &batadv_claim_hash_lock_class_key);
+40 -16
net/batman-adv/main.c
··· 190 190 191 191 bat_priv->gw.generation = 0; 192 192 193 - ret = batadv_v_mesh_init(bat_priv); 194 - if (ret < 0) 195 - goto err; 196 - 197 193 ret = batadv_originator_init(bat_priv); 198 - if (ret < 0) 199 - goto err; 194 + if (ret < 0) { 195 + atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); 196 + goto err_orig; 197 + } 200 198 201 199 ret = batadv_tt_init(bat_priv); 202 - if (ret < 0) 203 - goto err; 200 + if (ret < 0) { 201 + atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); 202 + goto err_tt; 203 + } 204 + 205 + ret = batadv_v_mesh_init(bat_priv); 206 + if (ret < 0) { 207 + atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); 208 + goto err_v; 209 + } 204 210 205 211 ret = batadv_bla_init(bat_priv); 206 - if (ret < 0) 207 - goto err; 212 + if (ret < 0) { 213 + atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); 214 + goto err_bla; 215 + } 208 216 209 217 ret = batadv_dat_init(bat_priv); 210 - if (ret < 0) 211 - goto err; 218 + if (ret < 0) { 219 + atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); 220 + goto err_dat; 221 + } 212 222 213 223 ret = batadv_nc_mesh_init(bat_priv); 214 - if (ret < 0) 215 - goto err; 224 + if (ret < 0) { 225 + atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); 226 + goto err_nc; 227 + } 216 228 217 229 batadv_gw_init(bat_priv); 218 230 batadv_mcast_init(bat_priv); ··· 234 222 235 223 return 0; 236 224 237 - err: 238 - batadv_mesh_free(soft_iface); 225 + err_nc: 226 + batadv_dat_free(bat_priv); 227 + err_dat: 228 + batadv_bla_free(bat_priv); 229 + err_bla: 230 + batadv_v_mesh_free(bat_priv); 231 + err_v: 232 + batadv_tt_free(bat_priv); 233 + err_tt: 234 + batadv_originator_free(bat_priv); 235 + err_orig: 236 + batadv_purge_outstanding_packets(bat_priv, NULL); 237 + atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); 238 + 239 239 return ret; 240 240 } 241 241
+3 -1
net/batman-adv/network-coding.c
··· 152 152 &batadv_nc_coding_hash_lock_class_key); 153 153 154 154 bat_priv->nc.decoding_hash = batadv_hash_new(128); 155 - if (!bat_priv->nc.decoding_hash) 155 + if (!bat_priv->nc.decoding_hash) { 156 + batadv_hash_destroy(bat_priv->nc.coding_hash); 156 157 goto err; 158 + } 157 159 158 160 batadv_hash_set_lock_class(bat_priv->nc.decoding_hash, 159 161 &batadv_nc_decoding_hash_lock_class_key);
+3 -1
net/batman-adv/translation-table.c
··· 4162 4162 return ret; 4163 4163 4164 4164 ret = batadv_tt_global_init(bat_priv); 4165 - if (ret < 0) 4165 + if (ret < 0) { 4166 + batadv_tt_local_table_free(bat_priv); 4166 4167 return ret; 4168 + } 4167 4169 4168 4170 batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1, 4169 4171 batadv_tt_tvlv_unicast_handler_v1,
+8 -1
net/core/dev.c
··· 3171 3171 3172 3172 qoffset = sb_dev->tc_to_txq[tc].offset; 3173 3173 qcount = sb_dev->tc_to_txq[tc].count; 3174 + if (unlikely(!qcount)) { 3175 + net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", 3176 + sb_dev->name, qoffset, tc); 3177 + qoffset = 0; 3178 + qcount = dev->real_num_tx_queues; 3179 + } 3174 3180 } 3175 3181 3176 3182 if (skb_rx_queue_recorded(skb)) { ··· 3920 3914 skb_reset_mac_header(skb); 3921 3915 __skb_pull(skb, skb_network_offset(skb)); 3922 3916 skb->pkt_type = PACKET_LOOPBACK; 3923 - skb->ip_summed = CHECKSUM_UNNECESSARY; 3917 + if (skb->ip_summed == CHECKSUM_NONE) 3918 + skb->ip_summed = CHECKSUM_UNNECESSARY; 3924 3919 WARN_ON(!skb_dst(skb)); 3925 3920 skb_dst_force(skb); 3926 3921 netif_rx_ni(skb);
+2 -2
net/core/net-sysfs.c
··· 2028 2028 int netdev_change_owner(struct net_device *ndev, const struct net *net_old, 2029 2029 const struct net *net_new) 2030 2030 { 2031 + kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID; 2032 + kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID; 2031 2033 struct device *dev = &ndev->dev; 2032 - kuid_t old_uid, new_uid; 2033 - kgid_t old_gid, new_gid; 2034 2034 int error; 2035 2035 2036 2036 net_ns_get_ownership(net_old, &old_uid, &old_gid);
+23 -13
net/core/skbuff.c
··· 80 80 #include <linux/indirect_call_wrapper.h> 81 81 82 82 #include "datagram.h" 83 + #include "sock_destructor.h" 83 84 84 85 struct kmem_cache *skbuff_head_cache __ro_after_init; 85 86 static struct kmem_cache *skbuff_fclone_cache __ro_after_init; ··· 1804 1803 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 1805 1804 { 1806 1805 int delta = headroom - skb_headroom(skb); 1806 + int osize = skb_end_offset(skb); 1807 + struct sock *sk = skb->sk; 1807 1808 1808 1809 if (WARN_ONCE(delta <= 0, 1809 1810 "%s is expecting an increase in the headroom", __func__)) 1810 1811 return skb; 1811 1812 1812 - /* pskb_expand_head() might crash, if skb is shared */ 1813 - if (skb_shared(skb)) { 1813 + delta = SKB_DATA_ALIGN(delta); 1814 + /* pskb_expand_head() might crash, if skb is shared. */ 1815 + if (skb_shared(skb) || !is_skb_wmem(skb)) { 1814 1816 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1815 1817 1816 - if (likely(nskb)) { 1817 - if (skb->sk) 1818 - skb_set_owner_w(nskb, skb->sk); 1819 - consume_skb(skb); 1820 - } else { 1821 - kfree_skb(skb); 1822 - } 1818 + if (unlikely(!nskb)) 1819 + goto fail; 1820 + 1821 + if (sk) 1822 + skb_set_owner_w(nskb, sk); 1823 + consume_skb(skb); 1823 1824 skb = nskb; 1824 1825 } 1825 - if (skb && 1826 - pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { 1827 - kfree_skb(skb); 1828 - skb = NULL; 1826 + if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 1827 + goto fail; 1828 + 1829 + if (sk && is_skb_wmem(skb)) { 1830 + delta = skb_end_offset(skb) - osize; 1831 + refcount_add(delta, &sk->sk_wmem_alloc); 1832 + skb->truesize += delta; 1829 1833 } 1830 1834 return skb; 1835 + 1836 + fail: 1837 + kfree_skb(skb); 1838 + return NULL; 1831 1839 } 1832 1840 EXPORT_SYMBOL(skb_expand_head); 1833 1841
+14
net/core/skmsg.c
··· 474 474 } 475 475 EXPORT_SYMBOL_GPL(sk_msg_recvmsg); 476 476 477 + bool sk_msg_is_readable(struct sock *sk) 478 + { 479 + struct sk_psock *psock; 480 + bool empty = true; 481 + 482 + rcu_read_lock(); 483 + psock = sk_psock(sk); 484 + if (likely(psock)) 485 + empty = list_empty(&psock->ingress_msg); 486 + rcu_read_unlock(); 487 + return !empty; 488 + } 489 + EXPORT_SYMBOL_GPL(sk_msg_is_readable); 490 + 477 491 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk, 478 492 struct sk_buff *skb) 479 493 {
+12
net/core/sock_destructor.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + #ifndef _NET_CORE_SOCK_DESTRUCTOR_H 3 + #define _NET_CORE_SOCK_DESTRUCTOR_H 4 + #include <net/tcp.h> 5 + 6 + static inline bool is_skb_wmem(const struct sk_buff *skb) 7 + { 8 + return skb->destructor == sock_wfree || 9 + skb->destructor == __sock_wfree || 10 + (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree); 11 + } 12 + #endif
+1 -1
net/core/sysctl_net_core.c
··· 419 419 .mode = 0600, 420 420 .proc_handler = proc_dolongvec_minmax_bpf_restricted, 421 421 .extra1 = &long_one, 422 - .extra2 = &long_max, 422 + .extra2 = &bpf_jit_limit_max, 423 423 }, 424 424 #endif 425 425 {
+1 -4
net/ipv4/tcp.c
··· 481 481 { 482 482 if (tcp_epollin_ready(sk, target)) 483 483 return true; 484 - 485 - if (sk->sk_prot->stream_memory_read) 486 - return sk->sk_prot->stream_memory_read(sk); 487 - return false; 484 + return sk_is_readable(sk); 488 485 } 489 486 490 487 /*
+13 -14
net/ipv4/tcp_bpf.c
··· 150 150 EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir); 151 151 152 152 #ifdef CONFIG_BPF_SYSCALL 153 - static bool tcp_bpf_stream_read(const struct sock *sk) 154 - { 155 - struct sk_psock *psock; 156 - bool empty = true; 157 - 158 - rcu_read_lock(); 159 - psock = sk_psock(sk); 160 - if (likely(psock)) 161 - empty = list_empty(&psock->ingress_msg); 162 - rcu_read_unlock(); 163 - return !empty; 164 - } 165 - 166 153 static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock, 167 154 long timeo) 168 155 { ··· 219 232 bool cork = false, enospc = sk_msg_full(msg); 220 233 struct sock *sk_redir; 221 234 u32 tosend, delta = 0; 235 + u32 eval = __SK_NONE; 222 236 int ret; 223 237 224 238 more_data: ··· 263 275 case __SK_REDIRECT: 264 276 sk_redir = psock->sk_redir; 265 277 sk_msg_apply_bytes(psock, tosend); 278 + if (!psock->apply_bytes) { 279 + /* Clean up before releasing the sock lock. */ 280 + eval = psock->eval; 281 + psock->eval = __SK_NONE; 282 + psock->sk_redir = NULL; 283 + } 266 284 if (psock->cork) { 267 285 cork = true; 268 286 psock->cork = NULL; 269 287 } 270 288 sk_msg_return(sk, msg, tosend); 271 289 release_sock(sk); 290 + 272 291 ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags); 292 + 293 + if (eval == __SK_REDIRECT) 294 + sock_put(sk_redir); 295 + 273 296 lock_sock(sk); 274 297 if (unlikely(ret < 0)) { 275 298 int free = sk_msg_free_nocharge(sk, msg); ··· 478 479 prot[TCP_BPF_BASE].unhash = sock_map_unhash; 479 480 prot[TCP_BPF_BASE].close = sock_map_close; 480 481 prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg; 481 - prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read; 482 + prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable; 482 483 483 484 prot[TCP_BPF_TX] = prot[TCP_BPF_BASE]; 484 485 prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
+3
net/ipv4/udp.c
··· 2867 2867 !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) 2868 2868 mask &= ~(EPOLLIN | EPOLLRDNORM); 2869 2869 2870 + /* psock ingress_msg queue should not contain any bad checksum frames */ 2871 + if (sk_is_readable(sk)) 2872 + mask |= EPOLLIN | EPOLLRDNORM; 2870 2873 return mask; 2871 2874 2872 2875 }
+1
net/ipv4/udp_bpf.c
··· 114 114 *prot = *base; 115 115 prot->close = sock_map_close; 116 116 prot->recvmsg = udp_bpf_recvmsg; 117 + prot->sock_is_readable = sk_msg_is_readable; 117 118 } 118 119 119 120 static void udp_bpf_check_v6_needs_rebuild(struct proto *ops)
+5 -4
net/mac80211/mesh.c
··· 672 672 u8 *ie, u8 ie_len) 673 673 { 674 674 struct ieee80211_supported_band *sband; 675 - const u8 *cap; 675 + const struct element *cap; 676 676 const struct ieee80211_he_operation *he_oper = NULL; 677 677 678 678 sband = ieee80211_get_sband(sdata); ··· 687 687 688 688 sdata->vif.bss_conf.he_support = true; 689 689 690 - cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len); 691 - if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3])) 692 - he_oper = (void *)(cap + 3); 690 + cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len); 691 + if (cap && cap->datalen >= 1 + sizeof(*he_oper) && 692 + cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1)) 693 + he_oper = (void *)(cap->data + 1); 693 694 694 695 if (he_oper) 695 696 sdata->vif.bss_conf.he_oper.params =
+24 -15
net/mptcp/options.c
··· 485 485 mpext = mptcp_get_ext(skb); 486 486 data_len = mpext ? mpext->data_len : 0; 487 487 488 - /* we will check ext_copy.data_len in mptcp_write_options() to 488 + /* we will check ops->data_len in mptcp_write_options() to 489 489 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and 490 490 * TCPOLEN_MPTCP_MPC_ACK 491 491 */ 492 - opts->ext_copy.data_len = data_len; 492 + opts->data_len = data_len; 493 493 opts->suboptions = OPTION_MPTCP_MPC_ACK; 494 494 opts->sndr_key = subflow->local_key; 495 495 opts->rcvr_key = subflow->remote_key; ··· 505 505 len = TCPOLEN_MPTCP_MPC_ACK_DATA; 506 506 if (opts->csum_reqd) { 507 507 /* we need to propagate more info to csum the pseudo hdr */ 508 - opts->ext_copy.data_seq = mpext->data_seq; 509 - opts->ext_copy.subflow_seq = mpext->subflow_seq; 510 - opts->ext_copy.csum = mpext->csum; 508 + opts->data_seq = mpext->data_seq; 509 + opts->subflow_seq = mpext->subflow_seq; 510 + opts->csum = mpext->csum; 511 511 len += TCPOLEN_MPTCP_DSS_CHECKSUM; 512 512 } 513 513 *size = ALIGN(len, 4); ··· 1223 1223 WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); 1224 1224 } 1225 1225 1226 - static u16 mptcp_make_csum(const struct mptcp_ext *mpext) 1226 + static u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __sum16 sum) 1227 1227 { 1228 1228 struct csum_pseudo_header header; 1229 1229 __wsum csum; ··· 1233 1233 * always the 64-bit value, irrespective of what length is used in the 1234 1234 * DSS option itself. 
1235 1235 */ 1236 - header.data_seq = cpu_to_be64(mpext->data_seq); 1237 - header.subflow_seq = htonl(mpext->subflow_seq); 1238 - header.data_len = htons(mpext->data_len); 1236 + header.data_seq = cpu_to_be64(data_seq); 1237 + header.subflow_seq = htonl(subflow_seq); 1238 + header.data_len = htons(data_len); 1239 1239 header.csum = 0; 1240 1240 1241 - csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum)); 1241 + csum = csum_partial(&header, sizeof(header), ~csum_unfold(sum)); 1242 1242 return (__force u16)csum_fold(csum); 1243 + } 1244 + 1245 + static u16 mptcp_make_csum(const struct mptcp_ext *mpext) 1246 + { 1247 + return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len, 1248 + mpext->csum); 1243 1249 } 1244 1250 1245 1251 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, ··· 1338 1332 len = TCPOLEN_MPTCP_MPC_SYN; 1339 1333 } else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) { 1340 1334 len = TCPOLEN_MPTCP_MPC_SYNACK; 1341 - } else if (opts->ext_copy.data_len) { 1335 + } else if (opts->data_len) { 1342 1336 len = TCPOLEN_MPTCP_MPC_ACK_DATA; 1343 1337 if (opts->csum_reqd) 1344 1338 len += TCPOLEN_MPTCP_DSS_CHECKSUM; ··· 1367 1361 1368 1362 put_unaligned_be64(opts->rcvr_key, ptr); 1369 1363 ptr += 2; 1370 - if (!opts->ext_copy.data_len) 1364 + if (!opts->data_len) 1371 1365 goto mp_capable_done; 1372 1366 1373 1367 if (opts->csum_reqd) { 1374 - put_unaligned_be32(opts->ext_copy.data_len << 16 | 1375 - mptcp_make_csum(&opts->ext_copy), ptr); 1368 + put_unaligned_be32(opts->data_len << 16 | 1369 + __mptcp_make_csum(opts->data_seq, 1370 + opts->subflow_seq, 1371 + opts->data_len, 1372 + opts->csum), ptr); 1376 1373 } else { 1377 - put_unaligned_be32(opts->ext_copy.data_len << 16 | 1374 + put_unaligned_be32(opts->data_len << 16 | 1378 1375 TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); 1379 1376 } 1380 1377 ptr += 1;
+85 -54
net/sctp/sm_statefuns.c
··· 156 156 void *arg, 157 157 struct sctp_cmd_seq *commands); 158 158 159 + static enum sctp_disposition 160 + __sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep, 161 + const struct sctp_association *asoc, 162 + const union sctp_subtype type, void *arg, 163 + struct sctp_cmd_seq *commands); 164 + 159 165 /* Small helper function that checks if the chunk length 160 166 * is of the appropriate length. The 'required_length' argument 161 167 * is set to be the size of a specific chunk we are testing. ··· 343 337 if (!chunk->singleton) 344 338 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 345 339 340 + /* Make sure that the INIT chunk has a valid length. 341 + * Normally, this would cause an ABORT with a Protocol Violation 342 + * error, but since we don't have an association, we'll 343 + * just discard the packet. 344 + */ 345 + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk))) 346 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 347 + 346 348 /* If the packet is an OOTB packet which is temporarily on the 347 349 * control endpoint, respond with an ABORT. 348 350 */ ··· 364 350 */ 365 351 if (chunk->sctp_hdr->vtag != 0) 366 352 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); 367 - 368 - /* Make sure that the INIT chunk has a valid length. 369 - * Normally, this would cause an ABORT with a Protocol Violation 370 - * error, but since we don't have an association, we'll 371 - * just discard the packet. 372 - */ 373 - if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk))) 374 - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 375 353 376 354 /* If the INIT is coming toward a closing socket, we'll send back 377 355 * and ABORT. 
Essentially, this catches the race of INIT being ··· 710 704 struct sock *sk; 711 705 int error = 0; 712 706 707 + if (asoc && !sctp_vtag_verify(chunk, asoc)) 708 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 709 + 713 710 /* If the packet is an OOTB packet which is temporarily on the 714 711 * control endpoint, respond with an ABORT. 715 712 */ ··· 727 718 * in sctp_unpack_cookie(). 728 719 */ 729 720 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) 730 - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 721 + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 722 + commands); 731 723 732 724 /* If the endpoint is not listening or if the number of associations 733 725 * on the TCP-style socket exceed the max backlog, respond with an ··· 1534 1524 if (!chunk->singleton) 1535 1525 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 1536 1526 1527 + /* Make sure that the INIT chunk has a valid length. */ 1528 + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk))) 1529 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 1530 + 1537 1531 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification 1538 1532 * Tag. 1539 1533 */ 1540 1534 if (chunk->sctp_hdr->vtag != 0) 1541 1535 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); 1542 - 1543 - /* Make sure that the INIT chunk has a valid length. 1544 - * In this case, we generate a protocol violation since we have 1545 - * an association established. 1546 - */ 1547 - if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk))) 1548 - return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 1549 - commands); 1550 1536 1551 1537 if (SCTP_INPUT_CB(chunk->skb)->encap_port != chunk->transport->encap_port) 1552 1538 return sctp_sf_new_encap_port(net, ep, asoc, type, arg, commands); ··· 1888 1882 * its peer. 
1889 1883 */ 1890 1884 if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) { 1891 - disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc, 1892 - SCTP_ST_CHUNK(chunk->chunk_hdr->type), 1893 - chunk, commands); 1885 + disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc, 1886 + SCTP_ST_CHUNK(chunk->chunk_hdr->type), 1887 + chunk, commands); 1894 1888 if (SCTP_DISPOSITION_NOMEM == disposition) 1895 1889 goto nomem; 1896 1890 ··· 2208 2202 * enough for the chunk header. Cookie length verification is 2209 2203 * done later. 2210 2204 */ 2211 - if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) 2212 - return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 2213 - commands); 2205 + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) { 2206 + if (!sctp_vtag_verify(chunk, asoc)) 2207 + asoc = NULL; 2208 + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); 2209 + } 2214 2210 2215 2211 /* "Decode" the chunk. We have no optional parameters so we 2216 2212 * are in good shape. 
··· 2349 2341 */ 2350 2342 if (SCTP_ADDR_DEL == 2351 2343 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) 2352 - return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); 2344 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 2353 2345 2354 2346 if (!sctp_err_chunk_valid(chunk)) 2355 2347 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); ··· 2395 2387 */ 2396 2388 if (SCTP_ADDR_DEL == 2397 2389 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) 2398 - return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); 2390 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 2399 2391 2400 2392 if (!sctp_err_chunk_valid(chunk)) 2401 2393 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); ··· 2665 2657 */ 2666 2658 if (SCTP_ADDR_DEL == 2667 2659 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) 2668 - return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); 2660 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 2669 2661 2670 2662 if (!sctp_err_chunk_valid(chunk)) 2671 2663 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); ··· 2978 2970 * that belong to this association, it should discard the INIT chunk and 2979 2971 * retransmit the SHUTDOWN ACK chunk. 
2980 2972 */ 2981 - enum sctp_disposition sctp_sf_do_9_2_reshutack( 2982 - struct net *net, 2983 - const struct sctp_endpoint *ep, 2984 - const struct sctp_association *asoc, 2985 - const union sctp_subtype type, 2986 - void *arg, 2987 - struct sctp_cmd_seq *commands) 2973 + static enum sctp_disposition 2974 + __sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep, 2975 + const struct sctp_association *asoc, 2976 + const union sctp_subtype type, void *arg, 2977 + struct sctp_cmd_seq *commands) 2988 2978 { 2989 2979 struct sctp_chunk *chunk = arg; 2990 2980 struct sctp_chunk *reply; ··· 3014 3008 return SCTP_DISPOSITION_CONSUME; 3015 3009 nomem: 3016 3010 return SCTP_DISPOSITION_NOMEM; 3011 + } 3012 + 3013 + enum sctp_disposition 3014 + sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep, 3015 + const struct sctp_association *asoc, 3016 + const union sctp_subtype type, void *arg, 3017 + struct sctp_cmd_seq *commands) 3018 + { 3019 + struct sctp_chunk *chunk = arg; 3020 + 3021 + if (!chunk->singleton) 3022 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3023 + 3024 + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk))) 3025 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3026 + 3027 + if (chunk->sctp_hdr->vtag != 0) 3028 + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); 3029 + 3030 + return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands); 3017 3031 } 3018 3032 3019 3033 /* ··· 3688 3662 3689 3663 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); 3690 3664 3665 + if (asoc && !sctp_vtag_verify(chunk, asoc)) 3666 + asoc = NULL; 3667 + 3691 3668 ch = (struct sctp_chunkhdr *)chunk->chunk_hdr; 3692 3669 do { 3693 3670 /* Report violation if the chunk is less then minimal */ ··· 3806 3777 3807 3778 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); 3808 3779 3809 - /* If the chunk length is invalid, we don't want to process 3810 - * the reset of the packet. 
3811 - */ 3812 - if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) 3813 - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3814 - 3815 3780 /* We need to discard the rest of the packet to prevent 3816 3781 * potential boomming attacks from additional bundled chunks. 3817 3782 * This is documented in SCTP Threats ID. ··· 3832 3809 struct sctp_cmd_seq *commands) 3833 3810 { 3834 3811 struct sctp_chunk *chunk = arg; 3812 + 3813 + if (!sctp_vtag_verify(chunk, asoc)) 3814 + asoc = NULL; 3835 3815 3836 3816 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ 3837 3817 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) ··· 3871 3845 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3872 3846 } 3873 3847 3848 + /* Make sure that the ASCONF ADDIP chunk has a valid length. */ 3849 + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk))) 3850 + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3851 + commands); 3852 + 3874 3853 /* ADD-IP: Section 4.1.1 3875 3854 * This chunk MUST be sent in an authenticated way by using 3876 3855 * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk ··· 3884 3853 */ 3885 3854 if (!asoc->peer.asconf_capable || 3886 3855 (!net->sctp.addip_noauth && !chunk->auth)) 3887 - return sctp_sf_discard_chunk(net, ep, asoc, type, arg, 3888 - commands); 3889 - 3890 - /* Make sure that the ASCONF ADDIP chunk has a valid length. */ 3891 - if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk))) 3892 - return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3893 - commands); 3856 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3894 3857 3895 3858 hdr = (struct sctp_addiphdr *)chunk->skb->data; 3896 3859 serial = ntohl(hdr->serial); ··· 4013 3988 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 4014 3989 } 4015 3990 3991 + /* Make sure that the ADDIP chunk has a valid length. 
*/ 3992 + if (!sctp_chunk_length_valid(asconf_ack, 3993 + sizeof(struct sctp_addip_chunk))) 3994 + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3995 + commands); 3996 + 4016 3997 /* ADD-IP, Section 4.1.2: 4017 3998 * This chunk MUST be sent in an authenticated way by using 4018 3999 * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk ··· 4027 3996 */ 4028 3997 if (!asoc->peer.asconf_capable || 4029 3998 (!net->sctp.addip_noauth && !asconf_ack->auth)) 4030 - return sctp_sf_discard_chunk(net, ep, asoc, type, arg, 4031 - commands); 4032 - 4033 - /* Make sure that the ADDIP chunk has a valid length. */ 4034 - if (!sctp_chunk_length_valid(asconf_ack, 4035 - sizeof(struct sctp_addip_chunk))) 4036 - return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 4037 - commands); 3999 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 4038 4000 4039 4001 addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data; 4040 4002 rcvd_serial = ntohl(addip_hdr->serial); ··· 4599 4575 { 4600 4576 struct sctp_chunk *chunk = arg; 4601 4577 4578 + if (asoc && !sctp_vtag_verify(chunk, asoc)) 4579 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 4580 + 4602 4581 /* Make sure that the chunk has a valid length. 4603 4582 * Since we don't know the chunk type, we use a general 4604 4583 * chunkhdr structure to make a comparison. ··· 4668 4641 struct sctp_cmd_seq *commands) 4669 4642 { 4670 4643 struct sctp_chunk *chunk = arg; 4644 + 4645 + if (!sctp_vtag_verify(chunk, asoc)) 4646 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 4671 4647 4672 4648 /* Make sure that the chunk has a valid length. */ 4673 4649 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) ··· 6378 6348 * yet. 6379 6349 */ 6380 6350 switch (chunk->chunk_hdr->type) { 6351 + case SCTP_CID_INIT: 6381 6352 case SCTP_CID_INIT_ACK: 6382 6353 { 6383 6354 struct sctp_initack_chunk *initack;
+1 -1
net/smc/af_smc.c
··· 1185 1185 if (smc->clcsock->sk->sk_err) { 1186 1186 smc->sk.sk_err = smc->clcsock->sk->sk_err; 1187 1187 } else if ((1 << smc->clcsock->sk->sk_state) & 1188 - (TCPF_SYN_SENT | TCP_SYN_RECV)) { 1188 + (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 1189 1189 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo); 1190 1190 if ((rc == -EPIPE) && 1191 1191 ((1 << smc->clcsock->sk->sk_state) &
+1 -1
net/smc/smc_llc.c
··· 2154 2154 link->smcibdev->ibdev->name, link->ibport); 2155 2155 link->state = SMC_LNK_ACTIVE; 2156 2156 if (link->lgr->llc_testlink_time) { 2157 - link->llc_testlink_time = link->lgr->llc_testlink_time * HZ; 2157 + link->llc_testlink_time = link->lgr->llc_testlink_time; 2158 2158 schedule_delayed_work(&link->llc_testlink_wrk, 2159 2159 link->llc_testlink_time); 2160 2160 }
+21 -11
net/tipc/crypto.c
··· 2285 2285 u16 key_gen = msg_key_gen(hdr); 2286 2286 u16 size = msg_data_sz(hdr); 2287 2287 u8 *data = msg_data(hdr); 2288 + unsigned int keylen; 2289 + 2290 + /* Verify whether the size can exist in the packet */ 2291 + if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) { 2292 + pr_debug("%s: message data size is too small\n", rx->name); 2293 + goto exit; 2294 + } 2295 + 2296 + keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME))); 2297 + 2298 + /* Verify the supplied size values */ 2299 + if (unlikely(size != keylen + sizeof(struct tipc_aead_key) || 2300 + keylen > TIPC_AEAD_KEY_SIZE_MAX)) { 2301 + pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name); 2302 + goto exit; 2303 + } 2288 2304 2289 2305 spin_lock(&rx->lock); 2290 2306 if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) { 2291 2307 pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name, 2292 2308 rx->skey, key_gen, rx->key_gen); 2293 - goto exit; 2309 + goto exit_unlock; 2294 2310 } 2295 2311 2296 2312 /* Allocate memory for the key */ 2297 2313 skey = kmalloc(size, GFP_ATOMIC); 2298 2314 if (unlikely(!skey)) { 2299 2315 pr_err("%s: unable to allocate memory for skey\n", rx->name); 2300 - goto exit; 2316 + goto exit_unlock; 2301 2317 } 2302 2318 2303 2319 /* Copy key from msg data */ 2304 - skey->keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME))); 2320 + skey->keylen = keylen; 2305 2321 memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME); 2306 2322 memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32), 2307 2323 skey->keylen); 2308 - 2309 - /* Sanity check */ 2310 - if (unlikely(size != tipc_aead_key_size(skey))) { 2311 - kfree(skey); 2312 - skey = NULL; 2313 - goto exit; 2314 - } 2315 2324 2316 2325 rx->key_gen = key_gen; 2317 2326 rx->skey_mode = msg_key_mode(hdr); ··· 2328 2319 rx->nokey = 0; 2329 2320 mb(); /* for nokey flag */ 2330 2321 2331 - exit: 2322 + exit_unlock: 2332 2323 spin_unlock(&rx->lock); 2333 2324 2325 + exit: 2334 2326 /* 
Schedule the key attaching on this crypto */ 2335 2327 if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0))) 2336 2328 return true;
+2 -2
net/tls/tls_main.c
··· 769 769 770 770 prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE]; 771 771 prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg; 772 - prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read; 772 + prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable; 773 773 prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close; 774 774 775 775 prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE]; 776 776 prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg; 777 - prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read; 777 + prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable; 778 778 prot[TLS_SW][TLS_SW].close = tls_sk_proto_close; 779 779 780 780 #ifdef CONFIG_TLS_DEVICE
+15 -6
net/tls/tls_sw.c
··· 35 35 * SOFTWARE. 36 36 */ 37 37 38 + #include <linux/bug.h> 38 39 #include <linux/sched/signal.h> 39 40 #include <linux/module.h> 40 41 #include <linux/splice.h> ··· 43 42 44 43 #include <net/strparser.h> 45 44 #include <net/tls.h> 45 + 46 + noinline void tls_err_abort(struct sock *sk, int err) 47 + { 48 + WARN_ON_ONCE(err >= 0); 49 + /* sk->sk_err should contain a positive error code. */ 50 + sk->sk_err = -err; 51 + sk_error_report(sk); 52 + } 46 53 47 54 static int __skb_nsg(struct sk_buff *skb, int offset, int len, 48 55 unsigned int recursion_level) ··· 428 419 429 420 tx_err: 430 421 if (rc < 0 && rc != -EAGAIN) 431 - tls_err_abort(sk, EBADMSG); 422 + tls_err_abort(sk, -EBADMSG); 432 423 433 424 return rc; 434 425 } ··· 459 450 460 451 /* If err is already set on socket, return the same code */ 461 452 if (sk->sk_err) { 462 - ctx->async_wait.err = sk->sk_err; 453 + ctx->async_wait.err = -sk->sk_err; 463 454 } else { 464 455 ctx->async_wait.err = err; 465 456 tls_err_abort(sk, err); ··· 778 769 msg_pl->sg.size + prot->tail_size, i); 779 770 if (rc < 0) { 780 771 if (rc != -EINPROGRESS) { 781 - tls_err_abort(sk, EBADMSG); 772 + tls_err_abort(sk, -EBADMSG); 782 773 if (split) { 783 774 tls_ctx->pending_open_record_frags = true; 784 775 tls_merge_open_record(sk, rec, tmp, orig_end); ··· 1848 1839 err = decrypt_skb_update(sk, skb, &msg->msg_iter, 1849 1840 &chunk, &zc, async_capable); 1850 1841 if (err < 0 && err != -EINPROGRESS) { 1851 - tls_err_abort(sk, EBADMSG); 1842 + tls_err_abort(sk, -EBADMSG); 1852 1843 goto recv_end; 1853 1844 } 1854 1845 ··· 2028 2019 } 2029 2020 2030 2021 if (err < 0) { 2031 - tls_err_abort(sk, EBADMSG); 2022 + tls_err_abort(sk, -EBADMSG); 2032 2023 goto splice_read_end; 2033 2024 } 2034 2025 ctx->decrypted = 1; ··· 2047 2038 return copied ? 
: err; 2048 2039 } 2049 2040 2050 - bool tls_sw_stream_read(const struct sock *sk) 2041 + bool tls_sw_sock_is_readable(struct sock *sk) 2051 2042 { 2052 2043 struct tls_context *tls_ctx = tls_get_ctx(sk); 2053 2044 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+4
net/unix/af_unix.c
··· 3052 3052 /* readable? */ 3053 3053 if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) 3054 3054 mask |= EPOLLIN | EPOLLRDNORM; 3055 + if (sk_is_readable(sk)) 3056 + mask |= EPOLLIN | EPOLLRDNORM; 3055 3057 3056 3058 /* Connection-based need to check for termination and startup */ 3057 3059 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && ··· 3092 3090 3093 3091 /* readable? */ 3094 3092 if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) 3093 + mask |= EPOLLIN | EPOLLRDNORM; 3094 + if (sk_is_readable(sk)) 3095 3095 mask |= EPOLLIN | EPOLLRDNORM; 3096 3096 3097 3097 /* Connection-based need to check for termination and startup */
+2
net/unix/unix_bpf.c
··· 102 102 *prot = *base; 103 103 prot->close = sock_map_close; 104 104 prot->recvmsg = unix_bpf_recvmsg; 105 + prot->sock_is_readable = sk_msg_is_readable; 105 106 } 106 107 107 108 static void unix_stream_bpf_rebuild_protos(struct proto *prot, ··· 111 110 *prot = *base; 112 111 prot->close = sock_map_close; 113 112 prot->recvmsg = unix_bpf_recvmsg; 113 + prot->sock_is_readable = sk_msg_is_readable; 114 114 prot->unhash = sock_map_unhash; 115 115 } 116 116
+1 -1
net/wireless/core.c
··· 524 524 INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk); 525 525 INIT_WORK(&rdev->mgmt_registrations_update_wk, 526 526 cfg80211_mgmt_registrations_update_wk); 527 + spin_lock_init(&rdev->mgmt_registrations_lock); 527 528 528 529 #ifdef CONFIG_CFG80211_DEFAULT_PS 529 530 rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; ··· 1290 1289 INIT_LIST_HEAD(&wdev->event_list); 1291 1290 spin_lock_init(&wdev->event_lock); 1292 1291 INIT_LIST_HEAD(&wdev->mgmt_registrations); 1293 - spin_lock_init(&wdev->mgmt_registrations_lock); 1294 1292 INIT_LIST_HEAD(&wdev->pmsr_list); 1295 1293 spin_lock_init(&wdev->pmsr_lock); 1296 1294 INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
+2
net/wireless/core.h
··· 100 100 struct work_struct propagate_cac_done_wk; 101 101 102 102 struct work_struct mgmt_registrations_update_wk; 103 + /* lock for all wdev lists */ 104 + spinlock_t mgmt_registrations_lock; 103 105 104 106 /* must be last because of the way we do wiphy_priv(), 105 107 * and it should at least be aligned to NETDEV_ALIGN */
+14 -12
net/wireless/mlme.c
··· 452 452 453 453 lockdep_assert_held(&rdev->wiphy.mtx); 454 454 455 - spin_lock_bh(&wdev->mgmt_registrations_lock); 455 + spin_lock_bh(&rdev->mgmt_registrations_lock); 456 456 if (!wdev->mgmt_registrations_need_update) { 457 - spin_unlock_bh(&wdev->mgmt_registrations_lock); 457 + spin_unlock_bh(&rdev->mgmt_registrations_lock); 458 458 return; 459 459 } 460 460 ··· 479 479 rcu_read_unlock(); 480 480 481 481 wdev->mgmt_registrations_need_update = 0; 482 - spin_unlock_bh(&wdev->mgmt_registrations_lock); 482 + spin_unlock_bh(&rdev->mgmt_registrations_lock); 483 483 484 484 rdev_update_mgmt_frame_registrations(rdev, wdev, &upd); 485 485 } ··· 503 503 int match_len, bool multicast_rx, 504 504 struct netlink_ext_ack *extack) 505 505 { 506 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 506 507 struct cfg80211_mgmt_registration *reg, *nreg; 507 508 int err = 0; 508 509 u16 mgmt_type; ··· 549 548 if (!nreg) 550 549 return -ENOMEM; 551 550 552 - spin_lock_bh(&wdev->mgmt_registrations_lock); 551 + spin_lock_bh(&rdev->mgmt_registrations_lock); 553 552 554 553 list_for_each_entry(reg, &wdev->mgmt_registrations, list) { 555 554 int mlen = min(match_len, reg->match_len); ··· 584 583 list_add(&nreg->list, &wdev->mgmt_registrations); 585 584 } 586 585 wdev->mgmt_registrations_need_update = 1; 587 - spin_unlock_bh(&wdev->mgmt_registrations_lock); 586 + spin_unlock_bh(&rdev->mgmt_registrations_lock); 588 587 589 588 cfg80211_mgmt_registrations_update(wdev); 590 589 ··· 592 591 593 592 out: 594 593 kfree(nreg); 595 - spin_unlock_bh(&wdev->mgmt_registrations_lock); 594 + spin_unlock_bh(&rdev->mgmt_registrations_lock); 596 595 597 596 return err; 598 597 } ··· 603 602 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); 604 603 struct cfg80211_mgmt_registration *reg, *tmp; 605 604 606 - spin_lock_bh(&wdev->mgmt_registrations_lock); 605 + spin_lock_bh(&rdev->mgmt_registrations_lock); 607 606 608 607 list_for_each_entry_safe(reg, tmp, 
&wdev->mgmt_registrations, list) { 609 608 if (reg->nlportid != nlportid) ··· 616 615 schedule_work(&rdev->mgmt_registrations_update_wk); 617 616 } 618 617 619 - spin_unlock_bh(&wdev->mgmt_registrations_lock); 618 + spin_unlock_bh(&rdev->mgmt_registrations_lock); 620 619 621 620 if (nlportid && rdev->crit_proto_nlportid == nlportid) { 622 621 rdev->crit_proto_nlportid = 0; ··· 629 628 630 629 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) 631 630 { 631 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 632 632 struct cfg80211_mgmt_registration *reg, *tmp; 633 633 634 - spin_lock_bh(&wdev->mgmt_registrations_lock); 634 + spin_lock_bh(&rdev->mgmt_registrations_lock); 635 635 list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) { 636 636 list_del(&reg->list); 637 637 kfree(reg); 638 638 } 639 639 wdev->mgmt_registrations_need_update = 1; 640 - spin_unlock_bh(&wdev->mgmt_registrations_lock); 640 + spin_unlock_bh(&rdev->mgmt_registrations_lock); 641 641 642 642 cfg80211_mgmt_registrations_update(wdev); 643 643 } ··· 786 784 data = buf + ieee80211_hdrlen(mgmt->frame_control); 787 785 data_len = len - ieee80211_hdrlen(mgmt->frame_control); 788 786 789 - spin_lock_bh(&wdev->mgmt_registrations_lock); 787 + spin_lock_bh(&rdev->mgmt_registrations_lock); 790 788 791 789 list_for_each_entry(reg, &wdev->mgmt_registrations, list) { 792 790 if (reg->frame_type != ftype) ··· 810 808 break; 811 809 } 812 810 813 - spin_unlock_bh(&wdev->mgmt_registrations_lock); 811 + spin_unlock_bh(&rdev->mgmt_registrations_lock); 814 812 815 813 trace_cfg80211_return_bool(result); 816 814 return result;
+5 -2
net/wireless/scan.c
··· 418 418 } 419 419 ssid_len = ssid[1]; 420 420 ssid = ssid + 2; 421 - rcu_read_unlock(); 422 421 423 422 /* check if nontrans_bss is in the list */ 424 423 list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) { 425 - if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) 424 + if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) { 425 + rcu_read_unlock(); 426 426 return 0; 427 + } 427 428 } 429 + 430 + rcu_read_unlock(); 428 431 429 432 /* add to the list */ 430 433 list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
+7 -7
net/wireless/util.c
··· 1030 1030 !(rdev->wiphy.interface_modes & (1 << ntype))) 1031 1031 return -EOPNOTSUPP; 1032 1032 1033 - /* if it's part of a bridge, reject changing type to station/ibss */ 1034 - if (netif_is_bridge_port(dev) && 1035 - (ntype == NL80211_IFTYPE_ADHOC || 1036 - ntype == NL80211_IFTYPE_STATION || 1037 - ntype == NL80211_IFTYPE_P2P_CLIENT)) 1038 - return -EBUSY; 1039 - 1040 1033 if (ntype != otype) { 1034 + /* if it's part of a bridge, reject changing type to station/ibss */ 1035 + if (netif_is_bridge_port(dev) && 1036 + (ntype == NL80211_IFTYPE_ADHOC || 1037 + ntype == NL80211_IFTYPE_STATION || 1038 + ntype == NL80211_IFTYPE_P2P_CLIENT)) 1039 + return -EBUSY; 1040 + 1041 1041 dev->ieee80211_ptr->use_4addr = false; 1042 1042 dev->ieee80211_ptr->mesh_id_up_len = 0; 1043 1043 wdev_lock(dev->ieee80211_ptr);
+20 -55
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
··· 949 949 int err, n; 950 950 u32 key; 951 951 char b; 952 - int retries = 100; 953 952 954 953 zero_verdict_count(verd_mapfd); 955 954 ··· 1001 1002 goto close_peer1; 1002 1003 if (pass != 1) 1003 1004 FAIL("%s: want pass count 1, have %d", log_prefix, pass); 1004 - again: 1005 - n = read(c0, &b, 1); 1006 - if (n < 0) { 1007 - if (errno == EAGAIN && retries--) { 1008 - usleep(1000); 1009 - goto again; 1010 - } 1011 - FAIL_ERRNO("%s: read", log_prefix); 1012 - } 1005 + n = recv_timeout(c0, &b, 1, 0, IO_TIMEOUT_SEC); 1006 + if (n < 0) 1007 + FAIL_ERRNO("%s: recv_timeout", log_prefix); 1013 1008 if (n == 0) 1014 - FAIL("%s: incomplete read", log_prefix); 1009 + FAIL("%s: incomplete recv", log_prefix); 1015 1010 1016 1011 close_peer1: 1017 1012 xclose(p1); ··· 1564 1571 const char *log_prefix = redir_mode_str(mode); 1565 1572 int c0, c1, p0, p1; 1566 1573 unsigned int pass; 1567 - int retries = 100; 1568 1574 int err, n; 1569 1575 int sfd[2]; 1570 1576 u32 key; ··· 1598 1606 if (pass != 1) 1599 1607 FAIL("%s: want pass count 1, have %d", log_prefix, pass); 1600 1608 1601 - again: 1602 - n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1); 1603 - if (n < 0) { 1604 - if (errno == EAGAIN && retries--) { 1605 - usleep(1000); 1606 - goto again; 1607 - } 1608 - FAIL_ERRNO("%s: read", log_prefix); 1609 - } 1609 + n = recv_timeout(mode == REDIR_INGRESS ? 
p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC); 1610 + if (n < 0) 1611 + FAIL_ERRNO("%s: recv_timeout", log_prefix); 1610 1612 if (n == 0) 1611 - FAIL("%s: incomplete read", log_prefix); 1613 + FAIL("%s: incomplete recv", log_prefix); 1612 1614 1613 1615 close: 1614 1616 xclose(c1); ··· 1734 1748 const char *log_prefix = redir_mode_str(mode); 1735 1749 int c0, c1, p0, p1; 1736 1750 unsigned int pass; 1737 - int retries = 100; 1738 1751 int err, n; 1739 1752 u32 key; 1740 1753 char b; ··· 1766 1781 if (pass != 1) 1767 1782 FAIL("%s: want pass count 1, have %d", log_prefix, pass); 1768 1783 1769 - again: 1770 - n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1); 1771 - if (n < 0) { 1772 - if (errno == EAGAIN && retries--) { 1773 - usleep(1000); 1774 - goto again; 1775 - } 1776 - FAIL_ERRNO("%s: read", log_prefix); 1777 - } 1784 + n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC); 1785 + if (n < 0) 1786 + FAIL_ERRNO("%s: recv_timeout", log_prefix); 1778 1787 if (n == 0) 1779 - FAIL("%s: incomplete read", log_prefix); 1788 + FAIL("%s: incomplete recv", log_prefix); 1780 1789 1781 1790 close_cli1: 1782 1791 xclose(c1); ··· 1820 1841 const char *log_prefix = redir_mode_str(mode); 1821 1842 int c0, c1, p0, p1; 1822 1843 unsigned int pass; 1823 - int retries = 100; 1824 1844 int err, n; 1825 1845 int sfd[2]; 1826 1846 u32 key; ··· 1854 1876 if (pass != 1) 1855 1877 FAIL("%s: want pass count 1, have %d", log_prefix, pass); 1856 1878 1857 - again: 1858 - n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1); 1859 - if (n < 0) { 1860 - if (errno == EAGAIN && retries--) { 1861 - usleep(1000); 1862 - goto again; 1863 - } 1864 - FAIL_ERRNO("%s: read", log_prefix); 1865 - } 1879 + n = recv_timeout(mode == REDIR_INGRESS ? 
p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC); 1880 + if (n < 0) 1881 + FAIL_ERRNO("%s: recv_timeout", log_prefix); 1866 1882 if (n == 0) 1867 - FAIL("%s: incomplete read", log_prefix); 1883 + FAIL("%s: incomplete recv", log_prefix); 1868 1884 1869 1885 close_cli1: 1870 1886 xclose(c1); ··· 1904 1932 int sfd[2]; 1905 1933 u32 key; 1906 1934 char b; 1907 - int retries = 100; 1908 1935 1909 1936 zero_verdict_count(verd_mapfd); 1910 1937 ··· 1934 1963 if (pass != 1) 1935 1964 FAIL("%s: want pass count 1, have %d", log_prefix, pass); 1936 1965 1937 - again: 1938 - n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1); 1939 - if (n < 0) { 1940 - if (errno == EAGAIN && retries--) { 1941 - usleep(1000); 1942 - goto again; 1943 - } 1944 - FAIL_ERRNO("%s: read", log_prefix); 1945 - } 1966 + n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC); 1967 + if (n < 0) 1968 + FAIL_ERRNO("%s: recv_timeout", log_prefix); 1946 1969 if (n == 0) 1947 - FAIL("%s: incomplete read", log_prefix); 1970 + FAIL("%s: incomplete recv", log_prefix); 1948 1971 1949 1972 close: 1950 1973 xclose(c1);
+3
tools/testing/selftests/net/fcnal-test.sh
··· 445 445 ip -netns ${NSA} link set dev ${NSA_DEV} down 446 446 ip -netns ${NSA} link del dev ${NSA_DEV} 447 447 448 + ip netns pids ${NSA} | xargs kill 2>/dev/null 448 449 ip netns del ${NSA} 449 450 fi 450 451 452 + ip netns pids ${NSB} | xargs kill 2>/dev/null 451 453 ip netns del ${NSB} 454 + ip netns pids ${NSC} | xargs kill 2>/dev/null 452 455 ip netns del ${NSC} >/dev/null 2>&1 453 456 } 454 457