Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts were easy to resolve using immediate context mostly,
except the cls_u32.c one where I simply took the entire HEAD
chunk.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1420 -686
+5
Documentation/driver-api/fpga/fpga-mgr.rst
··· 184 184 API for programming an FPGA 185 185 --------------------------- 186 186 187 + FPGA Manager flags 188 + 189 + .. kernel-doc:: include/linux/fpga/fpga-mgr.h 190 + :doc: FPGA Manager flags 191 + 187 192 .. kernel-doc:: include/linux/fpga/fpga-mgr.h 188 193 :functions: fpga_image_info 189 194
+3 -2
MAINTAINERS
··· 9680 9680 M: Jiaxun Yang <jiaxun.yang@flygoat.com> 9681 9681 L: linux-mips@linux-mips.org 9682 9682 S: Maintained 9683 - F: arch/mips/loongson64/*{2e/2f}* 9683 + F: arch/mips/loongson64/fuloong-2e/ 9684 + F: arch/mips/loongson64/lemote-2f/ 9684 9685 F: arch/mips/include/asm/mach-loongson64/ 9685 9686 F: drivers/*/*loongson2* 9686 9687 F: drivers/*/*/*loongson2* ··· 9888 9887 S: Maintained 9889 9888 F: Documentation/ABI/testing/sysfs-class-mux* 9890 9889 F: Documentation/devicetree/bindings/mux/ 9891 - F: include/linux/dt-bindings/mux/ 9890 + F: include/dt-bindings/mux/ 9892 9891 F: include/linux/mux/ 9893 9892 F: drivers/mux/ 9894 9893
+6 -4
Makefile
··· 2 2 VERSION = 4 3 3 PATCHLEVEL = 19 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc6 5 + EXTRAVERSION = -rc7 6 6 NAME = Merciless Moray 7 7 8 8 # *DOCUMENTATION* ··· 483 483 ifeq ($(cc-name),clang) 484 484 ifneq ($(CROSS_COMPILE),) 485 485 CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) 486 - GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) 486 + GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD))) 487 + CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR) 488 + GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) 487 489 endif 488 490 ifneq ($(GCC_TOOLCHAIN),) 489 491 CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) 490 492 endif 491 - KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 492 - KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 493 + KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX) 494 + KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX) 493 495 KBUILD_CFLAGS += $(call cc-option, -no-integrated-as) 494 496 KBUILD_AFLAGS += $(call cc-option, -no-integrated-as) 495 497 endif
+1 -1
arch/arc/Kconfig
··· 149 149 Support for ARC770 core introduced with Rel 4.10 (Summer 2011) 150 150 This core has a bunch of cool new features: 151 151 -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4) 152 - Shared Address Spaces (for sharing TLB entires in MMU) 152 + Shared Address Spaces (for sharing TLB entries in MMU) 153 153 -Caches: New Prog Model, Region Flush 154 154 -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr 155 155
+2 -24
arch/arc/Makefile
··· 6 6 # published by the Free Software Foundation. 7 7 # 8 8 9 - ifeq ($(CROSS_COMPILE),) 10 - ifndef CONFIG_CPU_BIG_ENDIAN 11 - CROSS_COMPILE := arc-linux- 12 - else 13 - CROSS_COMPILE := arceb-linux- 14 - endif 15 - endif 16 - 17 9 KBUILD_DEFCONFIG := nsim_700_defconfig 18 10 19 11 cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ 20 12 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 21 - cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs 22 - 23 - is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0) 24 - 25 - ifdef CONFIG_ISA_ARCOMPACT 26 - ifeq ($(is_700), 0) 27 - $(error Toolchain not configured for ARCompact builds) 28 - endif 29 - endif 30 - 31 - ifdef CONFIG_ISA_ARCV2 32 - ifeq ($(is_700), 1) 33 - $(error Toolchain not configured for ARCv2 builds) 34 - endif 35 - endif 13 + cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38 36 14 37 15 ifdef CONFIG_ARC_CURR_IN_REG 38 16 # For a global register defintion, make sure it gets passed to every file ··· 57 79 cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian 58 80 ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB 59 81 60 - LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) 82 + LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name) 61 83 62 84 # Modules with short calls might break for calls into builtin-kernel 63 85 KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
+20
arch/arc/kernel/process.c
··· 241 241 task_thread_info(current)->thr_ptr; 242 242 } 243 243 244 + 245 + /* 246 + * setup usermode thread pointer #1: 247 + * when child is picked by scheduler, __switch_to() uses @c_callee to 248 + * populate usermode callee regs: this works (despite being in a kernel 249 + * function) since special return path for child @ret_from_fork() 250 + * ensures those regs are not clobbered all the way to RTIE to usermode 251 + */ 252 + c_callee->r25 = task_thread_info(p)->thr_ptr; 253 + 254 + #ifdef CONFIG_ARC_CURR_IN_REG 255 + /* 256 + * setup usermode thread pointer #2: 257 + * however for this special use of r25 in kernel, __switch_to() sets 258 + * r25 for kernel needs and only in the final return path is usermode 259 + * r25 setup, from pt_regs->user_r25. So set that up as well 260 + */ 261 + c_regs->user_r25 = c_callee->r25; 262 + #endif 263 + 244 264 return 0; 245 265 } 246 266
+11
arch/arm/boot/dts/imx53-qsb-common.dtsi
··· 123 123 }; 124 124 }; 125 125 126 + &cpu0 { 127 + /* CPU rated to 1GHz, not 1.2GHz as per the default settings */ 128 + operating-points = < 129 + /* kHz uV */ 130 + 166666 850000 131 + 400000 900000 132 + 800000 1050000 133 + 1000000 1200000 134 + >; 135 + }; 136 + 126 137 &esdhc1 { 127 138 pinctrl-names = "default"; 128 139 pinctrl-0 = <&pinctrl_esdhc1>;
+2
arch/arm/kernel/vmlinux.lds.h
··· 49 49 #define ARM_DISCARD \ 50 50 *(.ARM.exidx.exit.text) \ 51 51 *(.ARM.extab.exit.text) \ 52 + *(.ARM.exidx.text.exit) \ 53 + *(.ARM.extab.text.exit) \ 52 54 ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \ 53 55 ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \ 54 56 ARM_EXIT_DISCARD(EXIT_TEXT) \
+5 -5
arch/mips/include/asm/processor.h
··· 13 13 14 14 #include <linux/atomic.h> 15 15 #include <linux/cpumask.h> 16 + #include <linux/sizes.h> 16 17 #include <linux/threads.h> 17 18 18 19 #include <asm/cachectl.h> ··· 81 80 82 81 #endif 83 82 84 - /* 85 - * One page above the stack is used for branch delay slot "emulation". 86 - * See dsemul.c for details. 87 - */ 88 - #define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE) 83 + #define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M) 84 + 85 + extern unsigned long mips_stack_top(void); 86 + #define STACK_TOP mips_stack_top() 89 87 90 88 /* 91 89 * This decides where the kernel will search for a free chunk of vm
+25
arch/mips/kernel/process.c
··· 32 32 #include <linux/nmi.h> 33 33 #include <linux/cpu.h> 34 34 35 + #include <asm/abi.h> 35 36 #include <asm/asm.h> 36 37 #include <asm/bootinfo.h> 37 38 #include <asm/cpu.h> ··· 40 39 #include <asm/dsp.h> 41 40 #include <asm/fpu.h> 42 41 #include <asm/irq.h> 42 + #include <asm/mips-cps.h> 43 43 #include <asm/msa.h> 44 44 #include <asm/pgtable.h> 45 45 #include <asm/mipsregs.h> ··· 645 643 646 644 out: 647 645 return pc; 646 + } 647 + 648 + unsigned long mips_stack_top(void) 649 + { 650 + unsigned long top = TASK_SIZE & PAGE_MASK; 651 + 652 + /* One page for branch delay slot "emulation" */ 653 + top -= PAGE_SIZE; 654 + 655 + /* Space for the VDSO, data page & GIC user page */ 656 + top -= PAGE_ALIGN(current->thread.abi->vdso->size); 657 + top -= PAGE_SIZE; 658 + top -= mips_gic_present() ? PAGE_SIZE : 0; 659 + 660 + /* Space for cache colour alignment */ 661 + if (cpu_has_dc_aliases) 662 + top -= shm_align_mask + 1; 663 + 664 + /* Space to randomize the VDSO base */ 665 + if (current->flags & PF_RANDOMIZE) 666 + top -= VDSO_RANDOMIZE_SIZE; 667 + 668 + return top; 648 669 } 649 670 650 671 /*
+28 -20
arch/mips/kernel/setup.c
··· 846 846 struct memblock_region *reg; 847 847 extern void plat_mem_setup(void); 848 848 849 + /* 850 + * Initialize boot_command_line to an innocuous but non-empty string in 851 + * order to prevent early_init_dt_scan_chosen() from copying 852 + * CONFIG_CMDLINE into it without our knowledge. We handle 853 + * CONFIG_CMDLINE ourselves below & don't want to duplicate its 854 + * content because repeating arguments can be problematic. 855 + */ 856 + strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE); 857 + 858 + /* call board setup routine */ 859 + plat_mem_setup(); 860 + 861 + /* 862 + * Make sure all kernel memory is in the maps. The "UP" and 863 + * "DOWN" are opposite for initdata since if it crosses over 864 + * into another memory section you don't want that to be 865 + * freed when the initdata is freed. 866 + */ 867 + arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT, 868 + PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT, 869 + BOOT_MEM_RAM); 870 + arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, 871 + PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, 872 + BOOT_MEM_INIT_RAM); 873 + 874 + pr_info("Determined physical RAM map:\n"); 875 + print_memory_map(); 876 + 849 877 #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) 850 878 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 851 879 #else ··· 901 873 } 902 874 #endif 903 875 #endif 904 - 905 - /* call board setup routine */ 906 - plat_mem_setup(); 907 - 908 - /* 909 - * Make sure all kernel memory is in the maps. The "UP" and 910 - * "DOWN" are opposite for initdata since if it crosses over 911 - * into another memory section you don't want that to be 912 - * freed when the initdata is freed. 
913 - */ 914 - arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT, 915 - PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT, 916 - BOOT_MEM_RAM); 917 - arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, 918 - PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, 919 - BOOT_MEM_INIT_RAM); 920 - 921 - pr_info("Determined physical RAM map:\n"); 922 - print_memory_map(); 923 - 924 876 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 925 877 926 878 *cmdline_p = command_line;
+17 -1
arch/mips/kernel/vdso.c
··· 15 15 #include <linux/ioport.h> 16 16 #include <linux/kernel.h> 17 17 #include <linux/mm.h> 18 + #include <linux/random.h> 18 19 #include <linux/sched.h> 19 20 #include <linux/slab.h> 20 21 #include <linux/timekeeper_internal.h> ··· 98 97 } 99 98 } 100 99 100 + static unsigned long vdso_base(void) 101 + { 102 + unsigned long base; 103 + 104 + /* Skip the delay slot emulation page */ 105 + base = STACK_TOP + PAGE_SIZE; 106 + 107 + if (current->flags & PF_RANDOMIZE) { 108 + base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1); 109 + base = PAGE_ALIGN(base); 110 + } 111 + 112 + return base; 113 + } 114 + 101 115 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 102 116 { 103 117 struct mips_vdso_image *image = current->thread.abi->vdso; ··· 153 137 if (cpu_has_dc_aliases) 154 138 size += shm_align_mask + 1; 155 139 156 - base = get_unmapped_area(NULL, 0, size, 0, 0); 140 + base = get_unmapped_area(NULL, vdso_base(), size, 0, 0); 157 141 if (IS_ERR_VALUE(base)) { 158 142 ret = base; 159 143 goto out;
+3 -1
arch/mips/lib/memset.S
··· 280 280 * unset_bytes = end_addr - current_addr + 1 281 281 * a2 = t1 - a0 + 1 282 282 */ 283 + .set reorder 283 284 PTR_SUBU a2, t1, a0 285 + PTR_ADDIU a2, 1 284 286 jr ra 285 - PTR_ADDIU a2, 1 287 + .set noreorder 286 288 287 289 .endm 288 290
+10
arch/powerpc/kernel/process.c
··· 1306 1306 1307 1307 pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int)); 1308 1308 1309 + /* 1310 + * Make sure the NIP points at userspace, not kernel text/data or 1311 + * elsewhere. 1312 + */ 1313 + if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) { 1314 + pr_info("%s[%d]: Bad NIP, not dumping instructions.\n", 1315 + current->comm, current->pid); 1316 + return; 1317 + } 1318 + 1309 1319 pr_info("%s[%d]: code: ", current->comm, current->pid); 1310 1320 1311 1321 for (i = 0; i < instructions_to_print; i++) {
+12 -8
arch/powerpc/lib/code-patching.c
··· 28 28 { 29 29 int err; 30 30 31 - /* Make sure we aren't patching a freed init section */ 32 - if (init_mem_is_free && init_section_contains(exec_addr, 4)) { 33 - pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr); 34 - return 0; 35 - } 36 - 37 31 __put_user_size(instr, patch_addr, 4, err); 38 32 if (err) 39 33 return err; ··· 142 148 return 0; 143 149 } 144 150 145 - int patch_instruction(unsigned int *addr, unsigned int instr) 151 + static int do_patch_instruction(unsigned int *addr, unsigned int instr) 146 152 { 147 153 int err; 148 154 unsigned int *patch_addr = NULL; ··· 182 188 } 183 189 #else /* !CONFIG_STRICT_KERNEL_RWX */ 184 190 185 - int patch_instruction(unsigned int *addr, unsigned int instr) 191 + static int do_patch_instruction(unsigned int *addr, unsigned int instr) 186 192 { 187 193 return raw_patch_instruction(addr, instr); 188 194 } 189 195 190 196 #endif /* CONFIG_STRICT_KERNEL_RWX */ 197 + 198 + int patch_instruction(unsigned int *addr, unsigned int instr) 199 + { 200 + /* Make sure we aren't patching a freed init section */ 201 + if (init_mem_is_free && init_section_contains(addr, 4)) { 202 + pr_debug("Skipping init section patching addr: 0x%px\n", addr); 203 + return 0; 204 + } 205 + return do_patch_instruction(addr, instr); 206 + } 191 207 NOKPROBE_SYMBOL(patch_instruction); 192 208 193 209 int patch_branch(unsigned int *addr, unsigned long target, int flags)
+3 -2
arch/powerpc/mm/numa.c
··· 1217 1217 * Need to ensure that NODE_DATA is initialized for a node from 1218 1218 * available memory (see memblock_alloc_try_nid). If unable to 1219 1219 * init the node, then default to nearest node that has memory 1220 - * installed. 1220 + * installed. Skip onlining a node if the subsystems are not 1221 + * yet initialized. 1221 1222 */ 1222 - if (try_online_node(new_nid)) 1223 + if (!topology_inited || try_online_node(new_nid)) 1223 1224 new_nid = first_online_node; 1224 1225 #else 1225 1226 /*
+2 -1
arch/s390/include/asm/sclp.h
··· 108 108 void sclp_early_get_ipl_info(struct sclp_ipl_info *info); 109 109 void sclp_early_detect(void); 110 110 void sclp_early_printk(const char *s); 111 - void __sclp_early_printk(const char *s, unsigned int len); 111 + void sclp_early_printk_force(const char *s); 112 + void __sclp_early_printk(const char *s, unsigned int len, unsigned int force); 112 113 113 114 int _sclp_get_core_info(struct sclp_core_info *info); 114 115 int sclp_core_configure(u8 core);
+1 -1
arch/s390/kernel/early_printk.c
··· 10 10 11 11 static void sclp_early_write(struct console *con, const char *s, unsigned int len) 12 12 { 13 - __sclp_early_printk(s, len); 13 + __sclp_early_printk(s, len, 0); 14 14 } 15 15 16 16 static struct console sclp_early_console = {
+3 -5
arch/s390/kernel/swsusp.S
··· 198 198 199 199 /* Suspend CPU not available -> panic */ 200 200 larl %r15,init_thread_union 201 - ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) 201 + aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) 202 + aghi %r15,-STACK_FRAME_OVERHEAD 202 203 larl %r2,.Lpanic_string 203 - lghi %r1,0 204 - sam31 205 - sigp %r1,%r0,SIGP_SET_ARCHITECTURE 206 - brasl %r14,sclp_early_printk 204 + brasl %r14,sclp_early_printk_force 207 205 larl %r3,.Ldisabled_wait_31 208 206 lpsw 0(%r3) 209 207 4:
+2 -2
arch/sparc/kernel/auxio_64.c
··· 115 115 auxio_devtype = AUXIO_TYPE_SBUS; 116 116 size = 1; 117 117 } else { 118 - printk("auxio: Unknown parent bus type [%s]\n", 119 - dp->parent->name); 118 + printk("auxio: Unknown parent bus type [%pOFn]\n", 119 + dp->parent); 120 120 return -ENODEV; 121 121 } 122 122 auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
+1 -1
arch/sparc/kernel/kgdb_32.c
··· 122 122 linux_regs->pc = addr; 123 123 linux_regs->npc = addr + 4; 124 124 } 125 - /* fallthru */ 125 + /* fall through */ 126 126 127 127 case 'D': 128 128 case 'k':
+1 -1
arch/sparc/kernel/kgdb_64.c
··· 148 148 linux_regs->tpc = addr; 149 149 linux_regs->tnpc = addr + 4; 150 150 } 151 - /* fallthru */ 151 + /* fall through */ 152 152 153 153 case 'D': 154 154 case 'k':
+2 -2
arch/sparc/kernel/power.c
··· 41 41 42 42 power_reg = of_ioremap(res, 0, 0x4, "power"); 43 43 44 - printk(KERN_INFO "%s: Control reg at %llx\n", 45 - op->dev.of_node->name, res->start); 44 + printk(KERN_INFO "%pOFn: Control reg at %llx\n", 45 + op->dev.of_node, res->start); 46 46 47 47 if (has_button_interrupt(irq, op->dev.of_node)) { 48 48 if (request_irq(irq,
+13 -13
arch/sparc/kernel/prom_32.c
··· 68 68 return; 69 69 70 70 regs = rprop->value; 71 - sprintf(tmp_buf, "%s@%x,%x", 72 - dp->name, 71 + sprintf(tmp_buf, "%pOFn@%x,%x", 72 + dp, 73 73 regs->which_io, regs->phys_addr); 74 74 } 75 75 ··· 84 84 return; 85 85 86 86 regs = prop->value; 87 - sprintf(tmp_buf, "%s@%x,%x", 88 - dp->name, 87 + sprintf(tmp_buf, "%pOFn@%x,%x", 88 + dp, 89 89 regs->which_io, 90 90 regs->phys_addr); 91 91 } ··· 104 104 regs = prop->value; 105 105 devfn = (regs->phys_hi >> 8) & 0xff; 106 106 if (devfn & 0x07) { 107 - sprintf(tmp_buf, "%s@%x,%x", 108 - dp->name, 107 + sprintf(tmp_buf, "%pOFn@%x,%x", 108 + dp, 109 109 devfn >> 3, 110 110 devfn & 0x07); 111 111 } else { 112 - sprintf(tmp_buf, "%s@%x", 113 - dp->name, 112 + sprintf(tmp_buf, "%pOFn@%x", 113 + dp, 114 114 devfn >> 3); 115 115 } 116 116 } ··· 127 127 128 128 regs = prop->value; 129 129 130 - sprintf(tmp_buf, "%s@%x,%x", 131 - dp->name, 130 + sprintf(tmp_buf, "%pOFn@%x,%x", 131 + dp, 132 132 regs->which_io, regs->phys_addr); 133 133 } 134 134 ··· 167 167 return; 168 168 device = prop->value; 169 169 170 - sprintf(tmp_buf, "%s:%d:%d@%x,%x", 171 - dp->name, *vendor, *device, 170 + sprintf(tmp_buf, "%pOFn:%d:%d@%x,%x", 171 + dp, *vendor, *device, 172 172 *intr, reg0); 173 173 } 174 174 ··· 201 201 tmp_buf[0] = '\0'; 202 202 __build_path_component(dp, tmp_buf); 203 203 if (tmp_buf[0] == '\0') 204 - strcpy(tmp_buf, dp->name); 204 + snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp); 205 205 206 206 n = prom_early_alloc(strlen(tmp_buf) + 1); 207 207 strcpy(n, tmp_buf);
+34 -34
arch/sparc/kernel/prom_64.c
··· 82 82 83 83 regs = rprop->value; 84 84 if (!of_node_is_root(dp->parent)) { 85 - sprintf(tmp_buf, "%s@%x,%x", 86 - dp->name, 85 + sprintf(tmp_buf, "%pOFn@%x,%x", 86 + dp, 87 87 (unsigned int) (regs->phys_addr >> 32UL), 88 88 (unsigned int) (regs->phys_addr & 0xffffffffUL)); 89 89 return; ··· 97 97 const char *prefix = (type == 0) ? "m" : "i"; 98 98 99 99 if (low_bits) 100 - sprintf(tmp_buf, "%s@%s%x,%x", 101 - dp->name, prefix, 100 + sprintf(tmp_buf, "%pOFn@%s%x,%x", 101 + dp, prefix, 102 102 high_bits, low_bits); 103 103 else 104 - sprintf(tmp_buf, "%s@%s%x", 105 - dp->name, 104 + sprintf(tmp_buf, "%pOFn@%s%x", 105 + dp, 106 106 prefix, 107 107 high_bits); 108 108 } else if (type == 12) { 109 - sprintf(tmp_buf, "%s@%x", 110 - dp->name, high_bits); 109 + sprintf(tmp_buf, "%pOFn@%x", 110 + dp, high_bits); 111 111 } 112 112 } 113 113 ··· 122 122 123 123 regs = prop->value; 124 124 if (!of_node_is_root(dp->parent)) { 125 - sprintf(tmp_buf, "%s@%x,%x", 126 - dp->name, 125 + sprintf(tmp_buf, "%pOFn@%x,%x", 126 + dp, 127 127 (unsigned int) (regs->phys_addr >> 32UL), 128 128 (unsigned int) (regs->phys_addr & 0xffffffffUL)); 129 129 return; ··· 138 138 if (tlb_type >= cheetah) 139 139 mask = 0x7fffff; 140 140 141 - sprintf(tmp_buf, "%s@%x,%x", 142 - dp->name, 141 + sprintf(tmp_buf, "%pOFn@%x,%x", 142 + dp, 143 143 *(u32 *)prop->value, 144 144 (unsigned int) (regs->phys_addr & mask)); 145 145 } ··· 156 156 return; 157 157 158 158 regs = prop->value; 159 - sprintf(tmp_buf, "%s@%x,%x", 160 - dp->name, 159 + sprintf(tmp_buf, "%pOFn@%x,%x", 160 + dp, 161 161 regs->which_io, 162 162 regs->phys_addr); 163 163 } ··· 176 176 regs = prop->value; 177 177 devfn = (regs->phys_hi >> 8) & 0xff; 178 178 if (devfn & 0x07) { 179 - sprintf(tmp_buf, "%s@%x,%x", 180 - dp->name, 179 + sprintf(tmp_buf, "%pOFn@%x,%x", 180 + dp, 181 181 devfn >> 3, 182 182 devfn & 0x07); 183 183 } else { 184 - sprintf(tmp_buf, "%s@%x", 185 - dp->name, 184 + sprintf(tmp_buf, "%pOFn@%x", 185 + dp, 186 186 devfn 
>> 3); 187 187 } 188 188 } ··· 203 203 if (!prop) 204 204 return; 205 205 206 - sprintf(tmp_buf, "%s@%x,%x", 207 - dp->name, 206 + sprintf(tmp_buf, "%pOFn@%x,%x", 207 + dp, 208 208 *(u32 *) prop->value, 209 209 (unsigned int) (regs->phys_addr & 0xffffffffUL)); 210 210 } ··· 221 221 222 222 regs = prop->value; 223 223 224 - sprintf(tmp_buf, "%s@%x", dp->name, *regs); 224 + sprintf(tmp_buf, "%pOFn@%x", dp, *regs); 225 225 } 226 226 227 227 /* "name@addrhi,addrlo" */ ··· 236 236 237 237 regs = prop->value; 238 238 239 - sprintf(tmp_buf, "%s@%x,%x", 240 - dp->name, 239 + sprintf(tmp_buf, "%pOFn@%x,%x", 240 + dp, 241 241 (unsigned int) (regs->phys_addr >> 32UL), 242 242 (unsigned int) (regs->phys_addr & 0xffffffffUL)); 243 243 } ··· 257 257 /* This actually isn't right... should look at the #address-cells 258 258 * property of the i2c bus node etc. etc. 259 259 */ 260 - sprintf(tmp_buf, "%s@%x,%x", 261 - dp->name, regs[0], regs[1]); 260 + sprintf(tmp_buf, "%pOFn@%x,%x", 261 + dp, regs[0], regs[1]); 262 262 } 263 263 264 264 /* "name@reg0[,reg1]" */ ··· 274 274 regs = prop->value; 275 275 276 276 if (prop->length == sizeof(u32) || regs[1] == 1) { 277 - sprintf(tmp_buf, "%s@%x", 278 - dp->name, regs[0]); 277 + sprintf(tmp_buf, "%pOFn@%x", 278 + dp, regs[0]); 279 279 } else { 280 - sprintf(tmp_buf, "%s@%x,%x", 281 - dp->name, regs[0], regs[1]); 280 + sprintf(tmp_buf, "%pOFn@%x,%x", 281 + dp, regs[0], regs[1]); 282 282 } 283 283 } 284 284 ··· 295 295 regs = prop->value; 296 296 297 297 if (regs[2] || regs[3]) { 298 - sprintf(tmp_buf, "%s@%08x%08x,%04x%08x", 299 - dp->name, regs[0], regs[1], regs[2], regs[3]); 298 + sprintf(tmp_buf, "%pOFn@%08x%08x,%04x%08x", 299 + dp, regs[0], regs[1], regs[2], regs[3]); 300 300 } else { 301 - sprintf(tmp_buf, "%s@%08x%08x", 302 - dp->name, regs[0], regs[1]); 301 + sprintf(tmp_buf, "%pOFn@%08x%08x", 302 + dp, regs[0], regs[1]); 303 303 } 304 304 } 305 305 ··· 361 361 tmp_buf[0] = '\0'; 362 362 __build_path_component(dp, tmp_buf); 363 363 if 
(tmp_buf[0] == '\0') 364 - strcpy(tmp_buf, dp->name); 364 + snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp); 365 365 366 366 n = prom_early_alloc(strlen(tmp_buf) + 1); 367 367 strcpy(n, tmp_buf);
+9 -3
arch/sparc/kernel/viohs.c
··· 180 180 struct vio_dring_register pkt; 181 181 char all[sizeof(struct vio_dring_register) + 182 182 (sizeof(struct ldc_trans_cookie) * 183 - dr->ncookies)]; 183 + VIO_MAX_RING_COOKIES)]; 184 184 } u; 185 + size_t bytes = sizeof(struct vio_dring_register) + 186 + (sizeof(struct ldc_trans_cookie) * 187 + dr->ncookies); 185 188 int i; 186 189 187 - memset(&u, 0, sizeof(u)); 190 + if (WARN_ON(bytes > sizeof(u))) 191 + return -EINVAL; 192 + 193 + memset(&u, 0, bytes); 188 194 init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG); 189 195 u.pkt.dring_ident = 0; 190 196 u.pkt.num_descr = dr->num_entries; ··· 212 206 (unsigned long long) u.pkt.cookies[i].cookie_size); 213 207 } 214 208 215 - return send_ctrl(vio, &u.pkt.tag, sizeof(u)); 209 + return send_ctrl(vio, &u.pkt.tag, bytes); 216 210 } 217 211 218 212 static int send_rdx(struct vio_driver_state *vio)
+3 -5
arch/sparc/vdso/Makefile
··· 31 31 targets += $(vdso_img_cfiles) 32 32 targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) 33 33 34 - export CPPFLAGS_vdso.lds += -P -C 34 + CPPFLAGS_vdso.lds += -P -C 35 35 36 36 VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ 37 37 -Wl,--no-undefined \ 38 38 -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \ 39 39 $(DISABLE_LTO) 40 40 41 - $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE 41 + $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE 42 42 $(call if_changed,vdso) 43 43 44 44 HOST_EXTRACFLAGS += -I$(srctree)/tools/include 45 45 hostprogs-y += vdso2c 46 46 47 47 quiet_cmd_vdso2c = VDSO2C $@ 48 - define cmd_vdso2c 49 - $(obj)/vdso2c $< $(<:%.dbg=%) $@ 50 - endef 48 + cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@ 51 49 52 50 $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE 53 51 $(call if_changed,vdso2c)
+3 -3
arch/x86/kernel/cpu/intel_rdt.h
··· 529 529 int rdtgroup_schemata_show(struct kernfs_open_file *of, 530 530 struct seq_file *s, void *v); 531 531 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, 532 - u32 _cbm, int closid, bool exclusive); 532 + unsigned long cbm, int closid, bool exclusive); 533 533 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, 534 - u32 cbm); 534 + unsigned long cbm); 535 535 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); 536 536 int rdtgroup_tasks_assigned(struct rdtgroup *r); 537 537 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); 538 538 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); 539 - bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm); 539 + bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); 540 540 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); 541 541 int rdt_pseudo_lock_init(void); 542 542 void rdt_pseudo_lock_release(void);
+11 -9
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
··· 797 797 /** 798 798 * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked 799 799 * @d: RDT domain 800 - * @_cbm: CBM to test 800 + * @cbm: CBM to test 801 801 * 802 - * @d represents a cache instance and @_cbm a capacity bitmask that is 803 - * considered for it. Determine if @_cbm overlaps with any existing 802 + * @d represents a cache instance and @cbm a capacity bitmask that is 803 + * considered for it. Determine if @cbm overlaps with any existing 804 804 * pseudo-locked region on @d. 805 805 * 806 - * Return: true if @_cbm overlaps with pseudo-locked region on @d, false 806 + * @cbm is unsigned long, even if only 32 bits are used, to make the 807 + * bitmap functions work correctly. 808 + * 809 + * Return: true if @cbm overlaps with pseudo-locked region on @d, false 807 810 * otherwise. 808 811 */ 809 - bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm) 812 + bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) 810 813 { 811 - unsigned long *cbm = (unsigned long *)&_cbm; 812 - unsigned long *cbm_b; 813 814 unsigned int cbm_len; 815 + unsigned long cbm_b; 814 816 815 817 if (d->plr) { 816 818 cbm_len = d->plr->r->cache.cbm_len; 817 - cbm_b = (unsigned long *)&d->plr->cbm; 818 - if (bitmap_intersects(cbm, cbm_b, cbm_len)) 819 + cbm_b = d->plr->cbm; 820 + if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) 819 821 return true; 820 822 } 821 823 return false;
+23 -13
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
··· 975 975 * is false then overlaps with any resource group or hardware entities 976 976 * will be considered. 977 977 * 978 + * @cbm is unsigned long, even if only 32 bits are used, to make the 979 + * bitmap functions work correctly. 980 + * 978 981 * Return: false if CBM does not overlap, true if it does. 979 982 */ 980 983 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, 981 - u32 _cbm, int closid, bool exclusive) 984 + unsigned long cbm, int closid, bool exclusive) 982 985 { 983 - unsigned long *cbm = (unsigned long *)&_cbm; 984 - unsigned long *ctrl_b; 985 986 enum rdtgrp_mode mode; 987 + unsigned long ctrl_b; 986 988 u32 *ctrl; 987 989 int i; 988 990 989 991 /* Check for any overlap with regions used by hardware directly */ 990 992 if (!exclusive) { 991 - if (bitmap_intersects(cbm, 992 - (unsigned long *)&r->cache.shareable_bits, 993 - r->cache.cbm_len)) 993 + ctrl_b = r->cache.shareable_bits; 994 + if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) 994 995 return true; 995 996 } 996 997 997 998 /* Check for overlap with other resource groups */ 998 999 ctrl = d->ctrl_val; 999 1000 for (i = 0; i < closids_supported(); i++, ctrl++) { 1000 - ctrl_b = (unsigned long *)ctrl; 1001 + ctrl_b = *ctrl; 1001 1002 mode = rdtgroup_mode_by_closid(i); 1002 1003 if (closid_allocated(i) && i != closid && 1003 1004 mode != RDT_MODE_PSEUDO_LOCKSETUP) { 1004 - if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) { 1005 + if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { 1005 1006 if (exclusive) { 1006 1007 if (mode == RDT_MODE_EXCLUSIVE) 1007 1008 return true; ··· 1139 1138 * computed by first dividing the total cache size by the CBM length to 1140 1139 * determine how many bytes each bit in the bitmask represents. The result 1141 1140 * is multiplied with the number of bits set in the bitmask. 1141 + * 1142 + * @cbm is unsigned long, even if only 32 bits are used to make the 1143 + * bitmap functions work correctly. 
1142 1144 */ 1143 1145 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, 1144 - struct rdt_domain *d, u32 cbm) 1146 + struct rdt_domain *d, unsigned long cbm) 1145 1147 { 1146 1148 struct cpu_cacheinfo *ci; 1147 1149 unsigned int size = 0; 1148 1150 int num_b, i; 1149 1151 1150 - num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len); 1152 + num_b = bitmap_weight(&cbm, r->cache.cbm_len); 1151 1153 ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); 1152 1154 for (i = 0; i < ci->num_leaves; i++) { 1153 1155 if (ci->info_list[i].level == r->cache_level) { ··· 2357 2353 u32 used_b = 0, unused_b = 0; 2358 2354 u32 closid = rdtgrp->closid; 2359 2355 struct rdt_resource *r; 2356 + unsigned long tmp_cbm; 2360 2357 enum rdtgrp_mode mode; 2361 2358 struct rdt_domain *d; 2362 2359 int i, ret; ··· 2395 2390 * modify the CBM based on system availability. 2396 2391 */ 2397 2392 cbm_ensure_valid(&d->new_ctrl, r); 2398 - if (bitmap_weight((unsigned long *) &d->new_ctrl, 2399 - r->cache.cbm_len) < 2400 - r->cache.min_cbm_bits) { 2393 + /* 2394 + * Assign the u32 CBM to an unsigned long to ensure 2395 + * that bitmap_weight() does not access out-of-bound 2396 + * memory. 2397 + */ 2398 + tmp_cbm = d->new_ctrl; 2399 + if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < 2400 + r->cache.min_cbm_bits) { 2401 2401 rdt_last_cmd_printf("no space on %s:%d\n", 2402 2402 r->name, d->id); 2403 2403 return -ENOSPC;
+8 -2
arch/x86/mm/pgtable.c
··· 115 115 116 116 #define UNSHARED_PTRS_PER_PGD \ 117 117 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) 118 + #define MAX_UNSHARED_PTRS_PER_PGD \ 119 + max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD) 118 120 119 121 120 122 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) ··· 183 181 * and initialize the kernel pmds here. 184 182 */ 185 183 #define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD 184 + #define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD 186 185 187 186 /* 188 187 * We allocate separate PMDs for the kernel part of the user page-table ··· 192 189 */ 193 190 #define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \ 194 191 KERNEL_PGD_PTRS : 0) 192 + #define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS 195 193 196 194 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) 197 195 { ··· 214 210 215 211 /* No need to prepopulate any pagetable entries in non-PAE modes. */ 216 212 #define PREALLOCATED_PMDS 0 213 + #define MAX_PREALLOCATED_PMDS 0 217 214 #define PREALLOCATED_USER_PMDS 0 215 + #define MAX_PREALLOCATED_USER_PMDS 0 218 216 #endif /* CONFIG_X86_PAE */ 219 217 220 218 static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) ··· 434 428 pgd_t *pgd_alloc(struct mm_struct *mm) 435 429 { 436 430 pgd_t *pgd; 437 - pmd_t *u_pmds[PREALLOCATED_USER_PMDS]; 438 - pmd_t *pmds[PREALLOCATED_PMDS]; 431 + pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS]; 432 + pmd_t *pmds[MAX_PREALLOCATED_PMDS]; 439 433 440 434 pgd = _pgd_alloc(); 441 435
+5 -2
drivers/base/firmware_loader/main.c
··· 226 226 } 227 227 228 228 tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); 229 - if (tmp && !(opt_flags & FW_OPT_NOCACHE)) 230 - list_add(&tmp->list, &fwc->head); 229 + if (tmp) { 230 + INIT_LIST_HEAD(&tmp->list); 231 + if (!(opt_flags & FW_OPT_NOCACHE)) 232 + list_add(&tmp->list, &fwc->head); 233 + } 231 234 spin_unlock(&fwc->lock); 232 235 233 236 *fw_priv = tmp;
+1 -1
drivers/bluetooth/hci_qca.c
··· 1356 1356 { 1357 1357 int i; 1358 1358 1359 - qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs * 1359 + qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs, 1360 1360 sizeof(struct regulator_bulk_data), 1361 1361 GFP_KERNEL); 1362 1362 if (!qca->vreg_bulk)
+5 -3
drivers/crypto/inside-secure/safexcel.c
··· 1044 1044 1045 1045 safexcel_configure(priv); 1046 1046 1047 - priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring), 1047 + priv->ring = devm_kcalloc(dev, priv->config.rings, 1048 + sizeof(*priv->ring), 1048 1049 GFP_KERNEL); 1049 1050 if (!priv->ring) { 1050 1051 ret = -ENOMEM; ··· 1064 1063 if (ret) 1065 1064 goto err_reg_clk; 1066 1065 1067 - priv->ring[i].rdr_req = devm_kzalloc(dev, 1068 - sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE, 1066 + priv->ring[i].rdr_req = devm_kcalloc(dev, 1067 + EIP197_DEFAULT_RING_SIZE, 1068 + sizeof(priv->ring[i].rdr_req), 1069 1069 GFP_KERNEL); 1070 1070 if (!priv->ring[i].rdr_req) { 1071 1071 ret = -ENOMEM;
+3 -1
drivers/fpga/dfl-fme-region.c
··· 14 14 */ 15 15 16 16 #include <linux/module.h> 17 + #include <linux/fpga/fpga-mgr.h> 17 18 #include <linux/fpga/fpga-region.h> 18 19 19 20 #include "dfl-fme-pr.h" ··· 67 66 static int fme_region_remove(struct platform_device *pdev) 68 67 { 69 68 struct fpga_region *region = dev_get_drvdata(&pdev->dev); 69 + struct fpga_manager *mgr = region->mgr; 70 70 71 71 fpga_region_unregister(region); 72 - fpga_mgr_put(region->mgr); 72 + fpga_mgr_put(mgr); 73 73 74 74 return 0; 75 75 }
+1 -1
drivers/fpga/fpga-bridge.c
··· 125 125 * 126 126 * Given a device, get an exclusive reference to a fpga bridge. 127 127 * 128 - * Return: fpga manager struct or IS_ERR() condition containing error code. 128 + * Return: fpga bridge struct or IS_ERR() condition containing error code. 129 129 */ 130 130 struct fpga_bridge *fpga_bridge_get(struct device *dev, 131 131 struct fpga_image_info *info)
+2 -1
drivers/fpga/of-fpga-region.c
··· 437 437 static int of_fpga_region_remove(struct platform_device *pdev) 438 438 { 439 439 struct fpga_region *region = platform_get_drvdata(pdev); 440 + struct fpga_manager *mgr = region->mgr; 440 441 441 442 fpga_region_unregister(region); 442 - fpga_mgr_put(region->mgr); 443 + fpga_mgr_put(mgr); 443 444 444 445 return 0; 445 446 }
+2 -1
drivers/gpio/gpiolib.c
··· 1682 1682 irq_set_chained_handler_and_data(parent_irq, parent_handler, 1683 1683 gpiochip); 1684 1684 1685 - gpiochip->irq.parents = &parent_irq; 1685 + gpiochip->irq.parent_irq = parent_irq; 1686 + gpiochip->irq.parents = &gpiochip->irq.parent_irq; 1686 1687 gpiochip->irq.num_parents = 1; 1687 1688 } 1688 1689
+1 -1
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
··· 600 600 } 601 601 602 602 mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); 603 - mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * 603 + mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr, 604 604 sizeof(struct drm_plane), 605 605 GFP_KERNEL); 606 606
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
··· 153 153 return 0; 154 154 } 155 155 156 - mp->clk_config = devm_kzalloc(&pdev->dev, 157 - sizeof(struct dss_clk) * num_clk, 156 + mp->clk_config = devm_kcalloc(&pdev->dev, 157 + num_clk, sizeof(struct dss_clk), 158 158 GFP_KERNEL); 159 159 if (!mp->clk_config) 160 160 return -ENOMEM;
+14 -1
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 900 900 nv50_mstc_detect(struct drm_connector *connector, bool force) 901 901 { 902 902 struct nv50_mstc *mstc = nv50_mstc(connector); 903 + enum drm_connector_status conn_status; 904 + int ret; 905 + 903 906 if (!mstc->port) 904 907 return connector_status_disconnected; 905 - return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port); 908 + 909 + ret = pm_runtime_get_sync(connector->dev->dev); 910 + if (ret < 0 && ret != -EACCES) 911 + return connector_status_disconnected; 912 + 913 + conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, 914 + mstc->port); 915 + 916 + pm_runtime_mark_last_busy(connector->dev->dev); 917 + pm_runtime_put_autosuspend(connector->dev->dev); 918 + return conn_status; 906 919 } 907 920 908 921 static void
+5 -3
drivers/hv/connection.c
··· 76 76 __u32 version) 77 77 { 78 78 int ret = 0; 79 + unsigned int cur_cpu; 79 80 struct vmbus_channel_initiate_contact *msg; 80 81 unsigned long flags; 81 82 ··· 119 118 * the CPU attempting to connect may not be CPU 0. 120 119 */ 121 120 if (version >= VERSION_WIN8_1) { 122 - msg->target_vcpu = 123 - hv_cpu_number_to_vp_number(smp_processor_id()); 124 - vmbus_connection.connect_cpu = smp_processor_id(); 121 + cur_cpu = get_cpu(); 122 + msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu); 123 + vmbus_connection.connect_cpu = cur_cpu; 124 + put_cpu(); 125 125 } else { 126 126 msg->target_vcpu = 0; 127 127 vmbus_connection.connect_cpu = 0;
+1 -1
drivers/hwmon/npcm750-pwm-fan.c
··· 908 908 if (fan_cnt < 1) 909 909 return -EINVAL; 910 910 911 - fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL); 911 + fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL); 912 912 if (!fan_ch) 913 913 return -ENOMEM; 914 914
+3 -1
drivers/i2c/busses/i2c-designware-master.c
··· 34 34 35 35 static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) 36 36 { 37 - u32 ic_clk = i2c_dw_clk_rate(dev); 38 37 const char *mode_str, *fp_str = ""; 39 38 u32 comp_param1; 40 39 u32 sda_falling_time, scl_falling_time; 41 40 struct i2c_timings *t = &dev->timings; 41 + u32 ic_clk; 42 42 int ret; 43 43 44 44 ret = i2c_dw_acquire_lock(dev); ··· 53 53 54 54 /* Calculate SCL timing parameters for standard mode if not set */ 55 55 if (!dev->ss_hcnt || !dev->ss_lcnt) { 56 + ic_clk = i2c_dw_clk_rate(dev); 56 57 dev->ss_hcnt = 57 58 i2c_dw_scl_hcnt(ic_clk, 58 59 4000, /* tHD;STA = tHIGH = 4.0 us */ ··· 90 89 * needed also in high speed mode. 91 90 */ 92 91 if (!dev->fs_hcnt || !dev->fs_lcnt) { 92 + ic_clk = i2c_dw_clk_rate(dev); 93 93 dev->fs_hcnt = 94 94 i2c_dw_scl_hcnt(ic_clk, 95 95 600, /* tHD;STA = tHIGH = 0.6 us */
+1 -1
drivers/i2c/busses/i2c-isch.c
··· 164 164 * run ~75 kHz instead which should do no harm. 165 165 */ 166 166 dev_notice(&sch_adapter.dev, 167 - "Clock divider unitialized. Setting defaults\n"); 167 + "Clock divider uninitialized. Setting defaults\n"); 168 168 outw(backbone_speed / (4 * 100), SMBHSTCLK); 169 169 } 170 170
+18 -4
drivers/i2c/busses/i2c-qcom-geni.c
··· 367 367 dma_addr_t rx_dma; 368 368 enum geni_se_xfer_mode mode; 369 369 unsigned long time_left = XFER_TIMEOUT; 370 + void *dma_buf; 370 371 371 372 gi2c->cur = msg; 372 - mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO; 373 + mode = GENI_SE_FIFO; 374 + dma_buf = i2c_get_dma_safe_msg_buf(msg, 32); 375 + if (dma_buf) 376 + mode = GENI_SE_DMA; 377 + 373 378 geni_se_select_mode(&gi2c->se, mode); 374 379 writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN); 375 380 geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param); 376 381 if (mode == GENI_SE_DMA) { 377 382 int ret; 378 383 379 - ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len, 384 + ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len, 380 385 &rx_dma); 381 386 if (ret) { 382 387 mode = GENI_SE_FIFO; 383 388 geni_se_select_mode(&gi2c->se, mode); 389 + i2c_put_dma_safe_msg_buf(dma_buf, msg, false); 384 390 } 385 391 } 386 392 ··· 399 393 if (gi2c->err) 400 394 geni_i2c_rx_fsm_rst(gi2c); 401 395 geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len); 396 + i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err); 402 397 } 403 398 return gi2c->err; 404 399 } ··· 410 403 dma_addr_t tx_dma; 411 404 enum geni_se_xfer_mode mode; 412 405 unsigned long time_left; 406 + void *dma_buf; 413 407 414 408 gi2c->cur = msg; 415 - mode = msg->len > 32 ? 
GENI_SE_DMA : GENI_SE_FIFO; 409 + mode = GENI_SE_FIFO; 410 + dma_buf = i2c_get_dma_safe_msg_buf(msg, 32); 411 + if (dma_buf) 412 + mode = GENI_SE_DMA; 413 + 416 414 geni_se_select_mode(&gi2c->se, mode); 417 415 writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN); 418 416 geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param); 419 417 if (mode == GENI_SE_DMA) { 420 418 int ret; 421 419 422 - ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len, 420 + ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len, 423 421 &tx_dma); 424 422 if (ret) { 425 423 mode = GENI_SE_FIFO; 426 424 geni_se_select_mode(&gi2c->se, mode); 425 + i2c_put_dma_safe_msg_buf(dma_buf, msg, false); 427 426 } 428 427 } 429 428 ··· 445 432 if (gi2c->err) 446 433 geni_i2c_tx_fsm_rst(gi2c); 447 434 geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len); 435 + i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err); 448 436 } 449 437 return gi2c->err; 450 438 }
+1
drivers/i2c/busses/i2c-scmi.c
··· 152 152 mt_params[3].type = ACPI_TYPE_INTEGER; 153 153 mt_params[3].integer.value = len; 154 154 mt_params[4].type = ACPI_TYPE_BUFFER; 155 + mt_params[4].buffer.length = len; 155 156 mt_params[4].buffer.pointer = data->block + 1; 156 157 } 157 158 break;
+8 -4
drivers/infiniband/hw/mlx5/mr.c
··· 544 544 int shrink = 0; 545 545 int c; 546 546 547 + if (!mr->allocated_from_cache) 548 + return; 549 + 547 550 c = order2idx(dev, mr->order); 548 551 if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { 549 552 mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); ··· 1650 1647 umem = NULL; 1651 1648 } 1652 1649 #endif 1653 - 1654 1650 clean_mr(dev, mr); 1655 1651 1652 + /* 1653 + * We should unregister the DMA address from the HCA before 1654 + * remove the DMA mapping. 1655 + */ 1656 + mlx5_mr_cache_free(dev, mr); 1656 1657 if (umem) { 1657 1658 ib_umem_release(umem); 1658 1659 atomic_sub(npages, &dev->mdev->priv.reg_pages); 1659 1660 } 1660 - 1661 1661 if (!mr->allocated_from_cache) 1662 1662 kfree(mr); 1663 - else 1664 - mlx5_mr_cache_free(dev, mr); 1665 1663 } 1666 1664 1667 1665 int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+1
drivers/input/evdev.c
··· 564 564 565 565 input_inject_event(&evdev->handle, 566 566 event.type, event.code, event.value); 567 + cond_resched(); 567 568 } 568 569 569 570 out:
+3
drivers/input/joystick/xpad.c
··· 231 231 { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, 232 232 { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, 233 233 { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, 234 + { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, 234 235 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, 235 236 { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, 236 237 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, ··· 531 530 XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), 532 531 XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1), 533 532 XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2), 533 + XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1), 534 + XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2), 534 535 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), 535 536 XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), 536 537 XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
+1
drivers/input/misc/uinput.c
··· 598 598 599 599 input_event(udev->dev, ev.type, ev.code, ev.value); 600 600 bytes += input_event_size(); 601 + cond_resched(); 601 602 } 602 603 603 604 return bytes;
+1
drivers/input/mousedev.c
··· 707 707 mousedev_generate_response(client, c); 708 708 709 709 spin_unlock_irq(&client->packet_lock); 710 + cond_resched(); 710 711 } 711 712 712 713 kill_fasync(&client->fasync, SIGIO, POLL_IN);
+20 -9
drivers/input/serio/i8042.c
··· 1395 1395 for (i = 0; i < I8042_NUM_PORTS; i++) { 1396 1396 struct serio *serio = i8042_ports[i].serio; 1397 1397 1398 - if (serio) { 1399 - printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n", 1400 - serio->name, 1401 - (unsigned long) I8042_DATA_REG, 1402 - (unsigned long) I8042_COMMAND_REG, 1403 - i8042_ports[i].irq); 1404 - serio_register_port(serio); 1405 - device_set_wakeup_capable(&serio->dev, true); 1406 - } 1398 + if (!serio) 1399 + continue; 1400 + 1401 + printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n", 1402 + serio->name, 1403 + (unsigned long) I8042_DATA_REG, 1404 + (unsigned long) I8042_COMMAND_REG, 1405 + i8042_ports[i].irq); 1406 + serio_register_port(serio); 1407 + device_set_wakeup_capable(&serio->dev, true); 1408 + 1409 + /* 1410 + * On platforms using suspend-to-idle, allow the keyboard to 1411 + * wake up the system from sleep by enabling keyboard wakeups 1412 + * by default. This is consistent with keyboard wakeup 1413 + * behavior on many platforms using suspend-to-RAM (ACPI S3) 1414 + * by default. 1415 + */ 1416 + if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO) 1417 + device_set_wakeup_enable(&serio->dev, true); 1407 1418 } 1408 1419 } 1409 1420
+2 -3
drivers/md/dm-cache-target.c
··· 3484 3484 int r; 3485 3485 3486 3486 migration_cache = KMEM_CACHE(dm_cache_migration, 0); 3487 - if (!migration_cache) { 3488 - dm_unregister_target(&cache_target); 3487 + if (!migration_cache) 3489 3488 return -ENOMEM; 3490 - } 3491 3489 3492 3490 r = dm_register_target(&cache_target); 3493 3491 if (r) { 3494 3492 DMERR("cache target registration failed: %d", r); 3493 + kmem_cache_destroy(migration_cache); 3495 3494 return r; 3496 3495 } 3497 3496
+2
drivers/md/dm-flakey.c
··· 467 467 static struct target_type flakey_target = { 468 468 .name = "flakey", 469 469 .version = {1, 5, 0}, 470 + #ifdef CONFIG_BLK_DEV_ZONED 470 471 .features = DM_TARGET_ZONED_HM, 472 + #endif 471 473 .module = THIS_MODULE, 472 474 .ctr = flakey_ctr, 473 475 .dtr = flakey_dtr,
+2 -1
drivers/md/dm-integrity.c
··· 3462 3462 r = -ENOMEM; 3463 3463 goto bad; 3464 3464 } 3465 - ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL); 3465 + ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, 3466 + ic->tag_size, GFP_KERNEL); 3466 3467 if (!ic->recalc_tags) { 3467 3468 ti->error = "Cannot allocate tags for recalculating"; 3468 3469 r = -ENOMEM;
+7 -1
drivers/md/dm-linear.c
··· 102 102 return DM_MAPIO_REMAPPED; 103 103 } 104 104 105 + #ifdef CONFIG_BLK_DEV_ZONED 105 106 static int linear_end_io(struct dm_target *ti, struct bio *bio, 106 107 blk_status_t *error) 107 108 { ··· 113 112 114 113 return DM_ENDIO_DONE; 115 114 } 115 + #endif 116 116 117 117 static void linear_status(struct dm_target *ti, status_type_t type, 118 118 unsigned status_flags, char *result, unsigned maxlen) ··· 210 208 static struct target_type linear_target = { 211 209 .name = "linear", 212 210 .version = {1, 4, 0}, 211 + #ifdef CONFIG_BLK_DEV_ZONED 212 + .end_io = linear_end_io, 213 213 .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, 214 + #else 215 + .features = DM_TARGET_PASSES_INTEGRITY, 216 + #endif 214 217 .module = THIS_MODULE, 215 218 .ctr = linear_ctr, 216 219 .dtr = linear_dtr, 217 220 .map = linear_map, 218 - .end_io = linear_end_io, 219 221 .status = linear_status, 220 222 .prepare_ioctl = linear_prepare_ioctl, 221 223 .iterate_devices = linear_iterate_devices,
+20 -7
drivers/md/dm.c
··· 1155 1155 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1156 1156 1157 1157 /* 1158 - * The zone descriptors obtained with a zone report indicate 1159 - * zone positions within the target device. The zone descriptors 1160 - * must be remapped to match their position within the dm device. 1161 - * A target may call dm_remap_zone_report after completion of a 1162 - * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained 1163 - * from the target device mapping to the dm device. 1158 + * The zone descriptors obtained with a zone report indicate zone positions 1159 + * within the target backing device, regardless of that device is a partition 1160 + * and regardless of the target mapping start sector on the device or partition. 1161 + * The zone descriptors start sector and write pointer position must be adjusted 1162 + * to match their relative position within the dm device. 1163 + * A target may call dm_remap_zone_report() after completion of a 1164 + * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the 1165 + * backing device. 1164 1166 */ 1165 1167 void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) 1166 1168 { ··· 1173 1171 struct blk_zone *zone; 1174 1172 unsigned int nr_rep = 0; 1175 1173 unsigned int ofst; 1174 + sector_t part_offset; 1176 1175 struct bio_vec bvec; 1177 1176 struct bvec_iter iter; 1178 1177 void *addr; 1179 1178 1180 1179 if (bio->bi_status) 1181 1180 return; 1181 + 1182 + /* 1183 + * bio sector was incremented by the request size on completion. Taking 1184 + * into account the original request sector, the target start offset on 1185 + * the backing device and the target mapping offset (ti->begin), the 1186 + * start sector of the backing device. The partition offset is always 0 1187 + * if the target uses a whole device. 
1188 + */ 1189 + part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio)); 1182 1190 1183 1191 /* 1184 1192 * Remap the start sector of the reported zones. For sequential zones, ··· 1207 1195 /* Set zones start sector */ 1208 1196 while (hdr->nr_zones && ofst < bvec.bv_len) { 1209 1197 zone = addr + ofst; 1198 + zone->start -= part_offset; 1210 1199 if (zone->start >= start + ti->len) { 1211 1200 hdr->nr_zones = 0; 1212 1201 break; ··· 1219 1206 else if (zone->cond == BLK_ZONE_COND_EMPTY) 1220 1207 zone->wp = zone->start; 1221 1208 else 1222 - zone->wp = zone->wp + ti->begin - start; 1209 + zone->wp = zone->wp + ti->begin - start - part_offset; 1223 1210 } 1224 1211 ofst += sizeof(struct blk_zone); 1225 1212 hdr->nr_zones--;
+10
drivers/mmc/core/block.c
··· 1371 1371 1372 1372 if (brq->data.blocks > 1) { 1373 1373 /* 1374 + * Some SD cards in SPI mode return a CRC error or even lock up 1375 + * completely when trying to read the last block using a 1376 + * multiblock read command. 1377 + */ 1378 + if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) && 1379 + (blk_rq_pos(req) + blk_rq_sectors(req) == 1380 + get_capacity(md->disk))) 1381 + brq->data.blocks--; 1382 + 1383 + /* 1374 1384 * After a read error, we redo the request one sector 1375 1385 * at a time in order to accurately determine which 1376 1386 * sectors can be read successfully.
+1 -1
drivers/mux/adgs1408.c
··· 128 128 129 129 MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>"); 130 130 MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver"); 131 - MODULE_LICENSE("GPL v2"); 131 + MODULE_LICENSE("GPL");
+4 -4
drivers/net/ethernet/amazon/ena/ena_eth_com.c
··· 349 349 (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> 350 350 ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; 351 351 ena_rx_ctx->l3_csum_err = 352 - (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> 353 - ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; 352 + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> 353 + ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT); 354 354 ena_rx_ctx->l4_csum_err = 355 - (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> 356 - ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; 355 + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> 356 + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT); 357 357 ena_rx_ctx->l4_csum_checked = 358 358 !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> 359 359 ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
+12 -10
drivers/net/ethernet/amazon/ena/ena_netdev.c
··· 1595 1595 if (rc) 1596 1596 return rc; 1597 1597 1598 - ena_init_napi(adapter); 1599 - 1600 1598 ena_change_mtu(adapter->netdev, adapter->netdev->mtu); 1601 1599 1602 1600 ena_refill_all_rx_bufs(adapter); ··· 1751 1753 netdev_dbg(adapter->netdev, "%s\n", __func__); 1752 1754 1753 1755 ena_setup_io_intr(adapter); 1756 + 1757 + /* napi poll functions should be initialized before running 1758 + * request_irq(), to handle a rare condition where there is a pending 1759 + * interrupt, causing the ISR to fire immediately while the poll 1760 + * function wasn't set yet, causing a null dereference 1761 + */ 1762 + ena_init_napi(adapter); 1754 1763 1755 1764 rc = ena_request_io_irq(adapter); 1756 1765 if (rc) ··· 2691 2686 ena_free_mgmnt_irq(adapter); 2692 2687 ena_disable_msix(adapter); 2693 2688 err_device_destroy: 2689 + ena_com_abort_admin_commands(ena_dev); 2690 + ena_com_wait_for_abort_completion(ena_dev); 2694 2691 ena_com_admin_destroy(ena_dev); 2692 + ena_com_mmio_reg_read_request_destroy(ena_dev); 2693 + ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 2695 2694 err: 2696 2695 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2697 2696 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); ··· 3209 3200 3210 3201 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) 3211 3202 { 3212 - int release_bars; 3203 + int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3213 3204 3214 - if (ena_dev->mem_bar) 3215 - devm_iounmap(&pdev->dev, ena_dev->mem_bar); 3216 - 3217 - if (ena_dev->reg_bar) 3218 - devm_iounmap(&pdev->dev, ena_dev->reg_bar); 3219 - 3220 - release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3221 3205 pci_release_selected_regions(pdev, release_bars); 3222 3206 } 3223 3207
+15 -28
drivers/net/ethernet/mellanox/mlx4/main.c
··· 260 260 NULL, NULL, NULL), 261 261 }; 262 262 263 - static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id, 264 - union devlink_param_value init_val) 265 - { 266 - struct mlx4_priv *priv = devlink_priv(devlink); 267 - struct mlx4_dev *dev = &priv->dev; 268 - int err; 269 - 270 - err = devlink_param_driverinit_value_set(devlink, param_id, init_val); 271 - if (err) 272 - mlx4_warn(dev, 273 - "devlink set parameter %u value failed (err = %d)", 274 - param_id, err); 275 - } 276 - 277 263 static void mlx4_devlink_set_params_init_values(struct devlink *devlink) 278 264 { 279 265 union devlink_param_value value; 280 266 281 267 value.vbool = !!mlx4_internal_err_reset; 282 - mlx4_devlink_set_init_value(devlink, 283 - DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, 284 - value); 268 + devlink_param_driverinit_value_set(devlink, 269 + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, 270 + value); 285 271 286 272 value.vu32 = 1UL << log_num_mac; 287 - mlx4_devlink_set_init_value(devlink, 288 - DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value); 273 + devlink_param_driverinit_value_set(devlink, 274 + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, 275 + value); 289 276 290 277 value.vbool = enable_64b_cqe_eqe; 291 - mlx4_devlink_set_init_value(devlink, 292 - MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, 293 - value); 278 + devlink_param_driverinit_value_set(devlink, 279 + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, 280 + value); 294 281 295 282 value.vbool = enable_4k_uar; 296 - mlx4_devlink_set_init_value(devlink, 297 - MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, 298 - value); 283 + devlink_param_driverinit_value_set(devlink, 284 + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, 285 + value); 299 286 300 287 value.vbool = false; 301 - mlx4_devlink_set_init_value(devlink, 302 - DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, 303 - value); 288 + devlink_param_driverinit_value_set(devlink, 289 + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, 290 + value); 304 291 } 305 292 306 293 static inline void 
mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
+2 -2
drivers/net/ethernet/realtek/r8169.c
··· 4269 4269 RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); 4270 4270 break; 4271 4271 case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: 4272 - case RTL_GIGA_MAC_VER_34: 4273 - case RTL_GIGA_MAC_VER_35: 4272 + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: 4273 + case RTL_GIGA_MAC_VER_38: 4274 4274 RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4275 4275 break; 4276 4276 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+4 -1
drivers/net/ethernet/socionext/netsec.c
··· 735 735 u16 idx = dring->tail; 736 736 struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); 737 737 738 - if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) 738 + if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { 739 + /* reading the register clears the irq */ 740 + netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); 739 741 break; 742 + } 740 743 741 744 /* This barrier is needed to keep us from reading 742 745 * any other fields out of the netsec_de until we have
-2
drivers/net/phy/sfp.c
··· 163 163 /* Give this long for the PHY to reset. */ 164 164 #define T_PHY_RESET_MS 50 165 165 166 - static DEFINE_MUTEX(sfp_mutex); 167 - 168 166 struct sff_data { 169 167 unsigned int gpios; 170 168 bool (*module_supported)(const struct sfp_eeprom_id *id);
+1
drivers/net/usb/qmi_wwan.c
··· 1241 1241 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ 1242 1242 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 1243 1243 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ 1244 + {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ 1244 1245 {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ 1245 1246 {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ 1246 1247 {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+4
drivers/net/wireless/marvell/libertas/if_sdio.c
··· 1317 1317 if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { 1318 1318 dev_info(dev, "Suspend without wake params -- powering down card\n"); 1319 1319 if (priv->fw_ready) { 1320 + ret = lbs_suspend(priv); 1321 + if (ret) 1322 + return ret; 1323 + 1320 1324 priv->power_up_on_resume = true; 1321 1325 if_sdio_power_off(card); 1322 1326 }
+5 -5
drivers/net/wireless/mediatek/mt76/usb.c
··· 318 318 if (!buf->urb) 319 319 return -ENOMEM; 320 320 321 - buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg), 321 + buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg), 322 322 gfp); 323 323 if (!buf->urb->sg) 324 324 return -ENOMEM; ··· 525 525 526 526 spin_lock_init(&q->rx_page_lock); 527 527 spin_lock_init(&q->lock); 528 - q->entry = devm_kzalloc(dev->dev, 529 - MT_NUM_RX_ENTRIES * sizeof(*q->entry), 528 + q->entry = devm_kcalloc(dev->dev, 529 + MT_NUM_RX_ENTRIES, sizeof(*q->entry), 530 530 GFP_KERNEL); 531 531 if (!q->entry) 532 532 return -ENOMEM; ··· 755 755 INIT_LIST_HEAD(&q->swq); 756 756 q->hw_idx = mt76_ac_to_hwq(i); 757 757 758 - q->entry = devm_kzalloc(dev->dev, 759 - MT_NUM_TX_ENTRIES * sizeof(*q->entry), 758 + q->entry = devm_kcalloc(dev->dev, 759 + MT_NUM_TX_ENTRIES, sizeof(*q->entry), 760 760 GFP_KERNEL); 761 761 if (!q->entry) 762 762 return -ENOMEM;
+18 -8
drivers/of/unittest.c
··· 771 771 struct of_phandle_args args; 772 772 int i, rc; 773 773 774 + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) 775 + return; 776 + 774 777 np = of_find_node_by_path("/testcase-data/interrupts/interrupts0"); 775 778 if (!np) { 776 779 pr_err("missing testcase data\n"); ··· 847 844 struct device_node *np; 848 845 struct of_phandle_args args; 849 846 int i, rc; 847 + 848 + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) 849 + return; 850 850 851 851 np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0"); 852 852 if (!np) { ··· 1007 1001 pdev = of_find_device_by_node(np); 1008 1002 unittest(pdev, "device 1 creation failed\n"); 1009 1003 1010 - irq = platform_get_irq(pdev, 0); 1011 - unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq); 1004 + if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) { 1005 + irq = platform_get_irq(pdev, 0); 1006 + unittest(irq == -EPROBE_DEFER, 1007 + "device deferred probe failed - %d\n", irq); 1012 1008 1013 - /* Test that a parsing failure does not return -EPROBE_DEFER */ 1014 - np = of_find_node_by_path("/testcase-data/testcase-device2"); 1015 - pdev = of_find_device_by_node(np); 1016 - unittest(pdev, "device 2 creation failed\n"); 1017 - irq = platform_get_irq(pdev, 0); 1018 - unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq); 1009 + /* Test that a parsing failure does not return -EPROBE_DEFER */ 1010 + np = of_find_node_by_path("/testcase-data/testcase-device2"); 1011 + pdev = of_find_device_by_node(np); 1012 + unittest(pdev, "device 2 creation failed\n"); 1013 + irq = platform_get_irq(pdev, 0); 1014 + unittest(irq < 0 && irq != -EPROBE_DEFER, 1015 + "device parsing error failed - %d\n", irq); 1016 + } 1019 1017 1020 1018 np = of_find_node_by_path("/testcase-data/platform-tests"); 1021 1019 unittest(np, "No testcase data in device tree\n");
+2 -2
drivers/pci/controller/pcie-cadence.c
··· 180 180 return 0; 181 181 } 182 182 183 - phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); 183 + phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); 184 184 if (!phy) 185 185 return -ENOMEM; 186 186 187 - link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); 187 + link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); 188 188 if (!link) 189 189 return -ENOMEM; 190 190
+12 -1
drivers/pinctrl/pinctrl-mcp23s08.c
··· 636 636 return err; 637 637 } 638 638 639 + return 0; 640 + } 641 + 642 + static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp) 643 + { 644 + struct gpio_chip *chip = &mcp->chip; 645 + int err; 646 + 639 647 err = gpiochip_irqchip_add_nested(chip, 640 648 &mcp23s08_irq_chip, 641 649 0, ··· 920 912 } 921 913 922 914 if (mcp->irq && mcp->irq_controller) { 923 - ret = mcp23s08_irq_setup(mcp); 915 + ret = mcp23s08_irqchip_setup(mcp); 924 916 if (ret) 925 917 goto fail; 926 918 } ··· 951 943 ret = PTR_ERR(mcp->pctldev); 952 944 goto fail; 953 945 } 946 + 947 + if (mcp->irq) 948 + ret = mcp23s08_irq_setup(mcp); 954 949 955 950 fail: 956 951 if (ret < 0)
+1 -1
drivers/platform/chrome/cros_ec_proto.c
··· 520 520 ret = cros_ec_cmd_xfer(ec_dev, msg); 521 521 if (ret > 0) { 522 522 ec_dev->event_size = ret - 1; 523 - memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size); 523 + memcpy(&ec_dev->event_data, msg->data, ret); 524 524 } 525 525 526 526 return ret;
+8 -3
drivers/s390/char/sclp_early_core.c
··· 210 210 * Output one or more lines of text on the SCLP console (VT220 and / 211 211 * or line-mode). 212 212 */ 213 - void __sclp_early_printk(const char *str, unsigned int len) 213 + void __sclp_early_printk(const char *str, unsigned int len, unsigned int force) 214 214 { 215 215 int have_linemode, have_vt220; 216 216 217 - if (sclp_init_state != sclp_init_state_uninitialized) 217 + if (!force && sclp_init_state != sclp_init_state_uninitialized) 218 218 return; 219 219 if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0) 220 220 return; ··· 227 227 228 228 void sclp_early_printk(const char *str) 229 229 { 230 - __sclp_early_printk(str, strlen(str)); 230 + __sclp_early_printk(str, strlen(str), 0); 231 + } 232 + 233 + void sclp_early_printk_force(const char *str) 234 + { 235 + __sclp_early_printk(str, strlen(str), 1); 231 236 }
+1 -1
drivers/s390/cio/vfio_ccw_cp.c
··· 163 163 164 164 for (i = 0; i < pat->pat_nr; i++, pa++) 165 165 for (j = 0; j < pa->pa_nr; j++) 166 - if (pa->pa_iova_pfn[i] == iova_pfn) 166 + if (pa->pa_iova_pfn[j] == iova_pfn) 167 167 return true; 168 168 169 169 return false;
+23 -1
drivers/s390/cio/vfio_ccw_drv.c
··· 22 22 #include "vfio_ccw_private.h" 23 23 24 24 struct workqueue_struct *vfio_ccw_work_q; 25 + struct kmem_cache *vfio_ccw_io_region; 25 26 26 27 /* 27 28 * Helpers ··· 80 79 cp_update_scsw(&private->cp, &irb->scsw); 81 80 cp_free(&private->cp); 82 81 } 83 - memcpy(private->io_region.irb_area, irb, sizeof(*irb)); 82 + memcpy(private->io_region->irb_area, irb, sizeof(*irb)); 84 83 85 84 if (private->io_trigger) 86 85 eventfd_signal(private->io_trigger, 1); ··· 115 114 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); 116 115 if (!private) 117 116 return -ENOMEM; 117 + 118 + private->io_region = kmem_cache_zalloc(vfio_ccw_io_region, 119 + GFP_KERNEL | GFP_DMA); 120 + if (!private->io_region) { 121 + kfree(private); 122 + return -ENOMEM; 123 + } 124 + 118 125 private->sch = sch; 119 126 dev_set_drvdata(&sch->dev, private); 120 127 ··· 148 139 cio_disable_subchannel(sch); 149 140 out_free: 150 141 dev_set_drvdata(&sch->dev, NULL); 142 + kmem_cache_free(vfio_ccw_io_region, private->io_region); 151 143 kfree(private); 152 144 return ret; 153 145 } ··· 163 153 164 154 dev_set_drvdata(&sch->dev, NULL); 165 155 156 + kmem_cache_free(vfio_ccw_io_region, private->io_region); 166 157 kfree(private); 167 158 168 159 return 0; ··· 243 232 if (!vfio_ccw_work_q) 244 233 return -ENOMEM; 245 234 235 + vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region", 236 + sizeof(struct ccw_io_region), 0, 237 + SLAB_ACCOUNT, 0, 238 + sizeof(struct ccw_io_region), NULL); 239 + if (!vfio_ccw_io_region) { 240 + destroy_workqueue(vfio_ccw_work_q); 241 + return -ENOMEM; 242 + } 243 + 246 244 isc_register(VFIO_CCW_ISC); 247 245 ret = css_driver_register(&vfio_ccw_sch_driver); 248 246 if (ret) { 249 247 isc_unregister(VFIO_CCW_ISC); 248 + kmem_cache_destroy(vfio_ccw_io_region); 250 249 destroy_workqueue(vfio_ccw_work_q); 251 250 } 252 251 ··· 267 246 { 268 247 css_driver_unregister(&vfio_ccw_sch_driver); 269 248 isc_unregister(VFIO_CCW_ISC); 249 + 
kmem_cache_destroy(vfio_ccw_io_region); 270 250 destroy_workqueue(vfio_ccw_work_q); 271 251 } 272 252 module_init(vfio_ccw_sch_init);
+3 -3
drivers/s390/cio/vfio_ccw_fsm.c
··· 93 93 enum vfio_ccw_event event) 94 94 { 95 95 pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state); 96 - private->io_region.ret_code = -EIO; 96 + private->io_region->ret_code = -EIO; 97 97 } 98 98 99 99 static void fsm_io_busy(struct vfio_ccw_private *private, 100 100 enum vfio_ccw_event event) 101 101 { 102 - private->io_region.ret_code = -EBUSY; 102 + private->io_region->ret_code = -EBUSY; 103 103 } 104 104 105 105 static void fsm_disabled_irq(struct vfio_ccw_private *private, ··· 126 126 { 127 127 union orb *orb; 128 128 union scsw *scsw = &private->scsw; 129 - struct ccw_io_region *io_region = &private->io_region; 129 + struct ccw_io_region *io_region = private->io_region; 130 130 struct mdev_device *mdev = private->mdev; 131 131 char *errstr = "request"; 132 132
+2 -2
drivers/s390/cio/vfio_ccw_ops.c
··· 174 174 return -EINVAL; 175 175 176 176 private = dev_get_drvdata(mdev_parent_dev(mdev)); 177 - region = &private->io_region; 177 + region = private->io_region; 178 178 if (copy_to_user(buf, (void *)region + *ppos, count)) 179 179 return -EFAULT; 180 180 ··· 196 196 if (private->state != VFIO_CCW_STATE_IDLE) 197 197 return -EACCES; 198 198 199 - region = &private->io_region; 199 + region = private->io_region; 200 200 if (copy_from_user((void *)region + *ppos, buf, count)) 201 201 return -EFAULT; 202 202
+1 -1
drivers/s390/cio/vfio_ccw_private.h
··· 41 41 atomic_t avail; 42 42 struct mdev_device *mdev; 43 43 struct notifier_block nb; 44 - struct ccw_io_region io_region; 44 + struct ccw_io_region *io_region; 45 45 46 46 struct channel_program cp; 47 47 struct irb irb;
+1 -10
drivers/sbus/char/openprom.c
··· 715 715 716 716 static int __init openprom_init(void) 717 717 { 718 - struct device_node *dp; 719 718 int err; 720 719 721 720 err = misc_register(&openprom_dev); 722 721 if (err) 723 722 return err; 724 723 725 - dp = of_find_node_by_path("/"); 726 - dp = dp->child; 727 - while (dp) { 728 - if (!strcmp(dp->name, "options")) 729 - break; 730 - dp = dp->sibling; 731 - } 732 - options_node = dp; 733 - 724 + options_node = of_get_child_by_name(of_find_node_by_path("/"), "options"); 734 725 if (!options_node) { 735 726 misc_deregister(&openprom_dev); 736 727 return -EIO;
+1 -2
drivers/sbus/char/oradax.c
··· 689 689 alloc_error: 690 690 kfree(ctx->ccb_buf); 691 691 done: 692 - if (ctx != NULL) 693 - kfree(ctx); 692 + kfree(ctx); 694 693 return -ENOMEM; 695 694 } 696 695
+1
drivers/scsi/qedi/qedi_main.c
··· 2472 2472 /* start qedi context */ 2473 2473 spin_lock_init(&qedi->hba_lock); 2474 2474 spin_lock_init(&qedi->task_idx_lock); 2475 + mutex_init(&qedi->stats_lock); 2475 2476 } 2476 2477 qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); 2477 2478 qedi_ops->ll2->start(qedi->cdev, &params);
+11
drivers/soc/fsl/qbman/bman_ccsr.c
··· 120 120 */ 121 121 static dma_addr_t fbpr_a; 122 122 static size_t fbpr_sz; 123 + static int __bman_probed; 123 124 124 125 static int bman_fbpr(struct reserved_mem *rmem) 125 126 { ··· 167 166 return IRQ_HANDLED; 168 167 } 169 168 169 + int bman_is_probed(void) 170 + { 171 + return __bman_probed; 172 + } 173 + EXPORT_SYMBOL_GPL(bman_is_probed); 174 + 170 175 static int fsl_bman_probe(struct platform_device *pdev) 171 176 { 172 177 int ret, err_irq; ··· 181 174 struct resource *res; 182 175 u16 id, bm_pool_cnt; 183 176 u8 major, minor; 177 + 178 + __bman_probed = -1; 184 179 185 180 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 186 181 if (!res) { ··· 263 254 0, bm_pool_cnt - 1, ret); 264 255 return ret; 265 256 } 257 + 258 + __bman_probed = 1; 266 259 267 260 return 0; 268 261 };
+11
drivers/soc/fsl/qbman/qman_ccsr.c
··· 273 273 static u32 __iomem *qm_ccsr_start; 274 274 /* A SDQCR mask comprising all the available/visible pool channels */ 275 275 static u32 qm_pools_sdqcr; 276 + static int __qman_probed; 276 277 277 278 static inline u32 qm_ccsr_in(u32 offset) 278 279 { ··· 687 686 return 0; 688 687 } 689 688 689 + int qman_is_probed(void) 690 + { 691 + return __qman_probed; 692 + } 693 + EXPORT_SYMBOL_GPL(qman_is_probed); 694 + 690 695 static int fsl_qman_probe(struct platform_device *pdev) 691 696 { 692 697 struct device *dev = &pdev->dev; ··· 701 694 int ret, err_irq; 702 695 u16 id; 703 696 u8 major, minor; 697 + 698 + __qman_probed = -1; 704 699 705 700 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 706 701 if (!res) { ··· 836 827 ret = qman_wq_alloc(); 837 828 if (ret) 838 829 return ret; 830 + 831 + __qman_probed = 1; 839 832 840 833 return 0; 841 834 }
+8
drivers/soc/fsl/qbman/qman_portal.c
··· 227 227 int irq, cpu, err; 228 228 u32 val; 229 229 230 + err = qman_is_probed(); 231 + if (!err) 232 + return -EPROBE_DEFER; 233 + if (err < 0) { 234 + dev_err(&pdev->dev, "failing probe due to qman probe error\n"); 235 + return -ENODEV; 236 + } 237 + 230 238 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 231 239 if (!pcfg) 232 240 return -ENOMEM;
+20 -29
drivers/thunderbolt/icm.c
··· 738 738 u8 link, depth; 739 739 u64 route; 740 740 741 - /* 742 - * After NVM upgrade adding root switch device fails because we 743 - * initiated reset. During that time ICM might still send 744 - * XDomain connected message which we ignore here. 745 - */ 746 - if (!tb->root_switch) 747 - return; 748 - 749 741 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 750 742 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 751 743 ICM_LINK_INFO_DEPTH_SHIFT; ··· 1027 1035 * packet for now. 1028 1036 */ 1029 1037 if (pkg->hdr.packet_id) 1030 - return; 1031 - 1032 - /* 1033 - * After NVM upgrade adding root switch device fails because we 1034 - * initiated reset. During that time ICM might still send device 1035 - * connected message which we ignore here. 1036 - */ 1037 - if (!tb->root_switch) 1038 1038 return; 1039 1039 1040 1040 route = get_route(pkg->route_hi, pkg->route_lo); ··· 1392 1408 1393 1409 mutex_lock(&tb->lock); 1394 1410 1395 - switch (n->pkg->code) { 1396 - case ICM_EVENT_DEVICE_CONNECTED: 1397 - icm->device_connected(tb, n->pkg); 1398 - break; 1399 - case ICM_EVENT_DEVICE_DISCONNECTED: 1400 - icm->device_disconnected(tb, n->pkg); 1401 - break; 1402 - case ICM_EVENT_XDOMAIN_CONNECTED: 1403 - icm->xdomain_connected(tb, n->pkg); 1404 - break; 1405 - case ICM_EVENT_XDOMAIN_DISCONNECTED: 1406 - icm->xdomain_disconnected(tb, n->pkg); 1407 - break; 1411 + /* 1412 + * When the domain is stopped we flush its workqueue but before 1413 + * that the root switch is removed. In that case we should treat 1414 + * the queued events as being canceled. 
1415 + */ 1416 + if (tb->root_switch) { 1417 + switch (n->pkg->code) { 1418 + case ICM_EVENT_DEVICE_CONNECTED: 1419 + icm->device_connected(tb, n->pkg); 1420 + break; 1421 + case ICM_EVENT_DEVICE_DISCONNECTED: 1422 + icm->device_disconnected(tb, n->pkg); 1423 + break; 1424 + case ICM_EVENT_XDOMAIN_CONNECTED: 1425 + icm->xdomain_connected(tb, n->pkg); 1426 + break; 1427 + case ICM_EVENT_XDOMAIN_DISCONNECTED: 1428 + icm->xdomain_disconnected(tb, n->pkg); 1429 + break; 1430 + } 1408 1431 } 1409 1432 1410 1433 mutex_unlock(&tb->lock);
+1 -1
drivers/thunderbolt/nhi.c
··· 1191 1191 tb_domain_exit(); 1192 1192 } 1193 1193 1194 - fs_initcall(nhi_init); 1194 + rootfs_initcall(nhi_init); 1195 1195 module_exit(nhi_unload);
-4
drivers/tty/serial/8250/8250_dw.c
··· 630 630 if (!data->skip_autocfg) 631 631 dw8250_setup_port(p); 632 632 633 - #ifdef CONFIG_PM 634 - uart.capabilities |= UART_CAP_RPM; 635 - #endif 636 - 637 633 /* If we have a valid fifosize, try hooking up DMA */ 638 634 if (p->fifosize) { 639 635 data->dma.rxconf.src_maxburst = p->fifosize / 4;
+2 -2
drivers/tty/serial/qcom_geni_serial.c
··· 868 868 geni_se_init(&port->se, port->rx_wm, port->rx_rfr); 869 869 geni_se_select_mode(&port->se, port->xfer_mode); 870 870 if (!uart_console(uport)) { 871 - port->rx_fifo = devm_kzalloc(uport->dev, 872 - port->rx_fifo_depth * sizeof(u32), GFP_KERNEL); 871 + port->rx_fifo = devm_kcalloc(uport->dev, 872 + port->rx_fifo_depth, sizeof(u32), GFP_KERNEL); 873 873 if (!port->rx_fifo) 874 874 return -ENOMEM; 875 875 }
+41 -15
drivers/tty/serial/sh-sci.c
··· 292 292 }, 293 293 294 294 /* 295 + * The "SCIFA" that is in RZ/T and RZ/A2. 296 + * It looks like a normal SCIF with FIFO data, but with a 297 + * compressed address space. Also, the break out of interrupts 298 + * are different: ERI/BRI, RXI, TXI, TEI, DRI. 299 + */ 300 + [SCIx_RZ_SCIFA_REGTYPE] = { 301 + .regs = { 302 + [SCSMR] = { 0x00, 16 }, 303 + [SCBRR] = { 0x02, 8 }, 304 + [SCSCR] = { 0x04, 16 }, 305 + [SCxTDR] = { 0x06, 8 }, 306 + [SCxSR] = { 0x08, 16 }, 307 + [SCxRDR] = { 0x0A, 8 }, 308 + [SCFCR] = { 0x0C, 16 }, 309 + [SCFDR] = { 0x0E, 16 }, 310 + [SCSPTR] = { 0x10, 16 }, 311 + [SCLSR] = { 0x12, 16 }, 312 + }, 313 + .fifosize = 16, 314 + .overrun_reg = SCLSR, 315 + .overrun_mask = SCLSR_ORER, 316 + .sampling_rate_mask = SCI_SR(32), 317 + .error_mask = SCIF_DEFAULT_ERROR_MASK, 318 + .error_clear = SCIF_ERROR_CLEAR, 319 + }, 320 + 321 + /* 295 322 * Common SH-3 SCIF definitions. 296 323 */ 297 324 [SCIx_SH3_SCIF_REGTYPE] = { ··· 346 319 [SCIx_SH4_SCIF_REGTYPE] = { 347 320 .regs = { 348 321 [SCSMR] = { 0x00, 16 }, 349 - [SCBRR] = { 0x02, 8 }, 350 - [SCSCR] = { 0x04, 16 }, 351 - [SCxTDR] = { 0x06, 8 }, 352 - [SCxSR] = { 0x08, 16 }, 353 - [SCxRDR] = { 0x0a, 8 }, 354 - [SCFCR] = { 0x0c, 16 }, 355 - [SCFDR] = { 0x0e, 16 }, 356 - [SCSPTR] = { 0x10, 16 }, 357 - [SCLSR] = { 0x12, 16 }, 322 + [SCBRR] = { 0x04, 8 }, 323 + [SCSCR] = { 0x08, 16 }, 324 + [SCxTDR] = { 0x0c, 8 }, 325 + [SCxSR] = { 0x10, 16 }, 326 + [SCxRDR] = { 0x14, 8 }, 327 + [SCFCR] = { 0x18, 16 }, 328 + [SCFDR] = { 0x1c, 16 }, 329 + [SCSPTR] = { 0x20, 16 }, 330 + [SCLSR] = { 0x24, 16 }, 358 331 }, 359 332 .fifosize = 16, 360 333 .overrun_reg = SCLSR, ··· 2837 2810 { 2838 2811 struct uart_port *port = &sci_port->port; 2839 2812 const struct resource *res; 2840 - unsigned int i, regtype; 2813 + unsigned int i; 2841 2814 int ret; 2842 2815 2843 2816 sci_port->cfg = p; ··· 2874 2847 if (unlikely(sci_port->params == NULL)) 2875 2848 return -EINVAL; 2876 2849 2877 - regtype = sci_port->params - 
sci_port_params; 2878 2850 switch (p->type) { 2879 2851 case PORT_SCIFB: 2880 2852 sci_port->rx_trigger = 48; ··· 2927 2901 else 2928 2902 port->regshift = 1; 2929 2903 } 2930 - 2931 - if (regtype == SCIx_SH4_SCIF_REGTYPE) 2932 - if (sci_port->reg_size >= 0x20) 2933 - port->regshift = 1; 2934 2904 2935 2905 /* 2936 2906 * The UART port needs an IRQ value, so we peg this to the RX IRQ ··· 3131 3109 { 3132 3110 .compatible = "renesas,scif-r7s72100", 3133 3111 .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE), 3112 + }, 3113 + { 3114 + .compatible = "renesas,scif-r7s9210", 3115 + .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE), 3134 3116 }, 3135 3117 /* Family-specific types */ 3136 3118 {
+6
drivers/usb/class/cdc-acm.c
··· 1514 1514 { 1515 1515 struct acm *acm = usb_get_intfdata(intf); 1516 1516 struct tty_struct *tty; 1517 + int i; 1517 1518 1518 1519 /* sibling interface is already cleaning up */ 1519 1520 if (!acm) ··· 1545 1544 1546 1545 tty_unregister_device(acm_tty_driver, acm->minor); 1547 1546 1547 + usb_free_urb(acm->ctrlurb); 1548 + for (i = 0; i < ACM_NW; i++) 1549 + usb_free_urb(acm->wb[i].urb); 1550 + for (i = 0; i < acm->rx_buflimit; i++) 1551 + usb_free_urb(acm->read_urbs[i]); 1548 1552 acm_write_buffers_free(acm); 1549 1553 usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); 1550 1554 acm_read_buffers_free(acm);
+2 -2
drivers/usb/host/xhci-mtk.c
··· 642 642 xhci_mtk_host_enable(mtk); 643 643 644 644 xhci_dbg(xhci, "%s: restart port polling\n", __func__); 645 - set_bit(HCD_FLAG_POLL_RH, &hcd->flags); 646 - usb_hcd_poll_rh_status(hcd); 647 645 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 648 646 usb_hcd_poll_rh_status(xhci->shared_hcd); 647 + set_bit(HCD_FLAG_POLL_RH, &hcd->flags); 648 + usb_hcd_poll_rh_status(hcd); 649 649 return 0; 650 650 } 651 651
+2
drivers/usb/host/xhci-pci.c
··· 185 185 } 186 186 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 187 187 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 188 + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || 189 + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || 188 190 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || 189 191 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) 190 192 xhci->quirks |= XHCI_MISSING_CAS;
+13 -2
drivers/usb/serial/option.c
··· 561 561 /* Interface is reserved */ 562 562 #define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0) 563 563 564 + /* Interface must have two endpoints */ 565 + #define NUMEP2 BIT(16) 566 + 564 567 565 568 static const struct usb_device_id option_ids[] = { 566 569 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, ··· 1084 1081 .driver_info = RSVD(4) }, 1085 1082 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), 1086 1083 .driver_info = RSVD(4) }, 1087 - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), 1088 - .driver_info = RSVD(4) | RSVD(5) }, 1084 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), 1085 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, 1086 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, 1089 1087 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1090 1088 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1091 1089 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), ··· 2001 1997 * the Windows driver .INF files for reserved interface numbers. 2002 1998 */ 2003 1999 if (device_flags & RSVD(iface_desc->bInterfaceNumber)) 2000 + return -ENODEV; 2001 + 2002 + /* 2003 + * Allow matching on bNumEndpoints for devices whose interface numbers 2004 + * can change (e.g. Quectel EP06). 2005 + */ 2006 + if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2) 2004 2007 return -ENODEV; 2005 2008 2006 2009 /* Store the device flags so we can use them during attach. */
+2 -1
drivers/usb/serial/usb-serial-simple.c
··· 84 84 85 85 /* Motorola Tetra driver */ 86 86 #define MOTOROLA_TETRA_IDS() \ 87 - { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ 87 + { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ 88 + { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ 88 89 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); 89 90 90 91 /* Novatel Wireless GPS driver */
+2 -1
drivers/video/fbdev/aty/atyfb.h
··· 333 333 extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll); 334 334 extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par); 335 335 336 + extern const u8 aty_postdividers[8]; 337 + 336 338 337 339 /* 338 340 * Hardware cursor support ··· 361 359 362 360 extern void aty_reset_engine(const struct atyfb_par *par); 363 361 extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info); 364 - extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par); 365 362 366 363 void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); 367 364 void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+4 -3
drivers/video/fbdev/aty/atyfb_base.c
··· 3087 3087 /* 3088 3088 * PLL Reference Divider M: 3089 3089 */ 3090 - M = pll_regs[2]; 3090 + M = pll_regs[PLL_REF_DIV]; 3091 3091 3092 3092 /* 3093 3093 * PLL Feedback Divider N (Dependent on CLOCK_CNTL): 3094 3094 */ 3095 - N = pll_regs[7 + (clock_cntl & 3)]; 3095 + N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)]; 3096 3096 3097 3097 /* 3098 3098 * PLL Post Divider P (Dependent on CLOCK_CNTL): 3099 3099 */ 3100 - P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1)); 3100 + P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) | 3101 + ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)]; 3101 3102 3102 3103 /* 3103 3104 * PLL Divider Q:
+5 -5
drivers/video/fbdev/aty/mach64_ct.c
··· 115 115 */ 116 116 117 117 #define Maximum_DSP_PRECISION 7 118 - static u8 postdividers[] = {1,2,4,8,3}; 118 + const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12}; 119 119 120 120 static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll) 121 121 { ··· 222 222 pll->vclk_post_div += (q < 64*8); 223 223 pll->vclk_post_div += (q < 32*8); 224 224 } 225 - pll->vclk_post_div_real = postdividers[pll->vclk_post_div]; 225 + pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div]; 226 226 // pll->vclk_post_div <<= 6; 227 227 pll->vclk_fb_div = q * pll->vclk_post_div_real / 8; 228 228 pllvclk = (1000000 * 2 * pll->vclk_fb_div) / ··· 513 513 u8 mclk_fb_div, pll_ext_cntl; 514 514 pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par); 515 515 pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par); 516 - pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07]; 516 + pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07]; 517 517 mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par); 518 518 if (pll_ext_cntl & PLL_MFB_TIMES_4_2B) 519 519 mclk_fb_div <<= 1; ··· 535 535 xpost_div += (q < 64*8); 536 536 xpost_div += (q < 32*8); 537 537 } 538 - pll->ct.xclk_post_div_real = postdividers[xpost_div]; 538 + pll->ct.xclk_post_div_real = aty_postdividers[xpost_div]; 539 539 pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8; 540 540 541 541 #ifdef CONFIG_PPC ··· 584 584 mpost_div += (q < 64*8); 585 585 mpost_div += (q < 32*8); 586 586 } 587 - sclk_post_div_real = postdividers[mpost_div]; 587 + sclk_post_div_real = aty_postdividers[mpost_div]; 588 588 pll->ct.sclk_fb_div = q * sclk_post_div_real / 8; 589 589 pll->ct.spll_cntl2 = mpost_div << 4; 590 590 #ifdef DEBUG
+15 -2
fs/afs/cell.c
··· 514 514 */ 515 515 static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell) 516 516 { 517 + struct hlist_node **p; 518 + struct afs_cell *pcell; 517 519 int ret; 518 520 519 521 if (!cell->anonymous_key) { ··· 536 534 return ret; 537 535 538 536 mutex_lock(&net->proc_cells_lock); 539 - list_add_tail(&cell->proc_link, &net->proc_cells); 537 + for (p = &net->proc_cells.first; *p; p = &(*p)->next) { 538 + pcell = hlist_entry(*p, struct afs_cell, proc_link); 539 + if (strcmp(cell->name, pcell->name) < 0) 540 + break; 541 + } 542 + 543 + cell->proc_link.pprev = p; 544 + cell->proc_link.next = *p; 545 + rcu_assign_pointer(*p, &cell->proc_link.next); 546 + if (cell->proc_link.next) 547 + cell->proc_link.next->pprev = &cell->proc_link.next; 548 + 540 549 afs_dynroot_mkdir(net, cell); 541 550 mutex_unlock(&net->proc_cells_lock); 542 551 return 0; ··· 563 550 afs_proc_cell_remove(cell); 564 551 565 552 mutex_lock(&net->proc_cells_lock); 566 - list_del_init(&cell->proc_link); 553 + hlist_del_rcu(&cell->proc_link); 567 554 afs_dynroot_rmdir(net, cell); 568 555 mutex_unlock(&net->proc_cells_lock); 569 556
+1 -1
fs/afs/dynroot.c
··· 265 265 return -ERESTARTSYS; 266 266 267 267 net->dynroot_sb = sb; 268 - list_for_each_entry(cell, &net->proc_cells, proc_link) { 268 + hlist_for_each_entry(cell, &net->proc_cells, proc_link) { 269 269 ret = afs_dynroot_mkdir(net, cell); 270 270 if (ret < 0) 271 271 goto error;
+2 -2
fs/afs/internal.h
··· 244 244 seqlock_t cells_lock; 245 245 246 246 struct mutex proc_cells_lock; 247 - struct list_head proc_cells; 247 + struct hlist_head proc_cells; 248 248 249 249 /* Known servers. Theoretically each fileserver can only be in one 250 250 * cell, but in practice, people create aliases and subsets and there's ··· 322 322 struct afs_net *net; 323 323 struct key *anonymous_key; /* anonymous user key for this cell */ 324 324 struct work_struct manager; /* Manager for init/deinit/dns */ 325 - struct list_head proc_link; /* /proc cell list link */ 325 + struct hlist_node proc_link; /* /proc cell list link */ 326 326 #ifdef CONFIG_AFS_FSCACHE 327 327 struct fscache_cookie *cache; /* caching cookie */ 328 328 #endif
+1 -1
fs/afs/main.c
··· 87 87 timer_setup(&net->cells_timer, afs_cells_timer, 0); 88 88 89 89 mutex_init(&net->proc_cells_lock); 90 - INIT_LIST_HEAD(&net->proc_cells); 90 + INIT_HLIST_HEAD(&net->proc_cells); 91 91 92 92 seqlock_init(&net->fs_lock); 93 93 net->fs_servers = RB_ROOT;
+3 -4
fs/afs/proc.c
··· 33 33 static int afs_proc_cells_show(struct seq_file *m, void *v) 34 34 { 35 35 struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); 36 - struct afs_net *net = afs_seq2net(m); 37 36 38 - if (v == &net->proc_cells) { 37 + if (v == SEQ_START_TOKEN) { 39 38 /* display header on line 1 */ 40 39 seq_puts(m, "USE NAME\n"); 41 40 return 0; ··· 49 50 __acquires(rcu) 50 51 { 51 52 rcu_read_lock(); 52 - return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos); 53 + return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos); 53 54 } 54 55 55 56 static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos) 56 57 { 57 - return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos); 58 + return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos); 58 59 } 59 60 60 61 static void afs_proc_cells_stop(struct seq_file *m, void *v)
+2
fs/afs/server.c
··· 199 199 200 200 write_sequnlock(&net->fs_addr_lock); 201 201 ret = 0; 202 + goto out; 202 203 203 204 exists: 204 205 afs_get_server(server); 206 + out: 205 207 write_sequnlock(&net->fs_lock); 206 208 return server; 207 209 }
+4
fs/gfs2/bmap.c
··· 975 975 { 976 976 struct gfs2_inode *ip = GFS2_I(inode); 977 977 978 + if (!page_has_buffers(page)) { 979 + create_empty_buffers(page, inode->i_sb->s_blocksize, 980 + (1 << BH_Dirty)|(1 << BH_Uptodate)); 981 + } 978 982 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); 979 983 } 980 984
+165 -35
fs/xfs/xfs_reflink.c
··· 1220 1220 return 0; 1221 1221 } 1222 1222 1223 + /* Unlock both inodes after they've been prepped for a range clone. */ 1224 + STATIC void 1225 + xfs_reflink_remap_unlock( 1226 + struct file *file_in, 1227 + struct file *file_out) 1228 + { 1229 + struct inode *inode_in = file_inode(file_in); 1230 + struct xfs_inode *src = XFS_I(inode_in); 1231 + struct inode *inode_out = file_inode(file_out); 1232 + struct xfs_inode *dest = XFS_I(inode_out); 1233 + bool same_inode = (inode_in == inode_out); 1234 + 1235 + xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); 1236 + if (!same_inode) 1237 + xfs_iunlock(src, XFS_MMAPLOCK_SHARED); 1238 + inode_unlock(inode_out); 1239 + if (!same_inode) 1240 + inode_unlock_shared(inode_in); 1241 + } 1242 + 1223 1243 /* 1224 - * Link a range of blocks from one file to another. 1244 + * If we're reflinking to a point past the destination file's EOF, we must 1245 + * zero any speculative post-EOF preallocations that sit between the old EOF 1246 + * and the destination file offset. 1225 1247 */ 1226 - int 1227 - xfs_reflink_remap_range( 1248 + static int 1249 + xfs_reflink_zero_posteof( 1250 + struct xfs_inode *ip, 1251 + loff_t pos) 1252 + { 1253 + loff_t isize = i_size_read(VFS_I(ip)); 1254 + 1255 + if (pos <= isize) 1256 + return 0; 1257 + 1258 + trace_xfs_zero_eof(ip, isize, pos - isize); 1259 + return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL, 1260 + &xfs_iomap_ops); 1261 + } 1262 + 1263 + /* 1264 + * Prepare two files for range cloning. Upon a successful return both inodes 1265 + * will have the iolock and mmaplock held, the page cache of the out file will 1266 + * be truncated, and any leases on the out file will have been broken. This 1267 + * function borrows heavily from xfs_file_aio_write_checks. 1268 + * 1269 + * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't 1270 + * checked that the bytes beyond EOF physically match. 
Hence we cannot use the 1271 + * EOF block in the source dedupe range because it's not a complete block match, 1272 + * hence can introduce a corruption into the file that has its block replaced, 1273 + * 1274 + * In similar fashion, the VFS file cloning also allows partial EOF blocks to be 1275 + * "block aligned" for the purposes of cloning entire files. However, if the 1276 + * source file range includes the EOF block and it lands within the existing EOF 1277 + * of the destination file, then we can expose stale data from beyond the source 1278 + * file EOF in the destination file. 1279 + * 1280 + * XFS doesn't support partial block sharing, so in both cases we have to check 1281 + * these cases ourselves. For dedupe, we can simply round the length to dedupe 1282 + * down to the previous whole block and ignore the partial EOF block. While this 1283 + * means we can't dedupe the last block of a file, this is an acceptable 1284 + * tradeoff for simplicity on implementation. 1285 + * 1286 + * For cloning, we want to share the partial EOF block if it is also the new EOF 1287 + * block of the destination file. If the partial EOF block lies inside the 1288 + * existing destination EOF, then we have to abort the clone to avoid exposing 1289 + * stale data in the destination file. Hence we reject these clone attempts with 1290 + * -EINVAL in this case. 
1291 + */ 1292 + STATIC int 1293 + xfs_reflink_remap_prep( 1228 1294 struct file *file_in, 1229 1295 loff_t pos_in, 1230 1296 struct file *file_out, 1231 1297 loff_t pos_out, 1232 - u64 len, 1298 + u64 *len, 1233 1299 bool is_dedupe) 1234 1300 { 1235 1301 struct inode *inode_in = file_inode(file_in); 1236 1302 struct xfs_inode *src = XFS_I(inode_in); 1237 1303 struct inode *inode_out = file_inode(file_out); 1238 1304 struct xfs_inode *dest = XFS_I(inode_out); 1239 - struct xfs_mount *mp = src->i_mount; 1240 1305 bool same_inode = (inode_in == inode_out); 1241 - xfs_fileoff_t sfsbno, dfsbno; 1242 - xfs_filblks_t fsblen; 1243 - xfs_extlen_t cowextsize; 1306 + u64 blkmask = i_blocksize(inode_in) - 1; 1244 1307 ssize_t ret; 1245 - 1246 - if (!xfs_sb_version_hasreflink(&mp->m_sb)) 1247 - return -EOPNOTSUPP; 1248 - 1249 - if (XFS_FORCED_SHUTDOWN(mp)) 1250 - return -EIO; 1251 1308 1252 1309 /* Lock both files against IO */ 1253 1310 ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out); ··· 1327 1270 goto out_unlock; 1328 1271 1329 1272 ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out, 1330 - &len, is_dedupe); 1273 + len, is_dedupe); 1331 1274 if (ret <= 0) 1332 1275 goto out_unlock; 1276 + 1277 + /* 1278 + * If the dedupe data matches, chop off the partial EOF block 1279 + * from the source file so we don't try to dedupe the partial 1280 + * EOF block. 1281 + */ 1282 + if (is_dedupe) { 1283 + *len &= ~blkmask; 1284 + } else if (*len & blkmask) { 1285 + /* 1286 + * The user is attempting to share a partial EOF block, 1287 + * if it's inside the destination EOF then reject it. 
1288 + */ 1289 + if (pos_out + *len < i_size_read(inode_out)) { 1290 + ret = -EINVAL; 1291 + goto out_unlock; 1292 + } 1293 + } 1333 1294 1334 1295 /* Attach dquots to dest inode before changing block map */ 1335 1296 ret = xfs_qm_dqattach(dest); 1336 1297 if (ret) 1337 1298 goto out_unlock; 1338 1299 1339 - trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); 1340 - 1341 1300 /* 1342 - * Clear out post-eof preallocations because we don't have page cache 1343 - * backing the delayed allocations and they'll never get freed on 1344 - * their own. 1301 + * Zero existing post-eof speculative preallocations in the destination 1302 + * file. 1345 1303 */ 1346 - if (xfs_can_free_eofblocks(dest, true)) { 1347 - ret = xfs_free_eofblocks(dest); 1348 - if (ret) 1349 - goto out_unlock; 1350 - } 1304 + ret = xfs_reflink_zero_posteof(dest, pos_out); 1305 + if (ret) 1306 + goto out_unlock; 1351 1307 1352 1308 /* Set flags and remap blocks. */ 1353 1309 ret = xfs_reflink_set_inode_flag(src, dest); 1354 1310 if (ret) 1355 1311 goto out_unlock; 1312 + 1313 + /* Zap any page cache for the destination file's range. */ 1314 + truncate_inode_pages_range(&inode_out->i_data, pos_out, 1315 + PAGE_ALIGN(pos_out + *len) - 1); 1316 + 1317 + /* If we're altering the file contents... */ 1318 + if (!is_dedupe) { 1319 + /* 1320 + * ...update the timestamps (which will grab the ilock again 1321 + * from xfs_fs_dirty_inode, so we have to call it before we 1322 + * take the ilock). 1323 + */ 1324 + if (!(file_out->f_mode & FMODE_NOCMTIME)) { 1325 + ret = file_update_time(file_out); 1326 + if (ret) 1327 + goto out_unlock; 1328 + } 1329 + 1330 + /* 1331 + * ...clear the security bits if the process is not being run 1332 + * by root. This keeps people from modifying setuid and setgid 1333 + * binaries. 
1334 + */ 1335 + ret = file_remove_privs(file_out); 1336 + if (ret) 1337 + goto out_unlock; 1338 + } 1339 + 1340 + return 1; 1341 + out_unlock: 1342 + xfs_reflink_remap_unlock(file_in, file_out); 1343 + return ret; 1344 + } 1345 + 1346 + /* 1347 + * Link a range of blocks from one file to another. 1348 + */ 1349 + int 1350 + xfs_reflink_remap_range( 1351 + struct file *file_in, 1352 + loff_t pos_in, 1353 + struct file *file_out, 1354 + loff_t pos_out, 1355 + u64 len, 1356 + bool is_dedupe) 1357 + { 1358 + struct inode *inode_in = file_inode(file_in); 1359 + struct xfs_inode *src = XFS_I(inode_in); 1360 + struct inode *inode_out = file_inode(file_out); 1361 + struct xfs_inode *dest = XFS_I(inode_out); 1362 + struct xfs_mount *mp = src->i_mount; 1363 + xfs_fileoff_t sfsbno, dfsbno; 1364 + xfs_filblks_t fsblen; 1365 + xfs_extlen_t cowextsize; 1366 + ssize_t ret; 1367 + 1368 + if (!xfs_sb_version_hasreflink(&mp->m_sb)) 1369 + return -EOPNOTSUPP; 1370 + 1371 + if (XFS_FORCED_SHUTDOWN(mp)) 1372 + return -EIO; 1373 + 1374 + /* Prepare and then clone file data. */ 1375 + ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out, 1376 + &len, is_dedupe); 1377 + if (ret <= 0) 1378 + return ret; 1379 + 1380 + trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); 1356 1381 1357 1382 dfsbno = XFS_B_TO_FSBT(mp, pos_out); 1358 1383 sfsbno = XFS_B_TO_FSBT(mp, pos_in); ··· 1443 1304 pos_out + len); 1444 1305 if (ret) 1445 1306 goto out_unlock; 1446 - 1447 - /* Zap any page cache for the destination file's range. 
*/ 1448 - truncate_inode_pages_range(&inode_out->i_data, pos_out, 1449 - PAGE_ALIGN(pos_out + len) - 1); 1450 1307 1451 1308 /* 1452 1309 * Carry the cowextsize hint from src to dest if we're sharing the ··· 1460 1325 is_dedupe); 1461 1326 1462 1327 out_unlock: 1463 - xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); 1464 - if (!same_inode) 1465 - xfs_iunlock(src, XFS_MMAPLOCK_SHARED); 1466 - inode_unlock(inode_out); 1467 - if (!same_inode) 1468 - inode_unlock_shared(inode_in); 1328 + xfs_reflink_remap_unlock(file_in, file_out); 1469 1329 if (ret) 1470 1330 trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); 1471 1331 return ret;
+3 -3
include/asm-generic/vmlinux.lds.h
··· 68 68 */ 69 69 #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION 70 70 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* 71 - #define DATA_MAIN .data .data.[0-9a-zA-Z_]* 71 + #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* 72 72 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* 73 73 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* 74 74 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* ··· 613 613 614 614 #define EXIT_DATA \ 615 615 *(.exit.data .exit.data.*) \ 616 - *(.fini_array) \ 617 - *(.dtors) \ 616 + *(.fini_array .fini_array.*) \ 617 + *(.dtors .dtors.*) \ 618 618 MEM_DISCARD(exit.data*) \ 619 619 MEM_DISCARD(exit.rodata*) 620 620
+1
include/linux/cgroup-defs.h
··· 412 412 * specific task are charged to the dom_cgrp. 413 413 */ 414 414 struct cgroup *dom_cgrp; 415 + struct cgroup *old_dom_cgrp; /* used while enabling threaded */ 415 416 416 417 /* per-cpu recursive resource statistics */ 417 418 struct cgroup_rstat_cpu __percpu *rstat_cpu;
+14 -6
include/linux/fpga/fpga-mgr.h
··· 53 53 FPGA_MGR_STATE_OPERATING, 54 54 }; 55 55 56 - /* 57 - * FPGA Manager flags 58 - * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported 59 - * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting 60 - * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first 61 - * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed 56 + /** 57 + * DOC: FPGA Manager flags 58 + * 59 + * Flags used in the &fpga_image_info->flags field 60 + * 61 + * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported 62 + * 63 + * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting 64 + * 65 + * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted 66 + * 67 + * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first 68 + * 69 + * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed 62 70 */ 63 71 #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) 64 72 #define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
+7
include/linux/gpio/driver.h
··· 95 95 unsigned int num_parents; 96 96 97 97 /** 98 + * @parent_irq: 99 + * 100 + * For use by gpiochip_set_cascaded_irqchip() 101 + */ 102 + unsigned int parent_irq; 103 + 104 + /** 98 105 * @parents: 99 106 * 100 107 * A list of interrupt parents of a GPIO chip. This is owned by the
-4
include/linux/mmzone.h
··· 668 668 wait_queue_head_t kcompactd_wait; 669 669 struct task_struct *kcompactd; 670 670 #endif 671 - #ifdef CONFIG_NUMA_BALANCING 672 - /* Lock serializing the migrate rate limiting window */ 673 - spinlock_t numabalancing_migrate_lock; 674 - #endif 675 671 /* 676 672 * This is a per-node reserve of pages that are not available 677 673 * to userspace allocations.
+7
include/linux/netdevice.h
··· 2496 2496 struct netlink_ext_ack *extack; 2497 2497 }; 2498 2498 2499 + struct netdev_notifier_info_ext { 2500 + struct netdev_notifier_info info; /* must be first */ 2501 + union { 2502 + u32 mtu; 2503 + } ext; 2504 + }; 2505 + 2499 2506 struct netdev_notifier_change_info { 2500 2507 struct netdev_notifier_info info; /* must be first */ 2501 2508 unsigned int flags_changed;
+1
include/linux/serial_sci.h
··· 36 36 SCIx_SH4_SCIF_FIFODATA_REGTYPE, 37 37 SCIx_SH7705_SCIF_REGTYPE, 38 38 SCIx_HSCIF_REGTYPE, 39 + SCIx_RZ_SCIFA_REGTYPE, 39 40 40 41 SCIx_NR_REGTYPES, 41 42 };
+2
include/linux/suspend.h
··· 251 251 return unlikely(s2idle_state == S2IDLE_STATE_ENTER); 252 252 } 253 253 254 + extern bool pm_suspend_via_s2idle(void); 254 255 extern void __init pm_states_init(void); 255 256 extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); 256 257 extern void s2idle_wake(void); ··· 283 282 static inline void pm_set_resume_via_firmware(void) {} 284 283 static inline bool pm_suspend_via_firmware(void) { return false; } 285 284 static inline bool pm_resume_via_firmware(void) { return false; } 285 + static inline bool pm_suspend_via_s2idle(void) { return false; } 286 286 287 287 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} 288 288 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+10 -2
include/net/devlink.h
··· 298 298 299 299 #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 300 300 301 - #define DEVLINK_PARAM_MAX_STRING_VALUE 32 301 + #define __DEVLINK_PARAM_MAX_STRING_VALUE 32 302 302 enum devlink_param_type { 303 303 DEVLINK_PARAM_TYPE_U8, 304 304 DEVLINK_PARAM_TYPE_U16, ··· 311 311 u8 vu8; 312 312 u16 vu16; 313 313 u32 vu32; 314 - const char *vstr; 314 + char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE]; 315 315 bool vbool; 316 316 }; 317 317 ··· 568 568 int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, 569 569 union devlink_param_value init_val); 570 570 void devlink_param_value_changed(struct devlink *devlink, u32 param_id); 571 + void devlink_param_value_str_fill(union devlink_param_value *dst_val, 572 + const char *src); 571 573 struct devlink_region *devlink_region_create(struct devlink *devlink, 572 574 const char *region_name, 573 575 u32 region_max_snapshots, ··· 803 801 804 802 static inline void 805 803 devlink_param_value_changed(struct devlink *devlink, u32 param_id) 804 + { 805 + } 806 + 807 + static inline void 808 + devlink_param_value_str_fill(union devlink_param_value *dst_val, 809 + const char *src) 806 810 { 807 811 } 808 812
+1
include/net/ip_fib.h
··· 395 395 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); 396 396 int fib_sync_down_addr(struct net_device *dev, __be32 local); 397 397 int fib_sync_up(struct net_device *dev, unsigned int nh_flags); 398 + void fib_sync_mtu(struct net_device *dev, u32 orig_mtu); 398 399 399 400 #ifdef CONFIG_IP_ROUTE_MULTIPATH 400 401 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
+8
include/soc/fsl/bman.h
··· 126 126 */ 127 127 int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num); 128 128 129 + /** 130 + * bman_is_probed - Check if bman is probed 131 + * 132 + * Returns 1 if the bman driver successfully probed, -1 if the bman driver 133 + * failed to probe or 0 if the bman driver did not probed yet. 134 + */ 135 + int bman_is_probed(void); 136 + 129 137 #endif /* __FSL_BMAN_H */
+8
include/soc/fsl/qman.h
··· 1186 1186 */ 1187 1187 int qman_release_cgrid(u32 id); 1188 1188 1189 + /** 1190 + * qman_is_probed - Check if qman is probed 1191 + * 1192 + * Returns 1 if the qman driver successfully probed, -1 if the qman driver 1193 + * failed to probe or 0 if the qman driver did not probed yet. 1194 + */ 1195 + int qman_is_probed(void); 1196 + 1189 1197 #endif /* __FSL_QMAN_H */
+1
include/trace/events/rxrpc.h
··· 931 931 TP_fast_assign( 932 932 __entry->call = call_id; 933 933 memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr)); 934 + __entry->where = where; 934 935 ), 935 936 936 937 TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
+14 -11
include/uapi/linux/smc_diag.h
··· 18 18 * on the internal clcsock, and more SMC-related socket data 19 19 */ 20 20 struct smc_diag_msg { 21 - __u8 diag_family; 22 - __u8 diag_state; 23 - __u8 diag_mode; 24 - __u8 diag_shutdown; 21 + __u8 diag_family; 22 + __u8 diag_state; 23 + union { 24 + __u8 diag_mode; 25 + __u8 diag_fallback; /* the old name of the field */ 26 + }; 27 + __u8 diag_shutdown; 25 28 struct inet_diag_sockid id; 26 29 27 - __u32 diag_uid; 28 - __u64 diag_inode; 30 + __u32 diag_uid; 31 + __aligned_u64 diag_inode; 29 32 }; 30 33 31 34 /* Mode of a connection */ ··· 102 99 }; 103 100 104 101 struct smcd_diag_dmbinfo { /* SMC-D Socket internals */ 105 - __u32 linkid; /* Link identifier */ 106 - __u64 peer_gid; /* Peer GID */ 107 - __u64 my_gid; /* My GID */ 108 - __u64 token; /* Token of DMB */ 109 - __u64 peer_token; /* Token of remote DMBE */ 102 + __u32 linkid; /* Link identifier */ 103 + __aligned_u64 peer_gid; /* Peer GID */ 104 + __aligned_u64 my_gid; /* My GID */ 105 + __aligned_u64 token; /* Token of DMB */ 106 + __aligned_u64 peer_token; /* Token of remote DMBE */ 110 107 }; 111 108 112 109 #endif /* _UAPI_SMC_DIAG_H_ */
+1
include/uapi/linux/udp.h
··· 40 40 #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ 41 41 #define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */ 42 42 #define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */ 43 + #define UDP_ENCAP_RXRPC 6 43 44 44 45 #endif /* _UAPI_LINUX_UDP_H */
+16 -9
kernel/cgroup/cgroup.c
··· 2836 2836 } 2837 2837 2838 2838 /** 2839 - * cgroup_save_control - save control masks of a subtree 2839 + * cgroup_save_control - save control masks and dom_cgrp of a subtree 2840 2840 * @cgrp: root of the target subtree 2841 2841 * 2842 - * Save ->subtree_control and ->subtree_ss_mask to the respective old_ 2843 - * prefixed fields for @cgrp's subtree including @cgrp itself. 2842 + * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the 2843 + * respective old_ prefixed fields for @cgrp's subtree including @cgrp 2844 + * itself. 2844 2845 */ 2845 2846 static void cgroup_save_control(struct cgroup *cgrp) 2846 2847 { ··· 2851 2850 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 2852 2851 dsct->old_subtree_control = dsct->subtree_control; 2853 2852 dsct->old_subtree_ss_mask = dsct->subtree_ss_mask; 2853 + dsct->old_dom_cgrp = dsct->dom_cgrp; 2854 2854 } 2855 2855 } 2856 2856 ··· 2877 2875 } 2878 2876 2879 2877 /** 2880 - * cgroup_restore_control - restore control masks of a subtree 2878 + * cgroup_restore_control - restore control masks and dom_cgrp of a subtree 2881 2879 * @cgrp: root of the target subtree 2882 2880 * 2883 - * Restore ->subtree_control and ->subtree_ss_mask from the respective old_ 2884 - * prefixed fields for @cgrp's subtree including @cgrp itself. 2881 + * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the 2882 + * respective old_ prefixed fields for @cgrp's subtree including @cgrp 2883 + * itself. 
2885 2884 */ 2886 2885 static void cgroup_restore_control(struct cgroup *cgrp) 2887 2886 { ··· 2892 2889 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 2893 2890 dsct->subtree_control = dsct->old_subtree_control; 2894 2891 dsct->subtree_ss_mask = dsct->old_subtree_ss_mask; 2892 + dsct->dom_cgrp = dsct->old_dom_cgrp; 2895 2893 } 2896 2894 } 2897 2895 ··· 3200 3196 { 3201 3197 struct cgroup *parent = cgroup_parent(cgrp); 3202 3198 struct cgroup *dom_cgrp = parent->dom_cgrp; 3199 + struct cgroup *dsct; 3200 + struct cgroup_subsys_state *d_css; 3203 3201 int ret; 3204 3202 3205 3203 lockdep_assert_held(&cgroup_mutex); ··· 3231 3225 */ 3232 3226 cgroup_save_control(cgrp); 3233 3227 3234 - cgrp->dom_cgrp = dom_cgrp; 3228 + cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) 3229 + if (dsct == cgrp || cgroup_is_threaded(dsct)) 3230 + dsct->dom_cgrp = dom_cgrp; 3231 + 3235 3232 ret = cgroup_apply_control(cgrp); 3236 3233 if (!ret) 3237 3234 parent->nr_threaded_children++; 3238 - else 3239 - cgrp->dom_cgrp = cgrp; 3240 3235 3241 3236 cgroup_finalize_control(cgrp, ret); 3242 3237 return ret;
+6
kernel/power/suspend.c
··· 63 63 enum s2idle_states __read_mostly s2idle_state; 64 64 static DEFINE_RAW_SPINLOCK(s2idle_lock); 65 65 66 + bool pm_suspend_via_s2idle(void) 67 + { 68 + return mem_sleep_current == PM_SUSPEND_TO_IDLE; 69 + } 70 + EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle); 71 + 66 72 void s2idle_set_ops(const struct platform_s2idle_ops *ops) 67 73 { 68 74 lock_system_sleep();
-1
lib/Makefile
··· 119 119 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ 120 120 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ 121 121 obj-$(CONFIG_BCH) += bch.o 122 - CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500) 123 122 obj-$(CONFIG_LZO_COMPRESS) += lzo/ 124 123 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ 125 124 obj-$(CONFIG_LZ4_COMPRESS) += lz4/
+13 -4
lib/bch.c
··· 79 79 #define GF_T(_p) (CONFIG_BCH_CONST_T) 80 80 #define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1) 81 81 #define BCH_MAX_M (CONFIG_BCH_CONST_M) 82 + #define BCH_MAX_T (CONFIG_BCH_CONST_T) 82 83 #else 83 84 #define GF_M(_p) ((_p)->m) 84 85 #define GF_T(_p) ((_p)->t) 85 86 #define GF_N(_p) ((_p)->n) 86 - #define BCH_MAX_M 15 87 + #define BCH_MAX_M 15 /* 2KB */ 88 + #define BCH_MAX_T 64 /* 64 bit correction */ 87 89 #endif 88 - 89 - #define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M) 90 90 91 91 #define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32) 92 92 #define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8) 93 93 94 94 #define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32) 95 - #define BCH_ECC_MAX_BYTES DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8) 96 95 97 96 #ifndef dbg 98 97 #define dbg(_fmt, args...) do {} while (0) ··· 200 201 const uint32_t * const tab2 = tab1 + 256*(l+1); 201 202 const uint32_t * const tab3 = tab2 + 256*(l+1); 202 203 const uint32_t *pdata, *p0, *p1, *p2, *p3; 204 + 205 + if (WARN_ON(r_bytes > sizeof(r))) 206 + return; 203 207 204 208 if (ecc) { 205 209 /* load ecc parity bytes into internal 32-bit buffer */ ··· 1284 1282 * values of m greater than 15 are not currently supported; 1285 1283 * supporting m > 15 would require changing table base type 1286 1284 * (uint16_t) and a small patch in matrix transposition 1285 + */ 1286 + goto fail; 1287 + 1288 + if (t > BCH_MAX_T) 1289 + /* 1290 + * we can support larger than 64 bits if necessary, at the 1291 + * cost of higher stack usage. 1287 1292 */ 1288 1293 goto fail; 1289 1294
+1 -1
lib/vsprintf.c
··· 2794 2794 copy = end - str; 2795 2795 memcpy(str, args, copy); 2796 2796 str += len; 2797 - args += len; 2797 + args += len + 1; 2798 2798 } 2799 2799 } 2800 2800 if (process)
-1
lib/xz/xz_crc32.c
··· 15 15 * but they are bigger and use more memory for the lookup table. 16 16 */ 17 17 18 - #include <linux/crc32poly.h> 19 18 #include "xz_private.h" 20 19 21 20 /*
+4
lib/xz/xz_private.h
··· 102 102 # endif 103 103 #endif 104 104 105 + #ifndef CRC32_POLY_LE 106 + #define CRC32_POLY_LE 0xedb88320 107 + #endif 108 + 105 109 /* 106 110 * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used 107 111 * before calling xz_dec_lzma2_run().
-10
mm/page_alloc.c
··· 6193 6193 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 6194 6194 } 6195 6195 6196 - #ifdef CONFIG_NUMA_BALANCING 6197 - static void pgdat_init_numabalancing(struct pglist_data *pgdat) 6198 - { 6199 - spin_lock_init(&pgdat->numabalancing_migrate_lock); 6200 - } 6201 - #else 6202 - static void pgdat_init_numabalancing(struct pglist_data *pgdat) {} 6203 - #endif 6204 - 6205 6196 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6206 6197 static void pgdat_init_split_queue(struct pglist_data *pgdat) 6207 6198 { ··· 6217 6226 { 6218 6227 pgdat_resize_init(pgdat); 6219 6228 6220 - pgdat_init_numabalancing(pgdat); 6221 6229 pgdat_init_split_queue(pgdat); 6222 6230 pgdat_init_kcompactd(pgdat); 6223 6231
+1
mm/percpu.c
··· 1212 1212 { 1213 1213 if (!chunk) 1214 1214 return; 1215 + pcpu_mem_free(chunk->md_blocks); 1215 1216 pcpu_mem_free(chunk->bound_map); 1216 1217 pcpu_mem_free(chunk->alloc_map); 1217 1218 pcpu_mem_free(chunk);
+26 -2
net/core/dev.c
··· 1752 1752 } 1753 1753 EXPORT_SYMBOL(call_netdevice_notifiers); 1754 1754 1755 + /** 1756 + * call_netdevice_notifiers_mtu - call all network notifier blocks 1757 + * @val: value passed unmodified to notifier function 1758 + * @dev: net_device pointer passed unmodified to notifier function 1759 + * @arg: additional u32 argument passed to the notifier function 1760 + * 1761 + * Call all network notifier blocks. Parameters and return value 1762 + * are as for raw_notifier_call_chain(). 1763 + */ 1764 + static int call_netdevice_notifiers_mtu(unsigned long val, 1765 + struct net_device *dev, u32 arg) 1766 + { 1767 + struct netdev_notifier_info_ext info = { 1768 + .info.dev = dev, 1769 + .ext.mtu = arg, 1770 + }; 1771 + 1772 + BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 1773 + 1774 + return call_netdevice_notifiers_info(val, &info.info); 1775 + } 1776 + 1755 1777 #ifdef CONFIG_NET_INGRESS 1756 1778 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 1757 1779 ··· 7597 7575 err = __dev_set_mtu(dev, new_mtu); 7598 7576 7599 7577 if (!err) { 7600 - err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 7578 + err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 7579 + orig_mtu); 7601 7580 err = notifier_to_errno(err); 7602 7581 if (err) { 7603 7582 /* setting mtu back and notifying everyone again, 7604 7583 * so that they have a chance to revert changes. 7605 7584 */ 7606 7585 __dev_set_mtu(dev, orig_mtu); 7607 - call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 7586 + call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 7587 + new_mtu); 7608 7588 } 7609 7589 } 7610 7590 return err;
+37 -6
net/core/devlink.c
··· 3012 3012 struct genl_info *info, 3013 3013 union devlink_param_value *value) 3014 3014 { 3015 + int len; 3016 + 3015 3017 if (param->type != DEVLINK_PARAM_TYPE_BOOL && 3016 3018 !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) 3017 3019 return -EINVAL; ··· 3029 3027 value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); 3030 3028 break; 3031 3029 case DEVLINK_PARAM_TYPE_STRING: 3032 - if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) > 3033 - DEVLINK_PARAM_MAX_STRING_VALUE) 3030 + len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]), 3031 + nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); 3032 + if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) || 3033 + len >= __DEVLINK_PARAM_MAX_STRING_VALUE) 3034 3034 return -EINVAL; 3035 - value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); 3035 + strcpy(value->vstr, 3036 + nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); 3036 3037 break; 3037 3038 case DEVLINK_PARAM_TYPE_BOOL: 3038 3039 value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ? 
··· 3122 3117 return -EOPNOTSUPP; 3123 3118 3124 3119 if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) { 3125 - param_item->driverinit_value = value; 3120 + if (param->type == DEVLINK_PARAM_TYPE_STRING) 3121 + strcpy(param_item->driverinit_value.vstr, value.vstr); 3122 + else 3123 + param_item->driverinit_value = value; 3126 3124 param_item->driverinit_value_valid = true; 3127 3125 } else { 3128 3126 if (!param->set) ··· 4565 4557 DEVLINK_PARAM_CMODE_DRIVERINIT)) 4566 4558 return -EOPNOTSUPP; 4567 4559 4568 - *init_val = param_item->driverinit_value; 4560 + if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING) 4561 + strcpy(init_val->vstr, param_item->driverinit_value.vstr); 4562 + else 4563 + *init_val = param_item->driverinit_value; 4569 4564 4570 4565 return 0; 4571 4566 } ··· 4599 4588 DEVLINK_PARAM_CMODE_DRIVERINIT)) 4600 4589 return -EOPNOTSUPP; 4601 4590 4602 - param_item->driverinit_value = init_val; 4591 + if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING) 4592 + strcpy(param_item->driverinit_value.vstr, init_val.vstr); 4593 + else 4594 + param_item->driverinit_value = init_val; 4603 4595 param_item->driverinit_value_valid = true; 4604 4596 4605 4597 devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); ··· 4633 4619 devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); 4634 4620 } 4635 4621 EXPORT_SYMBOL_GPL(devlink_param_value_changed); 4622 + 4623 + /** 4624 + * devlink_param_value_str_fill - Safely fill-up the string preventing 4625 + * from overflow of the preallocated buffer 4626 + * 4627 + * @dst_val: destination devlink_param_value 4628 + * @src: source buffer 4629 + */ 4630 + void devlink_param_value_str_fill(union devlink_param_value *dst_val, 4631 + const char *src) 4632 + { 4633 + size_t len; 4634 + 4635 + len = strlcpy(dst_val->vstr, src, __DEVLINK_PARAM_MAX_STRING_VALUE); 4636 + WARN_ON(len >= __DEVLINK_PARAM_MAX_STRING_VALUE); 4637 + } 4638 + EXPORT_SYMBOL_GPL(devlink_param_value_str_fill); 4636 4639 4637 4640 
/** 4638 4641 * devlink_region_create - create a new address region
+7 -5
net/core/skbuff.c
··· 4394 4394 */ 4395 4395 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 4396 4396 { 4397 - if (unlikely(start > skb_headlen(skb)) || 4398 - unlikely((int)start + off > skb_headlen(skb) - 2)) { 4399 - net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 4400 - start, off, skb_headlen(skb)); 4397 + u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 4398 + u32 csum_start = skb_headroom(skb) + (u32)start; 4399 + 4400 + if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { 4401 + net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 4402 + start, off, skb_headroom(skb), skb_headlen(skb)); 4401 4403 return false; 4402 4404 } 4403 4405 skb->ip_summed = CHECKSUM_PARTIAL; 4404 - skb->csum_start = skb_headroom(skb) + start; 4406 + skb->csum_start = csum_start; 4405 4407 skb->csum_offset = off; 4406 4408 skb_set_transport_header(skb, start); 4407 4409 return true;
+8 -4
net/ipv4/fib_frontend.c
··· 1291 1291 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1292 1292 { 1293 1293 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1294 - struct netdev_notifier_changeupper_info *info; 1294 + struct netdev_notifier_changeupper_info *upper_info = ptr; 1295 + struct netdev_notifier_info_ext *info_ext = ptr; 1295 1296 struct in_device *in_dev; 1296 1297 struct net *net = dev_net(dev); 1297 1298 unsigned int flags; ··· 1327 1326 fib_sync_up(dev, RTNH_F_LINKDOWN); 1328 1327 else 1329 1328 fib_sync_down_dev(dev, event, false); 1330 - /* fall through */ 1329 + rt_cache_flush(net); 1330 + break; 1331 1331 case NETDEV_CHANGEMTU: 1332 + fib_sync_mtu(dev, info_ext->ext.mtu); 1332 1333 rt_cache_flush(net); 1333 1334 break; 1334 1335 case NETDEV_CHANGEUPPER: 1335 - info = ptr; 1336 + upper_info = ptr; 1336 1337 /* flush all routes if dev is linked to or unlinked from 1337 1338 * an L3 master device (e.g., VRF) 1338 1339 */ 1339 - if (info->upper_dev && netif_is_l3_master(info->upper_dev)) 1340 + if (upper_info->upper_dev && 1341 + netif_is_l3_master(upper_info->upper_dev)) 1340 1342 fib_disable_ip(dev, NETDEV_DOWN, true); 1341 1343 break; 1342 1344 }
+50
net/ipv4/fib_semantics.c
··· 1457 1457 return NOTIFY_DONE; 1458 1458 } 1459 1459 1460 + /* Update the PMTU of exceptions when: 1461 + * - the new MTU of the first hop becomes smaller than the PMTU 1462 + * - the old MTU was the same as the PMTU, and it limited discovery of 1463 + * larger MTUs on the path. With that limit raised, we can now 1464 + * discover larger MTUs 1465 + * A special case is locked exceptions, for which the PMTU is smaller 1466 + * than the minimal accepted PMTU: 1467 + * - if the new MTU is greater than the PMTU, don't make any change 1468 + * - otherwise, unlock and set PMTU 1469 + */ 1470 + static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig) 1471 + { 1472 + struct fnhe_hash_bucket *bucket; 1473 + int i; 1474 + 1475 + bucket = rcu_dereference_protected(nh->nh_exceptions, 1); 1476 + if (!bucket) 1477 + return; 1478 + 1479 + for (i = 0; i < FNHE_HASH_SIZE; i++) { 1480 + struct fib_nh_exception *fnhe; 1481 + 1482 + for (fnhe = rcu_dereference_protected(bucket[i].chain, 1); 1483 + fnhe; 1484 + fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) { 1485 + if (fnhe->fnhe_mtu_locked) { 1486 + if (new <= fnhe->fnhe_pmtu) { 1487 + fnhe->fnhe_pmtu = new; 1488 + fnhe->fnhe_mtu_locked = false; 1489 + } 1490 + } else if (new < fnhe->fnhe_pmtu || 1491 + orig == fnhe->fnhe_pmtu) { 1492 + fnhe->fnhe_pmtu = new; 1493 + } 1494 + } 1495 + } 1496 + } 1497 + 1498 + void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) 1499 + { 1500 + unsigned int hash = fib_devindex_hashfn(dev->ifindex); 1501 + struct hlist_head *head = &fib_info_devhash[hash]; 1502 + struct fib_nh *nh; 1503 + 1504 + hlist_for_each_entry(nh, head, nh_hash) { 1505 + if (nh->nh_dev == dev) 1506 + nh_update_mtu(nh, dev->mtu, orig_mtu); 1507 + } 1508 + } 1509 + 1460 1510 /* Event force Flags Description 1461 1511 * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host 1462 1512 * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
+4 -3
net/ipv4/route.c
··· 1001 1001 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) 1002 1002 { 1003 1003 struct dst_entry *dst = &rt->dst; 1004 + u32 old_mtu = ipv4_mtu(dst); 1004 1005 struct fib_result res; 1005 1006 bool lock = false; 1006 1007 1007 1008 if (ip_mtu_locked(dst)) 1008 1009 return; 1009 1010 1010 - if (ipv4_mtu(dst) < mtu) 1011 + if (old_mtu < mtu) 1011 1012 return; 1012 1013 1013 1014 if (mtu < ip_rt_min_pmtu) { 1014 1015 lock = true; 1015 - mtu = ip_rt_min_pmtu; 1016 + mtu = min(old_mtu, ip_rt_min_pmtu); 1016 1017 } 1017 1018 1018 - if (rt->rt_pmtu == mtu && 1019 + if (rt->rt_pmtu == mtu && !lock && 1019 1020 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) 1020 1021 return; 1021 1022
+1 -1
net/ipv4/udp.c
··· 1627 1627 *err = error; 1628 1628 return NULL; 1629 1629 } 1630 - EXPORT_SYMBOL_GPL(__skb_recv_udp); 1630 + EXPORT_SYMBOL(__skb_recv_udp); 1631 1631 1632 1632 /* 1633 1633 * This should be easy, if there is something there we
+2
net/ipv6/ip6_fib.c
··· 195 195 *ppcpu_rt = NULL; 196 196 } 197 197 } 198 + 199 + free_percpu(f6i->rt6i_pcpu); 198 200 } 199 201 200 202 lwtstate_put(f6i->fib6_nh.nh_lwtstate);
+10 -3
net/rds/send.c
··· 1007 1007 return ret; 1008 1008 } 1009 1009 1010 - static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn) 1010 + static int rds_send_mprds_hash(struct rds_sock *rs, 1011 + struct rds_connection *conn, int nonblock) 1011 1012 { 1012 1013 int hash; 1013 1014 ··· 1024 1023 * used. But if we are interrupted, we have to use the zero 1025 1024 * c_path in case the connection ends up being non-MP capable. 1026 1025 */ 1027 - if (conn->c_npaths == 0) 1026 + if (conn->c_npaths == 0) { 1027 + /* Cannot wait for the connection be made, so just use 1028 + * the base c_path. 1029 + */ 1030 + if (nonblock) 1031 + return 0; 1028 1032 if (wait_event_interruptible(conn->c_hs_waitq, 1029 1033 conn->c_npaths != 0)) 1030 1034 hash = 0; 1035 + } 1031 1036 if (conn->c_npaths == 1) 1032 1037 hash = 0; 1033 1038 } ··· 1263 1256 } 1264 1257 1265 1258 if (conn->c_trans->t_mp_capable) 1266 - cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)]; 1259 + cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)]; 1267 1260 else 1268 1261 cpath = &conn->c_path[0]; 1269 1262
+13 -10
net/rxrpc/ar-internal.h
··· 302 302 303 303 /* calculated RTT cache */ 304 304 #define RXRPC_RTT_CACHE_SIZE 32 305 + spinlock_t rtt_input_lock; /* RTT lock for input routine */ 305 306 ktime_t rtt_last_req; /* Time of last RTT request */ 306 307 u64 rtt; /* Current RTT estimate (in nS) */ 307 308 u64 rtt_sum; /* Sum of cache contents */ ··· 443 442 spinlock_t state_lock; /* state-change lock */ 444 443 enum rxrpc_conn_cache_state cache_state; 445 444 enum rxrpc_conn_proto_state state; /* current state of connection */ 446 - u32 local_abort; /* local abort code */ 447 - u32 remote_abort; /* remote abort code */ 445 + u32 abort_code; /* Abort code of connection abort */ 448 446 int debug_id; /* debug ID for printks */ 449 447 atomic_t serial; /* packet serial number counter */ 450 448 unsigned int hi_serial; /* highest serial number received */ 451 449 u32 security_nonce; /* response re-use preventer */ 452 - u16 service_id; /* Service ID, possibly upgraded */ 450 + u32 service_id; /* Service ID, possibly upgraded */ 453 451 u8 size_align; /* data size alignment (for security) */ 454 452 u8 security_size; /* security header size */ 455 453 u8 security_ix; /* security type */ 456 454 u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ 455 + short error; /* Local error code */ 457 456 }; 458 457 459 458 static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp) ··· 636 635 bool tx_phase; /* T if transmission phase, F if receive phase */ 637 636 u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */ 638 637 638 + spinlock_t input_lock; /* Lock for packet input to this call */ 639 + 639 640 /* receive-phase ACK management */ 640 641 u8 ackr_reason; /* reason to ACK */ 641 642 u16 ackr_skew; /* skew on packet being ACK'd */ ··· 723 720 void rxrpc_discard_prealloc(struct rxrpc_sock *); 724 721 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *, 725 722 struct rxrpc_sock *, 726 - struct rxrpc_peer *, 727 - struct rxrpc_connection *, 728 723 struct sk_buff 
*); 729 724 void rxrpc_accept_incoming_calls(struct rxrpc_local *); 730 725 struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, ··· 892 891 extern struct idr rxrpc_client_conn_ids; 893 892 894 893 void rxrpc_destroy_client_conn_ids(void); 895 - int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *, 896 - struct sockaddr_rxrpc *, gfp_t); 894 + int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *, 895 + struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, 896 + gfp_t); 897 897 void rxrpc_expose_client_call(struct rxrpc_call *); 898 898 void rxrpc_disconnect_client_call(struct rxrpc_call *); 899 899 void rxrpc_put_client_conn(struct rxrpc_connection *); ··· 967 965 /* 968 966 * input.c 969 967 */ 970 - void rxrpc_data_ready(struct sock *); 968 + int rxrpc_input_packet(struct sock *, struct sk_buff *); 971 969 972 970 /* 973 971 * insecure.c ··· 1047 1045 */ 1048 1046 struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *, 1049 1047 const struct sockaddr_rxrpc *); 1050 - struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *, 1048 + struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *, 1051 1049 struct sockaddr_rxrpc *, gfp_t); 1052 1050 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); 1053 - void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *); 1051 + void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *, 1052 + struct rxrpc_peer *); 1054 1053 void rxrpc_destroy_all_peers(struct rxrpc_net *); 1055 1054 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); 1056 1055 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
+18 -9
net/rxrpc/call_accept.c
··· 287 287 (peer_tail + 1) & 288 288 (RXRPC_BACKLOG_MAX - 1)); 289 289 290 - rxrpc_new_incoming_peer(local, peer); 290 + rxrpc_new_incoming_peer(rx, local, peer); 291 291 } 292 292 293 293 /* Now allocate and set up the connection */ ··· 333 333 */ 334 334 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, 335 335 struct rxrpc_sock *rx, 336 - struct rxrpc_peer *peer, 337 - struct rxrpc_connection *conn, 338 336 struct sk_buff *skb) 339 337 { 340 338 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 339 + struct rxrpc_connection *conn; 340 + struct rxrpc_peer *peer; 341 341 struct rxrpc_call *call; 342 342 343 343 _enter(""); ··· 353 353 call = NULL; 354 354 goto out; 355 355 } 356 + 357 + /* The peer, connection and call may all have sprung into existence due 358 + * to a duplicate packet being handled on another CPU in parallel, so 359 + * we have to recheck the routing. However, we're now holding 360 + * rx->incoming_lock, so the values should remain stable. 361 + */ 362 + conn = rxrpc_find_connection_rcu(local, skb, &peer); 356 363 357 364 call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); 358 365 if (!call) { ··· 403 396 404 397 case RXRPC_CONN_SERVICE: 405 398 write_lock(&call->state_lock); 406 - if (rx->discard_new_call) 407 - call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 408 - else 409 - call->state = RXRPC_CALL_SERVER_ACCEPTING; 399 + if (call->state < RXRPC_CALL_COMPLETE) { 400 + if (rx->discard_new_call) 401 + call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 402 + else 403 + call->state = RXRPC_CALL_SERVER_ACCEPTING; 404 + } 410 405 write_unlock(&call->state_lock); 411 406 break; 412 407 413 408 case RXRPC_CONN_REMOTELY_ABORTED: 414 409 rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, 415 - conn->remote_abort, -ECONNABORTED); 410 + conn->abort_code, conn->error); 416 411 break; 417 412 case RXRPC_CONN_LOCALLY_ABORTED: 418 413 rxrpc_abort_call("CON", call, sp->hdr.seq, 419 - conn->local_abort, -ECONNABORTED); 414 + 
conn->abort_code, conn->error); 420 415 break; 421 416 default: 422 417 BUG();
+3 -2
net/rxrpc/call_object.c
··· 138 138 init_waitqueue_head(&call->waitq); 139 139 spin_lock_init(&call->lock); 140 140 spin_lock_init(&call->notify_lock); 141 + spin_lock_init(&call->input_lock); 141 142 rwlock_init(&call->state_lock); 142 143 atomic_set(&call->usage, 1); 143 144 call->debug_id = debug_id; ··· 288 287 /* Set up or get a connection record and set the protocol parameters, 289 288 * including channel number and call ID. 290 289 */ 291 - ret = rxrpc_connect_call(call, cp, srx, gfp); 290 + ret = rxrpc_connect_call(rx, call, cp, srx, gfp); 292 291 if (ret < 0) 293 292 goto error; 294 293 ··· 340 339 /* Set up or get a connection record and set the protocol parameters, 341 340 * including channel number and call ID. 342 341 */ 343 - ret = rxrpc_connect_call(call, cp, srx, gfp); 342 + ret = rxrpc_connect_call(rx, call, cp, srx, gfp); 344 343 if (ret < 0) 345 344 goto error; 346 345
+6 -4
net/rxrpc/conn_client.c
··· 276 276 * If we return with a connection, the call will be on its waiting list. It's 277 277 * left to the caller to assign a channel and wake up the call. 278 278 */ 279 - static int rxrpc_get_client_conn(struct rxrpc_call *call, 279 + static int rxrpc_get_client_conn(struct rxrpc_sock *rx, 280 + struct rxrpc_call *call, 280 281 struct rxrpc_conn_parameters *cp, 281 282 struct sockaddr_rxrpc *srx, 282 283 gfp_t gfp) ··· 290 289 291 290 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 292 291 293 - cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); 292 + cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp); 294 293 if (!cp->peer) 295 294 goto error; 296 295 ··· 684 683 * find a connection for a call 685 684 * - called in process context with IRQs enabled 686 685 */ 687 - int rxrpc_connect_call(struct rxrpc_call *call, 686 + int rxrpc_connect_call(struct rxrpc_sock *rx, 687 + struct rxrpc_call *call, 688 688 struct rxrpc_conn_parameters *cp, 689 689 struct sockaddr_rxrpc *srx, 690 690 gfp_t gfp) ··· 698 696 rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); 699 697 rxrpc_cull_active_client_conns(rxnet); 700 698 701 - ret = rxrpc_get_client_conn(call, cp, srx, gfp); 699 + ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp); 702 700 if (ret < 0) 703 701 goto out; 704 702
+15 -11
net/rxrpc/conn_event.c
··· 126 126 127 127 switch (chan->last_type) { 128 128 case RXRPC_PACKET_TYPE_ABORT: 129 - _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort); 129 + _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code); 130 130 break; 131 131 case RXRPC_PACKET_TYPE_ACK: 132 132 trace_rxrpc_tx_ack(chan->call_debug_id, serial, ··· 153 153 * pass a connection-level abort onto all calls on that connection 154 154 */ 155 155 static void rxrpc_abort_calls(struct rxrpc_connection *conn, 156 - enum rxrpc_call_completion compl, 157 - u32 abort_code, int error) 156 + enum rxrpc_call_completion compl) 158 157 { 159 158 struct rxrpc_call *call; 160 159 int i; 161 160 162 - _enter("{%d},%x", conn->debug_id, abort_code); 161 + _enter("{%d},%x", conn->debug_id, conn->abort_code); 163 162 164 163 spin_lock(&conn->channel_lock); 165 164 ··· 171 172 trace_rxrpc_abort(call->debug_id, 172 173 "CON", call->cid, 173 174 call->call_id, 0, 174 - abort_code, error); 175 + conn->abort_code, 176 + conn->error); 175 177 if (rxrpc_set_call_completion(call, compl, 176 - abort_code, error)) 178 + conn->abort_code, 179 + conn->error)) 177 180 rxrpc_notify_socket(call); 178 181 } 179 182 } ··· 208 207 return 0; 209 208 } 210 209 210 + conn->error = error; 211 + conn->abort_code = abort_code; 211 212 conn->state = RXRPC_CONN_LOCALLY_ABORTED; 212 213 spin_unlock_bh(&conn->state_lock); 213 214 214 - rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error); 215 + rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED); 215 216 216 217 msg.msg_name = &conn->params.peer->srx.transport; 217 218 msg.msg_namelen = conn->params.peer->srx.transport_len; ··· 232 229 whdr._rsvd = 0; 233 230 whdr.serviceId = htons(conn->service_id); 234 231 235 - word = htonl(conn->local_abort); 232 + word = htonl(conn->abort_code); 236 233 237 234 iov[0].iov_base = &whdr; 238 235 iov[0].iov_len = sizeof(whdr); ··· 243 240 244 241 serial = atomic_inc_return(&conn->serial); 245 242 whdr.serial = htonl(serial); 246 
- _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); 243 + _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code); 247 244 248 245 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); 249 246 if (ret < 0) { ··· 318 315 abort_code = ntohl(wtmp); 319 316 _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code); 320 317 318 + conn->error = -ECONNABORTED; 319 + conn->abort_code = abort_code; 321 320 conn->state = RXRPC_CONN_REMOTELY_ABORTED; 322 - rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, 323 - abort_code, -ECONNABORTED); 321 + rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED); 324 322 return -ECONNABORTED; 325 323 326 324 case RXRPC_PACKET_TYPE_CHALLENGE:
+136 -125
net/rxrpc/input.c
··· 216 216 /* 217 217 * Apply a hard ACK by advancing the Tx window. 218 218 */ 219 - static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, 219 + static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, 220 220 struct rxrpc_ack_summary *summary) 221 221 { 222 222 struct sk_buff *skb, *list = NULL; 223 + bool rot_last = false; 223 224 int ix; 224 225 u8 annotation; 225 226 ··· 244 243 skb->next = list; 245 244 list = skb; 246 245 247 - if (annotation & RXRPC_TX_ANNO_LAST) 246 + if (annotation & RXRPC_TX_ANNO_LAST) { 248 247 set_bit(RXRPC_CALL_TX_LAST, &call->flags); 248 + rot_last = true; 249 + } 249 250 if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK) 250 251 summary->nr_rot_new_acks++; 251 252 } 252 253 253 254 spin_unlock(&call->lock); 254 255 255 - trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ? 256 + trace_rxrpc_transmit(call, (rot_last ? 256 257 rxrpc_transmit_rotate_last : 257 258 rxrpc_transmit_rotate)); 258 259 wake_up(&call->waitq); ··· 265 262 skb_mark_not_on_list(skb); 266 263 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 267 264 } 265 + 266 + return rot_last; 268 267 } 269 268 270 269 /* ··· 278 273 static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, 279 274 const char *abort_why) 280 275 { 276 + unsigned int state; 281 277 282 278 ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); 283 279 284 280 write_lock(&call->state_lock); 285 281 286 - switch (call->state) { 282 + state = call->state; 283 + switch (state) { 287 284 case RXRPC_CALL_CLIENT_SEND_REQUEST: 288 285 case RXRPC_CALL_CLIENT_AWAIT_REPLY: 289 286 if (reply_begun) 290 - call->state = RXRPC_CALL_CLIENT_RECV_REPLY; 287 + call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY; 291 288 else 292 - call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; 289 + call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY; 293 290 break; 294 291 295 292 case RXRPC_CALL_SERVER_AWAIT_ACK: 296 293 __rxrpc_call_completed(call); 297 
294 rxrpc_notify_socket(call); 295 + state = call->state; 298 296 break; 299 297 300 298 default: ··· 305 297 } 306 298 307 299 write_unlock(&call->state_lock); 308 - if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) { 300 + if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY) 309 301 trace_rxrpc_transmit(call, rxrpc_transmit_await_reply); 310 - } else { 302 + else 311 303 trace_rxrpc_transmit(call, rxrpc_transmit_end); 312 - } 313 304 _leave(" = ok"); 314 305 return true; 315 306 ··· 339 332 trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); 340 333 } 341 334 342 - if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) 343 - rxrpc_rotate_tx_window(call, top, &summary); 344 335 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { 345 - rxrpc_proto_abort("TXL", call, top); 346 - return false; 336 + if (!rxrpc_rotate_tx_window(call, top, &summary)) { 337 + rxrpc_proto_abort("TXL", call, top); 338 + return false; 339 + } 347 340 } 348 341 if (!rxrpc_end_tx_phase(call, true, "ETD")) 349 342 return false; ··· 459 452 } 460 453 } 461 454 455 + spin_lock(&call->input_lock); 456 + 462 457 /* Received data implicitly ACKs all of the request packets we sent 463 458 * when we're acting as a client. 
464 459 */ 465 460 if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || 466 461 state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && 467 462 !rxrpc_receiving_reply(call)) 468 - return; 463 + goto unlock; 469 464 470 465 call->ackr_prev_seq = seq; 471 466 ··· 497 488 498 489 if (flags & RXRPC_LAST_PACKET) { 499 490 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 500 - seq != call->rx_top) 501 - return rxrpc_proto_abort("LSN", call, seq); 491 + seq != call->rx_top) { 492 + rxrpc_proto_abort("LSN", call, seq); 493 + goto unlock; 494 + } 502 495 } else { 503 496 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 504 - after_eq(seq, call->rx_top)) 505 - return rxrpc_proto_abort("LSA", call, seq); 497 + after_eq(seq, call->rx_top)) { 498 + rxrpc_proto_abort("LSA", call, seq); 499 + goto unlock; 500 + } 506 501 } 507 502 508 503 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); ··· 573 560 skip: 574 561 offset += len; 575 562 if (flags & RXRPC_JUMBO_PACKET) { 576 - if (skb_copy_bits(skb, offset, &flags, 1) < 0) 577 - return rxrpc_proto_abort("XJF", call, seq); 563 + if (skb_copy_bits(skb, offset, &flags, 1) < 0) { 564 + rxrpc_proto_abort("XJF", call, seq); 565 + goto unlock; 566 + } 578 567 offset += sizeof(struct rxrpc_jumbo_header); 579 568 seq++; 580 569 serial++; ··· 616 601 trace_rxrpc_notify_socket(call->debug_id, serial); 617 602 rxrpc_notify_socket(call); 618 603 } 604 + 605 + unlock: 606 + spin_unlock(&call->input_lock); 619 607 _leave(" [queued]"); 620 608 } 621 609 ··· 705 687 706 688 ping_time = call->ping_time; 707 689 smp_rmb(); 708 - ping_serial = call->ping_serial; 690 + ping_serial = READ_ONCE(call->ping_serial); 709 691 710 692 if (orig_serial == call->acks_lost_ping) 711 693 rxrpc_input_check_for_lost_ack(call); 712 694 713 - if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || 714 - before(orig_serial, ping_serial)) 695 + if (before(orig_serial, ping_serial) || 696 + !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags)) 715 697 return; 716 - 
clear_bit(RXRPC_CALL_PINGING, &call->flags); 717 698 if (after(orig_serial, ping_serial)) 718 699 return; 719 700 ··· 878 861 rxrpc_propose_ack_respond_to_ack); 879 862 } 880 863 881 - ioffset = offset + nr_acks + 3; 882 - if (skb->len >= ioffset + sizeof(buf.info)) { 883 - if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0) 884 - return rxrpc_proto_abort("XAI", call, 0); 885 - rxrpc_input_ackinfo(call, skb, &buf.info); 886 - } 864 + /* Discard any out-of-order or duplicate ACKs. */ 865 + if (before_eq(sp->hdr.serial, call->acks_latest)) 866 + return; 887 867 888 - if (first_soft_ack == 0) 889 - return rxrpc_proto_abort("AK0", call, 0); 868 + buf.info.rxMTU = 0; 869 + ioffset = offset + nr_acks + 3; 870 + if (skb->len >= ioffset + sizeof(buf.info) && 871 + skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0) 872 + return rxrpc_proto_abort("XAI", call, 0); 873 + 874 + spin_lock(&call->input_lock); 875 + 876 + /* Discard any out-of-order or duplicate ACKs. */ 877 + if (before_eq(sp->hdr.serial, call->acks_latest)) 878 + goto out; 879 + call->acks_latest_ts = skb->tstamp; 880 + call->acks_latest = sp->hdr.serial; 881 + 882 + /* Parse rwind and mtu sizes if provided. */ 883 + if (buf.info.rxMTU) 884 + rxrpc_input_ackinfo(call, skb, &buf.info); 885 + 886 + if (first_soft_ack == 0) { 887 + rxrpc_proto_abort("AK0", call, 0); 888 + goto out; 889 + } 890 890 891 891 /* Ignore ACKs unless we are or have just been transmitting. */ 892 892 switch (READ_ONCE(call->state)) { ··· 913 879 case RXRPC_CALL_SERVER_AWAIT_ACK: 914 880 break; 915 881 default: 916 - return; 882 + goto out; 917 883 } 918 - 919 - /* Discard any out-of-order or duplicate ACKs. 
*/ 920 - if (before_eq(sp->hdr.serial, call->acks_latest)) { 921 - _debug("discard ACK %d <= %d", 922 - sp->hdr.serial, call->acks_latest); 923 - return; 924 - } 925 - call->acks_latest_ts = skb->tstamp; 926 - call->acks_latest = sp->hdr.serial; 927 884 928 885 if (before(hard_ack, call->tx_hard_ack) || 929 - after(hard_ack, call->tx_top)) 930 - return rxrpc_proto_abort("AKW", call, 0); 931 - if (nr_acks > call->tx_top - hard_ack) 932 - return rxrpc_proto_abort("AKN", call, 0); 933 - 934 - if (after(hard_ack, call->tx_hard_ack)) 935 - rxrpc_rotate_tx_window(call, hard_ack, &summary); 936 - 937 - if (nr_acks > 0) { 938 - if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) 939 - return rxrpc_proto_abort("XSA", call, 0); 940 - rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks, 941 - &summary); 886 + after(hard_ack, call->tx_top)) { 887 + rxrpc_proto_abort("AKW", call, 0); 888 + goto out; 889 + } 890 + if (nr_acks > call->tx_top - hard_ack) { 891 + rxrpc_proto_abort("AKN", call, 0); 892 + goto out; 942 893 } 943 894 944 - if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { 945 - rxrpc_end_tx_phase(call, false, "ETA"); 946 - return; 895 + if (after(hard_ack, call->tx_hard_ack)) { 896 + if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) { 897 + rxrpc_end_tx_phase(call, false, "ETA"); 898 + goto out; 899 + } 900 + } 901 + 902 + if (nr_acks > 0) { 903 + if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) { 904 + rxrpc_proto_abort("XSA", call, 0); 905 + goto out; 906 + } 907 + rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks, 908 + &summary); 947 909 } 948 910 949 911 if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & ··· 950 920 false, true, 951 921 rxrpc_propose_ack_ping_for_lost_reply); 952 922 953 - return rxrpc_congestion_management(call, skb, &summary, acked_serial); 923 + rxrpc_congestion_management(call, skb, &summary, acked_serial); 924 + out: 925 + spin_unlock(&call->input_lock); 954 926 } 955 927 956 928 /* ··· 965 
933 966 934 _proto("Rx ACKALL %%%u", sp->hdr.serial); 967 935 968 - rxrpc_rotate_tx_window(call, call->tx_top, &summary); 969 - if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) 936 + spin_lock(&call->input_lock); 937 + 938 + if (rxrpc_rotate_tx_window(call, call->tx_top, &summary)) 970 939 rxrpc_end_tx_phase(call, false, "ETL"); 940 + 941 + spin_unlock(&call->input_lock); 971 942 } 972 943 973 944 /* ··· 1053 1018 } 1054 1019 1055 1020 /* 1056 - * Handle a new call on a channel implicitly completing the preceding call on 1057 - * that channel. 1021 + * Handle a new service call on a channel implicitly completing the preceding 1022 + * call on that channel. This does not apply to client conns. 1058 1023 * 1059 1024 * TODO: If callNumber > call_id + 1, renegotiate security. 1060 1025 */ 1061 - static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, 1026 + static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx, 1027 + struct rxrpc_connection *conn, 1062 1028 struct rxrpc_call *call) 1063 1029 { 1064 1030 switch (READ_ONCE(call->state)) { 1065 1031 case RXRPC_CALL_SERVER_AWAIT_ACK: 1066 1032 rxrpc_call_completed(call); 1067 - break; 1033 + /* Fall through */ 1068 1034 case RXRPC_CALL_COMPLETE: 1069 1035 break; 1070 1036 default: ··· 1073 1037 set_bit(RXRPC_CALL_EV_ABORT, &call->events); 1074 1038 rxrpc_queue_call(call); 1075 1039 } 1040 + trace_rxrpc_improper_term(call); 1076 1041 break; 1077 1042 } 1078 1043 1079 - trace_rxrpc_improper_term(call); 1044 + spin_lock(&rx->incoming_lock); 1080 1045 __rxrpc_disconnect_call(conn, call); 1046 + spin_unlock(&rx->incoming_lock); 1081 1047 rxrpc_notify_socket(call); 1082 1048 } 1083 1049 ··· 1158 1120 * The socket is locked by the caller and this prevents the socket from being 1159 1121 * shut down and the local endpoint from going away, thus sk_user_data will not 1160 1122 * be cleared until this function returns. 1123 + * 1124 + * Called with the RCU read lock held from the IP layer via UDP. 
1161 1125 */ 1162 - void rxrpc_data_ready(struct sock *udp_sk) 1126 + int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) 1163 1127 { 1164 1128 struct rxrpc_connection *conn; 1165 1129 struct rxrpc_channel *chan; ··· 1170 1130 struct rxrpc_local *local = udp_sk->sk_user_data; 1171 1131 struct rxrpc_peer *peer = NULL; 1172 1132 struct rxrpc_sock *rx = NULL; 1173 - struct sk_buff *skb; 1174 1133 unsigned int channel; 1175 - int ret, skew = 0; 1134 + int skew = 0; 1176 1135 1177 1136 _enter("%p", udp_sk); 1178 - 1179 - ASSERT(!irqs_disabled()); 1180 - 1181 - skb = skb_recv_udp(udp_sk, 0, 1, &ret); 1182 - if (!skb) { 1183 - if (ret == -EAGAIN) 1184 - return; 1185 - _debug("UDP socket error %d", ret); 1186 - return; 1187 - } 1188 1137 1189 1138 if (skb->tstamp == 0) 1190 1139 skb->tstamp = ktime_get_real(); 1191 1140 1192 1141 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 1193 1142 1194 - _net("recv skb %p", skb); 1195 - 1196 - /* we'll probably need to checksum it (didn't call sock_recvmsg) */ 1197 - if (skb_checksum_complete(skb)) { 1198 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1199 - __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0); 1200 - _leave(" [CSUM failed]"); 1201 - return; 1202 - } 1203 - 1204 - __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0); 1143 + skb_pull(skb, sizeof(struct udphdr)); 1205 1144 1206 1145 /* The UDP protocol already released all skb resources; 1207 1146 * we are free to add our own data there. 
··· 1196 1177 if ((lose++ & 7) == 7) { 1197 1178 trace_rxrpc_rx_lose(sp); 1198 1179 rxrpc_free_skb(skb, rxrpc_skb_rx_lost); 1199 - return; 1180 + return 0; 1200 1181 } 1201 1182 } 1202 1183 1184 + if (skb->tstamp == 0) 1185 + skb->tstamp = ktime_get_real(); 1203 1186 trace_rxrpc_rx_packet(sp); 1204 1187 1205 1188 switch (sp->hdr.type) { ··· 1255 1234 if (sp->hdr.serviceId == 0) 1256 1235 goto bad_message; 1257 1236 1258 - rcu_read_lock(); 1259 - 1260 1237 if (rxrpc_to_server(sp)) { 1261 1238 /* Weed out packets to services we're not offering. Packets 1262 1239 * that would begin a call are explicitly rejected and the rest ··· 1266 1247 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && 1267 1248 sp->hdr.seq == 1) 1268 1249 goto unsupported_service; 1269 - goto discard_unlock; 1250 + goto discard; 1270 1251 } 1271 1252 } 1272 1253 ··· 1276 1257 goto wrong_security; 1277 1258 1278 1259 if (sp->hdr.serviceId != conn->service_id) { 1279 - if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) || 1280 - conn->service_id != conn->params.service_id) 1260 + int old_id; 1261 + 1262 + if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) 1281 1263 goto reupgrade; 1282 - conn->service_id = sp->hdr.serviceId; 1264 + old_id = cmpxchg(&conn->service_id, conn->params.service_id, 1265 + sp->hdr.serviceId); 1266 + 1267 + if (old_id != conn->params.service_id && 1268 + old_id != sp->hdr.serviceId) 1269 + goto reupgrade; 1283 1270 } 1284 1271 1285 1272 if (sp->hdr.callNumber == 0) { 1286 1273 /* Connection-level packet */ 1287 1274 _debug("CONN %p {%d}", conn, conn->debug_id); 1288 1275 rxrpc_post_packet_to_conn(conn, skb); 1289 - goto out_unlock; 1276 + goto out; 1290 1277 } 1291 1278 1292 1279 /* Note the serial number skew here */ ··· 1311 1286 1312 1287 /* Ignore really old calls */ 1313 1288 if (sp->hdr.callNumber < chan->last_call) 1314 - goto discard_unlock; 1289 + goto discard; 1315 1290 1316 1291 if (sp->hdr.callNumber == chan->last_call) { 1317 1292 if (chan->call || 
1318 1293 sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) 1319 - goto discard_unlock; 1294 + goto discard; 1320 1295 1321 1296 /* For the previous service call, if completed 1322 1297 * successfully, we discard all further packets. 1323 1298 */ 1324 1299 if (rxrpc_conn_is_service(conn) && 1325 1300 chan->last_type == RXRPC_PACKET_TYPE_ACK) 1326 - goto discard_unlock; 1301 + goto discard; 1327 1302 1328 1303 /* But otherwise we need to retransmit the final packet 1329 1304 * from data cached in the connection record. ··· 1334 1309 sp->hdr.serial, 1335 1310 sp->hdr.flags, 0); 1336 1311 rxrpc_post_packet_to_conn(conn, skb); 1337 - goto out_unlock; 1312 + goto out; 1338 1313 } 1339 1314 1340 1315 call = rcu_dereference(chan->call); 1341 1316 1342 1317 if (sp->hdr.callNumber > chan->call_id) { 1343 - if (rxrpc_to_client(sp)) { 1344 - rcu_read_unlock(); 1318 + if (rxrpc_to_client(sp)) 1345 1319 goto reject_packet; 1346 - } 1347 1320 if (call) 1348 - rxrpc_input_implicit_end_call(conn, call); 1321 + rxrpc_input_implicit_end_call(rx, conn, call); 1349 1322 call = NULL; 1350 1323 } 1351 1324 ··· 1360 1337 if (!call || atomic_read(&call->usage) == 0) { 1361 1338 if (rxrpc_to_client(sp) || 1362 1339 sp->hdr.type != RXRPC_PACKET_TYPE_DATA) 1363 - goto bad_message_unlock; 1340 + goto bad_message; 1364 1341 if (sp->hdr.seq != 1) 1365 - goto discard_unlock; 1366 - call = rxrpc_new_incoming_call(local, rx, peer, conn, skb); 1367 - if (!call) { 1368 - rcu_read_unlock(); 1342 + goto discard; 1343 + call = rxrpc_new_incoming_call(local, rx, skb); 1344 + if (!call) 1369 1345 goto reject_packet; 1370 - } 1371 1346 rxrpc_send_ping(call, skb, skew); 1372 1347 mutex_unlock(&call->user_mutex); 1373 1348 } 1374 1349 1375 1350 rxrpc_input_call_packet(call, skb, skew); 1376 - goto discard_unlock; 1351 + goto discard; 1377 1352 1378 - discard_unlock: 1379 - rcu_read_unlock(); 1380 1353 discard: 1381 1354 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1382 1355 out: 1383 1356 trace_rxrpc_rx_done(0, 0); 1384 
- return; 1385 - 1386 - out_unlock: 1387 - rcu_read_unlock(); 1388 - goto out; 1357 + return 0; 1389 1358 1390 1359 wrong_security: 1391 - rcu_read_unlock(); 1392 1360 trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1393 1361 RXKADINCONSISTENCY, EBADMSG); 1394 1362 skb->priority = RXKADINCONSISTENCY; 1395 1363 goto post_abort; 1396 1364 1397 1365 unsupported_service: 1398 - rcu_read_unlock(); 1399 1366 trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1400 1367 RX_INVALID_OPERATION, EOPNOTSUPP); 1401 1368 skb->priority = RX_INVALID_OPERATION; 1402 1369 goto post_abort; 1403 1370 1404 1371 reupgrade: 1405 - rcu_read_unlock(); 1406 1372 trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1407 1373 RX_PROTOCOL_ERROR, EBADMSG); 1408 1374 goto protocol_error; 1409 1375 1410 - bad_message_unlock: 1411 - rcu_read_unlock(); 1412 1376 bad_message: 1413 1377 trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1414 1378 RX_PROTOCOL_ERROR, EBADMSG); ··· 1407 1397 trace_rxrpc_rx_done(skb->mark, skb->priority); 1408 1398 rxrpc_reject_packet(local, skb); 1409 1399 _leave(" [badmsg]"); 1400 + return 0; 1410 1401 }
+24 -6
net/rxrpc/local_object.c
··· 19 19 #include <linux/ip.h> 20 20 #include <linux/hashtable.h> 21 21 #include <net/sock.h> 22 + #include <net/udp.h> 22 23 #include <net/af_rxrpc.h> 23 24 #include "ar-internal.h" 24 25 ··· 109 108 */ 110 109 static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) 111 110 { 112 - struct sock *sock; 111 + struct sock *usk; 113 112 int ret, opt; 114 113 115 114 _enter("%p{%d,%d}", ··· 122 121 _leave(" = %d [socket]", ret); 123 122 return ret; 124 123 } 124 + 125 + /* set the socket up */ 126 + usk = local->socket->sk; 127 + inet_sk(usk)->mc_loop = 0; 128 + 129 + /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */ 130 + inet_inc_convert_csum(usk); 131 + 132 + rcu_assign_sk_user_data(usk, local); 133 + 134 + udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC; 135 + udp_sk(usk)->encap_rcv = rxrpc_input_packet; 136 + udp_sk(usk)->encap_destroy = NULL; 137 + udp_sk(usk)->gro_receive = NULL; 138 + udp_sk(usk)->gro_complete = NULL; 139 + 140 + udp_encap_enable(); 141 + #if IS_ENABLED(CONFIG_IPV6) 142 + if (local->srx.transport.family == AF_INET6) 143 + udpv6_encap_enable(); 144 + #endif 145 + usk->sk_error_report = rxrpc_error_report; 125 146 126 147 /* if a local address was supplied then bind it */ 127 148 if (local->srx.transport_len > sizeof(sa_family_t)) { ··· 214 191 BUG(); 215 192 } 216 193 217 - /* set the socket up */ 218 - sock = local->socket->sk; 219 - sock->sk_user_data = local; 220 - sock->sk_data_ready = rxrpc_data_ready; 221 - sock->sk_error_report = rxrpc_error_report; 222 194 _leave(" = 0"); 223 195 return 0; 224 196
+5
net/rxrpc/peer_event.c
··· 303 303 if (rtt < 0) 304 304 return; 305 305 306 + spin_lock(&peer->rtt_input_lock); 307 + 306 308 /* Replace the oldest datum in the RTT buffer */ 307 309 sum -= peer->rtt_cache[cursor]; 308 310 sum += rtt; ··· 316 314 peer->rtt_usage = usage; 317 315 } 318 316 317 + spin_unlock(&peer->rtt_input_lock); 318 + 319 319 /* Now recalculate the average */ 320 320 if (usage == RXRPC_RTT_CACHE_SIZE) { 321 321 avg = sum / RXRPC_RTT_CACHE_SIZE; ··· 326 322 do_div(avg, usage); 327 323 } 328 324 325 + /* Don't need to update this under lock */ 329 326 peer->rtt = avg; 330 327 trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, 331 328 usage, avg);
+18 -11
net/rxrpc/peer_object.c
··· 153 153 * assess the MTU size for the network interface through which this peer is 154 154 * reached 155 155 */ 156 - static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) 156 + static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx, 157 + struct rxrpc_peer *peer) 157 158 { 159 + struct net *net = sock_net(&rx->sk); 158 160 struct dst_entry *dst; 159 161 struct rtable *rt; 160 162 struct flowi fl; ··· 171 169 switch (peer->srx.transport.family) { 172 170 case AF_INET: 173 171 rt = ip_route_output_ports( 174 - &init_net, fl4, NULL, 172 + net, fl4, NULL, 175 173 peer->srx.transport.sin.sin_addr.s_addr, 0, 176 174 htons(7000), htons(7001), IPPROTO_UDP, 0, 0); 177 175 if (IS_ERR(rt)) { ··· 190 188 sizeof(struct in6_addr)); 191 189 fl6->fl6_dport = htons(7001); 192 190 fl6->fl6_sport = htons(7000); 193 - dst = ip6_route_output(&init_net, NULL, fl6); 191 + dst = ip6_route_output(net, NULL, fl6); 194 192 if (dst->error) { 195 193 _leave(" [route err %d]", dst->error); 196 194 return; ··· 225 223 peer->service_conns = RB_ROOT; 226 224 seqlock_init(&peer->service_conn_lock); 227 225 spin_lock_init(&peer->lock); 226 + spin_lock_init(&peer->rtt_input_lock); 228 227 peer->debug_id = atomic_inc_return(&rxrpc_debug_id); 229 228 230 229 if (RXRPC_TX_SMSS > 2190) ··· 243 240 /* 244 241 * Initialise peer record. 245 242 */ 246 - static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key) 243 + static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer, 244 + unsigned long hash_key) 247 245 { 248 246 peer->hash_key = hash_key; 249 - rxrpc_assess_MTU_size(peer); 247 + rxrpc_assess_MTU_size(rx, peer); 250 248 peer->mtu = peer->if_mtu; 251 249 peer->rtt_last_req = ktime_get_real(); 252 250 ··· 279 275 /* 280 276 * Set up a new peer. 
281 277 */ 282 - static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, 278 + static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx, 279 + struct rxrpc_local *local, 283 280 struct sockaddr_rxrpc *srx, 284 281 unsigned long hash_key, 285 282 gfp_t gfp) ··· 292 287 peer = rxrpc_alloc_peer(local, gfp); 293 288 if (peer) { 294 289 memcpy(&peer->srx, srx, sizeof(*srx)); 295 - rxrpc_init_peer(peer, hash_key); 290 + rxrpc_init_peer(rx, peer, hash_key); 296 291 } 297 292 298 293 _leave(" = %p", peer); ··· 304 299 * since we've already done a search in the list from the non-reentrant context 305 300 * (the data_ready handler) that is the only place we can add new peers. 306 301 */ 307 - void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) 302 + void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local, 303 + struct rxrpc_peer *peer) 308 304 { 309 305 struct rxrpc_net *rxnet = local->rxnet; 310 306 unsigned long hash_key; 311 307 312 308 hash_key = rxrpc_peer_hash_key(local, &peer->srx); 313 309 peer->local = local; 314 - rxrpc_init_peer(peer, hash_key); 310 + rxrpc_init_peer(rx, peer, hash_key); 315 311 316 312 spin_lock(&rxnet->peer_hash_lock); 317 313 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); ··· 323 317 /* 324 318 * obtain a remote transport endpoint for the specified address 325 319 */ 326 - struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, 320 + struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, 321 + struct rxrpc_local *local, 327 322 struct sockaddr_rxrpc *srx, gfp_t gfp) 328 323 { 329 324 struct rxrpc_peer *peer, *candidate; ··· 344 337 /* The peer is not yet present in hash - create a candidate 345 338 * for a new record and then redo the search. 
346 339 */ 347 - candidate = rxrpc_create_peer(local, srx, hash_key, gfp); 340 + candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp); 348 341 if (!candidate) { 349 342 _leave(" = NULL [nomem]"); 350 343 return NULL;
+3 -3
net/sched/cls_u32.c
··· 391 391 RCU_INIT_POINTER(root_ht->next, tp_c->hlist); 392 392 rcu_assign_pointer(tp_c->hlist, root_ht); 393 393 394 + root_ht->refcnt++; 394 395 rcu_assign_pointer(tp->root, root_ht); 395 396 tp->data = tp_c; 396 397 return 0; ··· 607 606 struct tc_u_hnode __rcu **hn; 608 607 struct tc_u_hnode *phn; 609 608 610 - WARN_ON(ht->refcnt); 609 + WARN_ON(--ht->refcnt); 611 610 612 611 u32_clear_hnode(tp, ht, extack); 613 612 ··· 635 634 636 635 WARN_ON(root_ht == NULL); 637 636 638 - if (root_ht && --root_ht->refcnt == 0) 637 + if (root_ht && --root_ht->refcnt == 1) 639 638 u32_destroy_hnode(tp, root_ht, extack); 640 639 641 640 if (--tp_c->refcnt == 0) { ··· 680 679 } 681 680 682 681 if (ht->refcnt == 1) { 683 - ht->refcnt--; 684 682 u32_destroy_hnode(tp, ht, extack); 685 683 } else { 686 684 NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
+1 -1
net/sched/sch_cake.c
··· 2644 2644 for (i = 1; i <= CAKE_QUEUES; i++) 2645 2645 quantum_div[i] = 65535 / i; 2646 2646 2647 - q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data), 2647 + q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), 2648 2648 GFP_KERNEL); 2649 2649 if (!q->tins) 2650 2650 goto nomem;
+21 -8
net/tipc/link.c
··· 477 477 l->in_session = false; 478 478 l->bearer_id = bearer_id; 479 479 l->tolerance = tolerance; 480 + if (bc_rcvlink) 481 + bc_rcvlink->tolerance = tolerance; 480 482 l->net_plane = net_plane; 481 483 l->advertised_mtu = mtu; 482 484 l->mtu = mtu; ··· 845 843 846 844 void tipc_link_reset(struct tipc_link *l) 847 845 { 846 + struct sk_buff_head list; 847 + 848 + __skb_queue_head_init(&list); 849 + 848 850 l->in_session = false; 849 851 l->session++; 850 852 l->mtu = l->advertised_mtu; 853 + 851 854 spin_lock_bh(&l->wakeupq.lock); 852 - spin_lock_bh(&l->inputq->lock); 853 - skb_queue_splice_init(&l->wakeupq, l->inputq); 854 - spin_unlock_bh(&l->inputq->lock); 855 + skb_queue_splice_init(&l->wakeupq, &list); 855 856 spin_unlock_bh(&l->wakeupq.lock); 857 + 858 + spin_lock_bh(&l->inputq->lock); 859 + skb_queue_splice_init(&list, l->inputq); 860 + spin_unlock_bh(&l->inputq->lock); 856 861 857 862 __skb_queue_purge(&l->transmq); 858 863 __skb_queue_purge(&l->deferdq); ··· 1040 1031 /* Detect repeated retransmit failures on same packet */ 1041 1032 if (r->last_retransm != buf_seqno(skb)) { 1042 1033 r->last_retransm = buf_seqno(skb); 1043 - r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance); 1034 + r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance); 1044 1035 } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { 1045 1036 link_retransmit_failure(l, skb); 1046 1037 if (link_is_bc_sndlink(l)) ··· 1585 1576 strncpy(if_name, data, TIPC_MAX_IF_NAME); 1586 1577 1587 1578 /* Update own tolerance if peer indicates a non-zero value */ 1588 - if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1579 + if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) { 1589 1580 l->tolerance = peers_tol; 1590 - 1581 + l->bc_rcvlink->tolerance = peers_tol; 1582 + } 1591 1583 /* Update own priority if peer's priority is higher */ 1592 1584 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) 1593 1585 l->priority = peers_prio; ··· 
1614 1604 l->rcv_nxt_state = msg_seqno(hdr) + 1; 1615 1605 1616 1606 /* Update own tolerance if peer indicates a non-zero value */ 1617 - if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1607 + if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) { 1618 1608 l->tolerance = peers_tol; 1619 - 1609 + l->bc_rcvlink->tolerance = peers_tol; 1610 + } 1620 1611 /* Update own prio if peer indicates a different value */ 1621 1612 if ((peers_prio != l->priority) && 1622 1613 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { ··· 2234 2223 struct sk_buff_head *xmitq) 2235 2224 { 2236 2225 l->tolerance = tol; 2226 + if (l->bc_rcvlink) 2227 + l->bc_rcvlink->tolerance = tol; 2237 2228 if (link_is_up(l)) 2238 2229 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq); 2239 2230 }
+12 -2
net/tipc/socket.c
··· 1198 1198 * @skb: pointer to message buffer. 1199 1199 */ 1200 1200 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, 1201 + struct sk_buff_head *inputq, 1201 1202 struct sk_buff_head *xmitq) 1202 1203 { 1203 1204 struct tipc_msg *hdr = buf_msg(skb); ··· 1216 1215 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), 1217 1216 tsk_peer_port(tsk)); 1218 1217 sk->sk_state_change(sk); 1219 - goto exit; 1218 + 1219 + /* State change is ignored if socket already awake, 1220 + * - convert msg to abort msg and add to inqueue 1221 + */ 1222 + msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE); 1223 + msg_set_type(hdr, TIPC_CONN_MSG); 1224 + msg_set_size(hdr, BASIC_H_SIZE); 1225 + msg_set_hdr_sz(hdr, BASIC_H_SIZE); 1226 + __skb_queue_tail(inputq, skb); 1227 + return; 1220 1228 } 1221 1229 1222 1230 tsk->probe_unacked = false; ··· 1953 1943 1954 1944 switch (msg_user(hdr)) { 1955 1945 case CONN_MANAGER: 1956 - tipc_sk_conn_proto_rcv(tsk, skb, xmitq); 1946 + tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq); 1957 1947 return; 1958 1948 case SOCK_WAKEUP: 1959 1949 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+1
samples/Kconfig
··· 1 1 menuconfig SAMPLES 2 2 bool "Sample kernel code" 3 + depends on !UML 3 4 help 4 5 You can build and test sample kernel code here. 5 6
+1 -1
scripts/Makefile.build
··· 219 219 sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ 220 220 "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ 221 221 "$(if $(CONFIG_64BIT),64,32)" \ 222 - "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \ 222 + "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)" \ 223 223 "$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \ 224 224 "$(if $(part-of-module),1,0)" "$(@)"; 225 225 recordmcount_source := $(srctree)/scripts/recordmcount.pl
+1
tools/hv/hv_fcopy_daemon.c
··· 234 234 break; 235 235 236 236 default: 237 + error = HV_E_FAIL; 237 238 syslog(LOG_ERR, "Unknown operation: %d", 238 239 buffer.hdr.operation); 239 240
+9
tools/perf/scripts/python/export-to-postgresql.py
··· 204 204 libpq = CDLL("libpq.so.5") 205 205 PQconnectdb = libpq.PQconnectdb 206 206 PQconnectdb.restype = c_void_p 207 + PQconnectdb.argtypes = [ c_char_p ] 207 208 PQfinish = libpq.PQfinish 209 + PQfinish.argtypes = [ c_void_p ] 208 210 PQstatus = libpq.PQstatus 211 + PQstatus.restype = c_int 212 + PQstatus.argtypes = [ c_void_p ] 209 213 PQexec = libpq.PQexec 210 214 PQexec.restype = c_void_p 215 + PQexec.argtypes = [ c_void_p, c_char_p ] 211 216 PQresultStatus = libpq.PQresultStatus 217 + PQresultStatus.restype = c_int 218 + PQresultStatus.argtypes = [ c_void_p ] 212 219 PQputCopyData = libpq.PQputCopyData 220 + PQputCopyData.restype = c_int 213 221 PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ] 214 222 PQputCopyEnd = libpq.PQputCopyEnd 223 + PQputCopyEnd.restype = c_int 215 224 PQputCopyEnd.argtypes = [ c_void_p, c_void_p ] 216 225 217 226 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+5 -1
tools/perf/scripts/python/export-to-sqlite.py
··· 440 440 441 441 def sample_table(*x): 442 442 if branches: 443 - bind_exec(sample_query, 18, x) 443 + for xx in x[0:15]: 444 + sample_query.addBindValue(str(xx)) 445 + for xx in x[19:22]: 446 + sample_query.addBindValue(str(xx)) 447 + do_query_(sample_query) 444 448 else: 445 449 bind_exec(sample_query, 22, x) 446 450
+5 -3
tools/perf/util/machine.c
··· 2286 2286 if (!symbol_conf.inline_name || !map || !sym) 2287 2287 return ret; 2288 2288 2289 - addr = map__rip_2objdump(map, ip); 2289 + addr = map__map_ip(map, ip); 2290 + addr = map__rip_2objdump(map, addr); 2290 2291 2291 2292 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr); 2292 2293 if (!inline_node) { ··· 2313 2312 { 2314 2313 struct callchain_cursor *cursor = arg; 2315 2314 const char *srcline = NULL; 2316 - u64 addr; 2315 + u64 addr = entry->ip; 2317 2316 2318 2317 if (symbol_conf.hide_unresolved && entry->sym == NULL) 2319 2318 return 0; ··· 2325 2324 * Convert entry->ip from a virtual address to an offset in 2326 2325 * its corresponding binary. 2327 2326 */ 2328 - addr = map__map_ip(entry->map, entry->ip); 2327 + if (entry->map) 2328 + addr = map__map_ip(entry->map, entry->ip); 2329 2329 2330 2330 srcline = callchain_srcline(entry->map, entry->sym, addr); 2331 2331 return callchain_cursor_append(cursor, entry->ip,
+1 -1
tools/perf/util/setup.py
··· 35 35 36 36 cflags = getenv('CFLAGS', '').split() 37 37 # switch off several checks (need to be at the end of cflags list) 38 - cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ] 38 + cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ] 39 39 if cc != "clang": 40 40 cflags += ['-Wno-cast-function-type' ] 41 41
+1 -1
tools/testing/selftests/net/rtnetlink.sh
··· 1 - #!/bin/sh 1 + #!/bin/bash 2 2 # 3 3 # This test is for checking rtnetlink callpaths, and get as much coverage as possible. 4 4 #
+1 -1
tools/testing/selftests/net/udpgso_bench.sh
··· 1 - #!/bin/sh 1 + #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 # 4 4 # Run a series of udpgso benchmarks