Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Simple cases of overlapping changes in the packet scheduler.

Much easier to resolve this time.

Which probably means that I screwed it up somehow.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1127 -438
+3
.mailmap
··· 15 15 Alan Cox <alan@lxorguk.ukuu.org.uk> 16 16 Alan Cox <root@hraefn.swansea.linux.org.uk> 17 17 Aleksey Gorelov <aleksey_gorelov@phoenix.com> 18 + Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com> 18 19 Al Viro <viro@ftp.linux.org.uk> 19 20 Al Viro <viro@zenIV.linux.org.uk> 20 21 Andreas Herrmann <aherrman@de.ibm.com> ··· 102 101 Linas Vepstas <linas@austin.ibm.com> 103 102 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de> 104 103 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> 104 + Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> 105 105 Mark Brown <broonie@sirena.org.uk> 106 106 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com> 107 107 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com> ··· 121 119 Mayuresh Janorkar <mayur@ti.com> 122 120 Michael Buesch <m@bues.ch> 123 121 Michel Dänzer <michel@tungstengraphics.com> 122 + Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com> 124 123 Mitesh shah <mshah@teja.com> 125 124 Mohit Kumar <mohit.kumar@st.com> <mohit.kumar.dhaka@gmail.com> 126 125 Morten Welinder <terra@gnome.org>
+16 -6
Documentation/process/kernel-enforcement-statement.rst
··· 50 50 Except where noted below, we speak only for ourselves, and not for any company 51 51 we might work for today, have in the past, or will in the future. 52 52 53 + - Laura Abbott 53 54 - Bjorn Andersson (Linaro) 54 - - Andrea Arcangeli (Red Hat) 55 + - Andrea Arcangeli 55 56 - Neil Armstrong 56 57 - Jens Axboe 57 58 - Pablo Neira Ayuso ··· 61 60 - Felipe Balbi 62 61 - Arnd Bergmann 63 62 - Ard Biesheuvel 64 - - Paolo Bonzini (Red Hat) 63 + - Tim Bird 64 + - Paolo Bonzini 65 65 - Christian Borntraeger 66 66 - Mark Brown (Linaro) 67 67 - Paul Burton 68 68 - Javier Martinez Canillas 69 69 - Rob Clark 70 70 - Jonathan Corbet 71 + - Dennis Dalessandro 71 72 - Vivien Didelot (Savoir-faire Linux) 72 - - Hans de Goede (Red Hat) 73 + - Hans de Goede 73 74 - Mel Gorman (SUSE) 74 75 - Sven Eckelmann 75 76 - Alex Elder (Linaro) ··· 82 79 - Juergen Gross 83 80 - Shawn Guo 84 81 - Ulf Hansson 82 + - Stephen Hemminger (Microsoft) 85 83 - Tejun Heo 86 84 - Rob Herring 87 85 - Masami Hiramatsu ··· 108 104 - Viresh Kumar 109 105 - Aneesh Kumar K.V 110 106 - Julia Lawall 111 - - Doug Ledford (Red Hat) 107 + - Doug Ledford 112 108 - Chuck Lever (Oracle) 113 109 - Daniel Lezcano 114 110 - Shaohua Li 115 - - Xin Long (Red Hat) 111 + - Xin Long 116 112 - Tony Luck 113 + - Catalin Marinas (Arm Ltd) 117 114 - Mike Marshall 118 115 - Chris Mason 119 116 - Paul E. McKenney 120 117 - David S. Miller 121 118 - Ingo Molnar 122 119 - Kuninori Morimoto 120 + - Trond Myklebust 121 + - Martin K. Petersen (Oracle) 123 122 - Borislav Petkov 124 123 - Jiri Pirko 125 124 - Josh Poimboeuf ··· 131 124 - Joerg Roedel 132 125 - Leon Romanovsky 133 126 - Steven Rostedt (VMware) 134 - - Ivan Safonov 127 + - Frank Rowand 135 128 - Ivan Safonov 136 129 - Anna Schumaker 137 130 - Jes Sorensen 138 131 - K.Y. 
Srinivasan 139 132 - Heiko Stuebner 140 133 - Jiri Kosina (SUSE) 134 + - Willy Tarreau 141 135 - Dmitry Torokhov 142 136 - Linus Torvalds 143 137 - Thierry Reding 144 138 - Rik van Riel 145 139 - Geert Uytterhoeven (Glider bvba) 140 + - Eduardo Valentin (Amazon.com) 146 141 - Daniel Vetter 147 142 - Linus Walleij 148 143 - Richard Weinberger ··· 154 145 - Masahiro Yamada 155 146 - Wei Yongjun 156 147 - Lv Zheng 148 + - Marc Zyngier (Arm Ltd)
+9 -5
MAINTAINERS
··· 873 873 F: drivers/staging/android/ 874 874 875 875 ANDROID GOLDFISH RTC DRIVER 876 - M: Miodrag Dinic <miodrag.dinic@imgtec.com> 876 + M: Miodrag Dinic <miodrag.dinic@mips.com> 877 877 S: Supported 878 878 F: Documentation/devicetree/bindings/rtc/google,goldfish-rtc.txt 879 879 F: drivers/rtc/rtc-goldfish.c ··· 7752 7752 F: Documentation/scsi/53c700.txt 7753 7753 F: drivers/scsi/53c700* 7754 7754 7755 + LEAKING_ADDRESSES 7756 + M: Tobin C. Harding <me@tobin.cc> 7757 + S: Maintained 7758 + F: scripts/leaking_addresses.pl 7759 + 7755 7760 LED SUBSYSTEM 7756 7761 M: Richard Purdie <rpurdie@rpsys.net> 7757 7762 M: Jacek Anaszewski <jacek.anaszewski@gmail.com> ··· 9032 9027 F: drivers/*/*/*loongson1* 9033 9028 9034 9029 MIPS RINT INSTRUCTION EMULATION 9035 - M: Aleksandar Markovic <aleksandar.markovic@imgtec.com> 9030 + M: Aleksandar Markovic <aleksandar.markovic@mips.com> 9036 9031 L: linux-mips@linux-mips.org 9037 9032 S: Supported 9038 9033 F: arch/mips/math-emu/sp_rint.c ··· 10697 10692 F: drivers/pinctrl/spear/ 10698 10693 10699 10694 PISTACHIO SOC SUPPORT 10700 - M: James Hartley <james.hartley@imgtec.com> 10701 - M: Ionela Voinescu <ionela.voinescu@imgtec.com> 10695 + M: James Hartley <james.hartley@sondrel.com> 10702 10696 L: linux-mips@linux-mips.org 10703 - S: Maintained 10697 + S: Odd Fixes 10704 10698 F: arch/mips/pistachio/ 10705 10699 F: arch/mips/include/asm/mach-pistachio/ 10706 10700 F: arch/mips/boot/dts/img/pistachio*
+1 -1
Makefile
··· 2 2 VERSION = 4 3 3 PATCHLEVEL = 14 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc7 5 + EXTRAVERSION = -rc8 6 6 NAME = Fearless Coyote 7 7 8 8 # *DOCUMENTATION*
+2
arch/arm/Makefile
··· 44 44 45 45 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) 46 46 KBUILD_CPPFLAGS += -mbig-endian 47 + CHECKFLAGS += -D__ARMEB__ 47 48 AS += -EB 48 49 LD += -EB 49 50 else 50 51 KBUILD_CPPFLAGS += -mlittle-endian 52 + CHECKFLAGS += -D__ARMEL__ 51 53 AS += -EL 52 54 LD += -EL 53 55 endif
+9
arch/arm/boot/compressed/vmlinux.lds.S
··· 85 85 86 86 _edata = .; 87 87 88 + /* 89 + * The image_end section appears after any additional loadable sections 90 + * that the linker may decide to insert in the binary image. Having 91 + * this symbol allows further debug in the near future. 92 + */ 93 + .image_end (NOLOAD) : { 94 + _edata_real = .; 95 + } 96 + 88 97 _magic_sig = ZIMAGE_MAGIC(0x016f2818); 89 98 _magic_start = ZIMAGE_MAGIC(_start); 90 99 _magic_end = ZIMAGE_MAGIC(_edata);
+2 -2
arch/arm/boot/dts/armada-375.dtsi
··· 178 178 reg = <0x8000 0x1000>; 179 179 cache-unified; 180 180 cache-level = <2>; 181 - arm,double-linefill-incr = <1>; 181 + arm,double-linefill-incr = <0>; 182 182 arm,double-linefill-wrap = <0>; 183 - arm,double-linefill = <1>; 183 + arm,double-linefill = <0>; 184 184 prefetch-data = <1>; 185 185 }; 186 186
+2 -2
arch/arm/boot/dts/armada-38x.dtsi
··· 143 143 reg = <0x8000 0x1000>; 144 144 cache-unified; 145 145 cache-level = <2>; 146 - arm,double-linefill-incr = <1>; 146 + arm,double-linefill-incr = <0>; 147 147 arm,double-linefill-wrap = <0>; 148 - arm,double-linefill = <1>; 148 + arm,double-linefill = <0>; 149 149 prefetch-data = <1>; 150 150 }; 151 151
+2 -2
arch/arm/boot/dts/armada-39x.dtsi
··· 111 111 reg = <0x8000 0x1000>; 112 112 cache-unified; 113 113 cache-level = <2>; 114 - arm,double-linefill-incr = <1>; 114 + arm,double-linefill-incr = <0>; 115 115 arm,double-linefill-wrap = <0>; 116 - arm,double-linefill = <1>; 116 + arm,double-linefill = <0>; 117 117 prefetch-data = <1>; 118 118 }; 119 119
+6 -3
arch/arm/boot/dts/uniphier-ld4.dtsi
··· 209 209 interrupts = <0 80 4>; 210 210 pinctrl-names = "default"; 211 211 pinctrl-0 = <&pinctrl_usb0>; 212 - clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>; 212 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>, 213 + <&mio_clk 12>; 213 214 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>, 214 215 <&mio_rst 12>; 215 216 }; ··· 222 221 interrupts = <0 81 4>; 223 222 pinctrl-names = "default"; 224 223 pinctrl-0 = <&pinctrl_usb1>; 225 - clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>; 224 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>, 225 + <&mio_clk 13>; 226 226 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>, 227 227 <&mio_rst 13>; 228 228 }; ··· 235 233 interrupts = <0 82 4>; 236 234 pinctrl-names = "default"; 237 235 pinctrl-0 = <&pinctrl_usb2>; 238 - clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>; 236 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>, 237 + <&mio_clk 14>; 239 238 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>, 240 239 <&mio_rst 14>; 241 240 };
+4 -2
arch/arm/boot/dts/uniphier-pro4.dtsi
··· 241 241 interrupts = <0 80 4>; 242 242 pinctrl-names = "default"; 243 243 pinctrl-0 = <&pinctrl_usb2>; 244 - clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>; 244 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>, 245 + <&mio_clk 12>; 245 246 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>, 246 247 <&mio_rst 12>; 247 248 }; ··· 254 253 interrupts = <0 81 4>; 255 254 pinctrl-names = "default"; 256 255 pinctrl-0 = <&pinctrl_usb3>; 257 - clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>; 256 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>, 257 + <&mio_clk 13>; 258 258 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>, 259 259 <&mio_rst 13>; 260 260 };
+6 -3
arch/arm/boot/dts/uniphier-sld8.dtsi
··· 209 209 interrupts = <0 80 4>; 210 210 pinctrl-names = "default"; 211 211 pinctrl-0 = <&pinctrl_usb0>; 212 - clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>; 212 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>, 213 + <&mio_clk 12>; 213 214 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>, 214 215 <&mio_rst 12>; 215 216 }; ··· 222 221 interrupts = <0 81 4>; 223 222 pinctrl-names = "default"; 224 223 pinctrl-0 = <&pinctrl_usb1>; 225 - clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>; 224 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>, 225 + <&mio_clk 13>; 226 226 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>, 227 227 <&mio_rst 13>; 228 228 }; ··· 235 233 interrupts = <0 82 4>; 236 234 pinctrl-names = "default"; 237 235 pinctrl-0 = <&pinctrl_usb2>; 238 - clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>; 236 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>, 237 + <&mio_clk 14>; 239 238 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>, 240 239 <&mio_rst 14>; 241 240 };
-1
arch/arm/include/asm/Kbuild
··· 20 20 generic-y += sizes.h 21 21 generic-y += timex.h 22 22 generic-y += trace_clock.h 23 - generic-y += unaligned.h 24 23 25 24 generated-y += mach-types.h 26 25 generated-y += unistd-nr.h
+27
arch/arm/include/asm/unaligned.h
··· 1 + #ifndef __ASM_ARM_UNALIGNED_H 2 + #define __ASM_ARM_UNALIGNED_H 3 + 4 + /* 5 + * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+, 6 + * but we don't want to use linux/unaligned/access_ok.h since that can lead 7 + * to traps on unaligned stm/ldm or strd/ldrd. 8 + */ 9 + #include <asm/byteorder.h> 10 + 11 + #if defined(__LITTLE_ENDIAN) 12 + # include <linux/unaligned/le_struct.h> 13 + # include <linux/unaligned/be_byteshift.h> 14 + # include <linux/unaligned/generic.h> 15 + # define get_unaligned __get_unaligned_le 16 + # define put_unaligned __put_unaligned_le 17 + #elif defined(__BIG_ENDIAN) 18 + # include <linux/unaligned/be_struct.h> 19 + # include <linux/unaligned/le_byteshift.h> 20 + # include <linux/unaligned/generic.h> 21 + # define get_unaligned __get_unaligned_be 22 + # define put_unaligned __put_unaligned_be 23 + #else 24 + # error need to define endianess 25 + #endif 26 + 27 + #endif /* __ASM_ARM_UNALIGNED_H */
+2 -4
arch/arm/kvm/emulate.c
··· 227 227 u32 return_offset = (is_thumb) ? 2 : 4; 228 228 229 229 kvm_update_psr(vcpu, UND_MODE); 230 - *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset; 230 + *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; 231 231 232 232 /* Branch to exception vector */ 233 233 *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; ··· 239 239 */ 240 240 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) 241 241 { 242 - unsigned long cpsr = *vcpu_cpsr(vcpu); 243 - bool is_thumb = (cpsr & PSR_T_BIT); 244 242 u32 vect_offset; 245 - u32 return_offset = (is_thumb) ? 4 : 0; 243 + u32 return_offset = (is_pabt) ? 4 : 8; 246 244 bool is_lpae; 247 245 248 246 kvm_update_psr(vcpu, ABT_MODE);
+1 -1
arch/arm/kvm/hyp/Makefile
··· 3 3 # Makefile for Kernel-based Virtual Machine module, HYP part 4 4 # 5 5 6 - ccflags-y += -fno-stack-protector 6 + ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING 7 7 8 8 KVM=../../../../virt/kvm 9 9
+6 -3
arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
··· 299 299 interrupts = <0 243 4>; 300 300 pinctrl-names = "default"; 301 301 pinctrl-0 = <&pinctrl_usb0>; 302 - clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>; 302 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>, 303 + <&mio_clk 12>; 303 304 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>, 304 305 <&mio_rst 12>; 305 306 }; ··· 312 311 interrupts = <0 244 4>; 313 312 pinctrl-names = "default"; 314 313 pinctrl-0 = <&pinctrl_usb1>; 315 - clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>; 314 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>, 315 + <&mio_clk 13>; 316 316 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>, 317 317 <&mio_rst 13>; 318 318 }; ··· 325 323 interrupts = <0 245 4>; 326 324 pinctrl-names = "default"; 327 325 pinctrl-0 = <&pinctrl_usb2>; 328 - clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>; 326 + clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>, 327 + <&mio_clk 14>; 329 328 resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>, 330 329 <&mio_rst 14>; 331 330 };
+1 -1
arch/arm64/kvm/hyp/Makefile
··· 3 3 # Makefile for Kernel-based Virtual Machine module, HYP part 4 4 # 5 5 6 - ccflags-y += -fno-stack-protector 6 + ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING 7 7 8 8 KVM=../../../../virt/kvm 9 9
+15 -1
arch/arm64/kvm/inject_fault.c
··· 33 33 #define LOWER_EL_AArch64_VECTOR 0x400 34 34 #define LOWER_EL_AArch32_VECTOR 0x600 35 35 36 + /* 37 + * Table taken from ARMv8 ARM DDI0487B-B, table G1-10. 38 + */ 39 + static const u8 return_offsets[8][2] = { 40 + [0] = { 0, 0 }, /* Reset, unused */ 41 + [1] = { 4, 2 }, /* Undefined */ 42 + [2] = { 0, 0 }, /* SVC, unused */ 43 + [3] = { 4, 4 }, /* Prefetch abort */ 44 + [4] = { 8, 8 }, /* Data abort */ 45 + [5] = { 0, 0 }, /* HVC, unused */ 46 + [6] = { 4, 4 }, /* IRQ, unused */ 47 + [7] = { 4, 4 }, /* FIQ, unused */ 48 + }; 49 + 36 50 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) 37 51 { 38 52 unsigned long cpsr; 39 53 unsigned long new_spsr_value = *vcpu_cpsr(vcpu); 40 54 bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT); 41 - u32 return_offset = (is_thumb) ? 4 : 0; 55 + u32 return_offset = return_offsets[vect_offset >> 2][is_thumb]; 42 56 u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); 43 57 44 58 cpsr = mode | COMPAT_PSR_I_BIT;
+2
arch/ia64/include/asm/acpi.h
··· 112 112 buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP; 113 113 } 114 114 115 + #define acpi_unlazy_tlb(x) 116 + 115 117 #ifdef CONFIG_ACPI_NUMA 116 118 extern cpumask_t early_cpu_possible_map; 117 119 #define for_each_possible_early_cpu(cpu) \
+1 -1
arch/mips/generic/board-ni169445.its.S
··· 1 - { 1 + / { 2 2 images { 3 3 fdt@ni169445 { 4 4 description = "NI 169445 device tree";
+1 -1
arch/mips/generic/init.c
··· 20 20 #include <asm/fw/fw.h> 21 21 #include <asm/irq_cpu.h> 22 22 #include <asm/machine.h> 23 - #include <asm/mips-cpc.h> 23 + #include <asm/mips-cps.h> 24 24 #include <asm/prom.h> 25 25 #include <asm/smp-ops.h> 26 26 #include <asm/time.h>
+1 -1
arch/mips/generic/kexec.c
··· 1 1 /* 2 2 * Copyright (C) 2016 Imagination Technologies 3 - * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com> 3 + * Author: Marcin Nowakowski <marcin.nowakowski@mips.com> 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify it 6 6 * under the terms of the GNU General Public License as published by the
+2 -2
arch/mips/include/asm/mips-cm.h
··· 142 142 GCR_ACCESSOR_RW(64, 0x008, base) 143 143 #define CM_GCR_BASE_GCRBASE GENMASK_ULL(47, 15) 144 144 #define CM_GCR_BASE_CMDEFTGT GENMASK(1, 0) 145 - #define CM_GCR_BASE_CMDEFTGT_DISABLED 0 146 - #define CM_GCR_BASE_CMDEFTGT_MEM 1 145 + #define CM_GCR_BASE_CMDEFTGT_MEM 0 146 + #define CM_GCR_BASE_CMDEFTGT_RESERVED 1 147 147 #define CM_GCR_BASE_CMDEFTGT_IOCU0 2 148 148 #define CM_GCR_BASE_CMDEFTGT_IOCU1 3 149 149
+4 -4
arch/mips/include/asm/stackframe.h
··· 199 199 sll k0, 3 /* extract cu0 bit */ 200 200 .set noreorder 201 201 bltz k0, 8f 202 + move k0, sp 203 + .if \docfi 204 + .cfi_register sp, k0 205 + .endif 202 206 #ifdef CONFIG_EVA 203 207 /* 204 208 * Flush interAptiv's Return Prediction Stack (RPS) by writing ··· 229 225 MTC0 k0, CP0_ENTRYHI 230 226 #endif 231 227 .set reorder 232 - move k0, sp 233 - .if \docfi 234 - .cfi_register sp, k0 235 - .endif 236 228 /* Called from user mode, new stack. */ 237 229 get_saved_sp docfi=\docfi tosp=1 238 230 8:
+1 -1
arch/mips/kernel/probes-common.h
··· 1 1 /* 2 2 * Copyright (C) 2016 Imagination Technologies 3 - * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com> 3 + * Author: Marcin Nowakowski <marcin.nowakowski@mips.com> 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify it 6 6 * under the terms of the GNU General Public License as published by the
+3 -3
arch/mips/kernel/smp-cmp.c
··· 19 19 #undef DEBUG 20 20 21 21 #include <linux/kernel.h> 22 - #include <linux/sched.h> 22 + #include <linux/sched/task_stack.h> 23 23 #include <linux/smp.h> 24 24 #include <linux/cpumask.h> 25 25 #include <linux/interrupt.h> ··· 50 50 51 51 #ifdef CONFIG_MIPS_MT_SMP 52 52 if (cpu_has_mipsmt) 53 - c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & 54 - TCBIND_CURVPE; 53 + cpu_set_vpe_id(c, (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & 54 + TCBIND_CURVPE); 55 55 #endif 56 56 } 57 57
+1 -1
arch/mips/kernel/smp-cps.c
··· 306 306 int err; 307 307 308 308 /* We don't yet support booting CPUs in other clusters */ 309 - if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&current_cpu_data)) 309 + if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data)) 310 310 return -ENOSYS; 311 311 312 312 vpe_cfg->pc = (unsigned long)&smp_bootstrap;
+17 -7
arch/mips/kernel/smp.c
··· 42 42 #include <asm/processor.h> 43 43 #include <asm/idle.h> 44 44 #include <asm/r4k-timer.h> 45 - #include <asm/mips-cpc.h> 45 + #include <asm/mips-cps.h> 46 46 #include <asm/mmu_context.h> 47 47 #include <asm/time.h> 48 48 #include <asm/setup.h> ··· 66 66 cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 67 67 EXPORT_SYMBOL(cpu_core_map); 68 68 69 + static DECLARE_COMPLETION(cpu_starting); 69 70 static DECLARE_COMPLETION(cpu_running); 70 71 71 72 /* ··· 375 374 cpumask_set_cpu(cpu, &cpu_coherent_mask); 376 375 notify_cpu_starting(cpu); 377 376 377 + /* Notify boot CPU that we're starting & ready to sync counters */ 378 + complete(&cpu_starting); 379 + 380 + synchronise_count_slave(cpu); 381 + 382 + /* The CPU is running and counters synchronised, now mark it online */ 378 383 set_cpu_online(cpu, true); 379 384 380 385 set_cpu_sibling_map(cpu); ··· 388 381 389 382 calculate_cpu_foreign_map(); 390 383 384 + /* 385 + * Notify boot CPU that we're up & online and it can safely return 386 + * from __cpu_up 387 + */ 391 388 complete(&cpu_running); 392 - synchronise_count_slave(cpu); 393 389 394 390 /* 395 391 * irq will be enabled in ->smp_finish(), enabling it too early ··· 455 445 if (err) 456 446 return err; 457 447 458 - /* 459 - * We must check for timeout here, as the CPU will not be marked 460 - * online until the counters are synchronised. 461 - */ 462 - if (!wait_for_completion_timeout(&cpu_running, 448 + /* Wait for CPU to start and be ready to sync counters */ 449 + if (!wait_for_completion_timeout(&cpu_starting, 463 450 msecs_to_jiffies(1000))) { 464 451 pr_crit("CPU%u: failed to start\n", cpu); 465 452 return -EIO; 466 453 } 467 454 468 455 synchronise_count_master(cpu); 456 + 457 + /* Wait for CPU to finish startup & mark itself online before return */ 458 + wait_for_completion(&cpu_running); 469 459 return 0; 470 460 } 471 461
+1 -1
arch/mips/mm/uasm-micromips.c
··· 80 80 [insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS}, 81 81 [insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, 82 82 [insn_ld] = {0, 0}, 83 - [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM}, 83 + [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, 84 84 [insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM}, 85 85 [insn_lld] = {0, 0}, 86 86 [insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
+1 -1
arch/mips/net/ebpf_jit.c
··· 1513 1513 } 1514 1514 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 1515 1515 if (src < 0) 1516 - return dst; 1516 + return src; 1517 1517 if (BPF_MODE(insn->code) == BPF_XADD) { 1518 1518 switch (BPF_SIZE(insn->code)) { 1519 1519 case BPF_W:
+6 -6
arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
··· 157 157 .endr 158 158 159 159 # Find min length 160 - vmovdqa _lens+0*16(state), %xmm0 161 - vmovdqa _lens+1*16(state), %xmm1 160 + vmovdqu _lens+0*16(state), %xmm0 161 + vmovdqu _lens+1*16(state), %xmm1 162 162 163 163 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} 164 164 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} ··· 178 178 vpsubd %xmm2, %xmm0, %xmm0 179 179 vpsubd %xmm2, %xmm1, %xmm1 180 180 181 - vmovdqa %xmm0, _lens+0*16(state) 182 - vmovdqa %xmm1, _lens+1*16(state) 181 + vmovdqu %xmm0, _lens+0*16(state) 182 + vmovdqu %xmm1, _lens+1*16(state) 183 183 184 184 # "state" and "args" are the same address, arg1 185 185 # len is arg2 ··· 235 235 jc .return_null 236 236 237 237 # Find min length 238 - vmovdqa _lens(state), %xmm0 239 - vmovdqa _lens+1*16(state), %xmm1 238 + vmovdqu _lens(state), %xmm0 239 + vmovdqu _lens+1*16(state), %xmm1 240 240 241 241 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} 242 242 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+6 -6
arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
··· 155 155 .endr 156 156 157 157 # Find min length 158 - vmovdqa _lens+0*16(state), %xmm0 159 - vmovdqa _lens+1*16(state), %xmm1 158 + vmovdqu _lens+0*16(state), %xmm0 159 + vmovdqu _lens+1*16(state), %xmm1 160 160 161 161 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} 162 162 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} ··· 176 176 vpsubd %xmm2, %xmm0, %xmm0 177 177 vpsubd %xmm2, %xmm1, %xmm1 178 178 179 - vmovdqa %xmm0, _lens+0*16(state) 180 - vmovdqa %xmm1, _lens+1*16(state) 179 + vmovdqu %xmm0, _lens+0*16(state) 180 + vmovdqu %xmm1, _lens+1*16(state) 181 181 182 182 # "state" and "args" are the same address, arg1 183 183 # len is arg2 ··· 234 234 jc .return_null 235 235 236 236 # Find min length 237 - vmovdqa _lens(state), %xmm0 238 - vmovdqa _lens+1*16(state), %xmm1 237 + vmovdqu _lens(state), %xmm0 238 + vmovdqu _lens+1*16(state), %xmm1 239 239 240 240 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} 241 241 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+2
arch/x86/include/asm/acpi.h
··· 150 150 extern int x86_acpi_numa_init(void); 151 151 #endif /* CONFIG_ACPI_NUMA */ 152 152 153 + #define acpi_unlazy_tlb(x) leave_mm(x) 154 + 153 155 #ifdef CONFIG_ACPI_APEI 154 156 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr) 155 157 {
+25 -92
arch/x86/kernel/cpu/mcheck/dev-mcelog.c
··· 24 24 static char mce_helper[128]; 25 25 static char *mce_helper_argv[2] = { mce_helper, NULL }; 26 26 27 - #define mce_log_get_idx_check(p) \ 28 - ({ \ 29 - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ 30 - !lockdep_is_held(&mce_chrdev_read_mutex), \ 31 - "suspicious mce_log_get_idx_check() usage"); \ 32 - smp_load_acquire(&(p)); \ 33 - }) 34 - 35 27 /* 36 28 * Lockless MCE logging infrastructure. 37 29 * This avoids deadlocks on printk locks without having to break locks. Also ··· 45 53 void *data) 46 54 { 47 55 struct mce *mce = (struct mce *)data; 48 - unsigned int next, entry; 56 + unsigned int entry; 49 57 50 - wmb(); 51 - for (;;) { 52 - entry = mce_log_get_idx_check(mcelog.next); 53 - for (;;) { 58 + mutex_lock(&mce_chrdev_read_mutex); 54 59 55 - /* 56 - * When the buffer fills up discard new entries. 57 - * Assume that the earlier errors are the more 58 - * interesting ones: 59 - */ 60 - if (entry >= MCE_LOG_LEN) { 61 - set_bit(MCE_OVERFLOW, 62 - (unsigned long *)&mcelog.flags); 63 - return NOTIFY_OK; 64 - } 65 - /* Old left over entry. Skip: */ 66 - if (mcelog.entry[entry].finished) { 67 - entry++; 68 - continue; 69 - } 70 - break; 71 - } 72 - smp_rmb(); 73 - next = entry + 1; 74 - if (cmpxchg(&mcelog.next, entry, next) == entry) 75 - break; 60 + entry = mcelog.next; 61 + 62 + /* 63 + * When the buffer fills up discard new entries. 
Assume that the 64 + * earlier errors are the more interesting ones: 65 + */ 66 + if (entry >= MCE_LOG_LEN) { 67 + set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags); 68 + goto unlock; 76 69 } 70 + 71 + mcelog.next = entry + 1; 72 + 77 73 memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); 78 - wmb(); 79 74 mcelog.entry[entry].finished = 1; 80 - wmb(); 81 75 82 76 /* wake processes polling /dev/mcelog */ 83 77 wake_up_interruptible(&mce_chrdev_wait); 78 + 79 + unlock: 80 + mutex_unlock(&mce_chrdev_read_mutex); 84 81 85 82 return NOTIFY_OK; 86 83 } ··· 158 177 return 0; 159 178 } 160 179 161 - static void collect_tscs(void *data) 162 - { 163 - unsigned long *cpu_tsc = (unsigned long *)data; 164 - 165 - cpu_tsc[smp_processor_id()] = rdtsc(); 166 - } 167 - 168 180 static int mce_apei_read_done; 169 181 170 182 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */ ··· 205 231 size_t usize, loff_t *off) 206 232 { 207 233 char __user *buf = ubuf; 208 - unsigned long *cpu_tsc; 209 - unsigned prev, next; 234 + unsigned next; 210 235 int i, err; 211 - 212 - cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); 213 - if (!cpu_tsc) 214 - return -ENOMEM; 215 236 216 237 mutex_lock(&mce_chrdev_read_mutex); 217 238 ··· 216 247 goto out; 217 248 } 218 249 219 - next = mce_log_get_idx_check(mcelog.next); 220 - 221 250 /* Only supports full reads right now */ 222 251 err = -EINVAL; 223 252 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) 224 253 goto out; 225 254 255 + next = mcelog.next; 226 256 err = 0; 227 - prev = 0; 228 - do { 229 - for (i = prev; i < next; i++) { 230 - unsigned long start = jiffies; 231 - struct mce *m = &mcelog.entry[i]; 232 257 233 - while (!m->finished) { 234 - if (time_after_eq(jiffies, start + 2)) { 235 - memset(m, 0, sizeof(*m)); 236 - goto timeout; 237 - } 238 - cpu_relax(); 239 - } 240 - smp_rmb(); 241 - err |= copy_to_user(buf, m, sizeof(*m)); 242 - buf += sizeof(*m); 243 - timeout: 244 - ; 245 - } 246 - 247 
- memset(mcelog.entry + prev, 0, 248 - (next - prev) * sizeof(struct mce)); 249 - prev = next; 250 - next = cmpxchg(&mcelog.next, prev, 0); 251 - } while (next != prev); 252 - 253 - synchronize_sched(); 254 - 255 - /* 256 - * Collect entries that were still getting written before the 257 - * synchronize. 258 - */ 259 - on_each_cpu(collect_tscs, cpu_tsc, 1); 260 - 261 - for (i = next; i < MCE_LOG_LEN; i++) { 258 + for (i = 0; i < next; i++) { 262 259 struct mce *m = &mcelog.entry[i]; 263 260 264 - if (m->finished && m->tsc < cpu_tsc[m->cpu]) { 265 - err |= copy_to_user(buf, m, sizeof(*m)); 266 - smp_rmb(); 267 - buf += sizeof(*m); 268 - memset(m, 0, sizeof(*m)); 269 - } 261 + err |= copy_to_user(buf, m, sizeof(*m)); 262 + buf += sizeof(*m); 270 263 } 264 + 265 + memset(mcelog.entry, 0, next * sizeof(struct mce)); 266 + mcelog.next = 0; 271 267 272 268 if (err) 273 269 err = -EFAULT; 274 270 275 271 out: 276 272 mutex_unlock(&mce_chrdev_read_mutex); 277 - kfree(cpu_tsc); 278 273 279 274 return err ? err : buf - ubuf; 280 275 }
+1 -1
arch/x86/kernel/kvmclock.c
··· 79 79 80 80 static int kvm_set_wallclock(const struct timespec *now) 81 81 { 82 - return -1; 82 + return -ENODEV; 83 83 } 84 84 85 85 static u64 kvm_clock_read(void)
+13
arch/x86/kernel/module.c
··· 172 172 case R_X86_64_NONE: 173 173 break; 174 174 case R_X86_64_64: 175 + if (*(u64 *)loc != 0) 176 + goto invalid_relocation; 175 177 *(u64 *)loc = val; 176 178 break; 177 179 case R_X86_64_32: 180 + if (*(u32 *)loc != 0) 181 + goto invalid_relocation; 178 182 *(u32 *)loc = val; 179 183 if (val != *(u32 *)loc) 180 184 goto overflow; 181 185 break; 182 186 case R_X86_64_32S: 187 + if (*(s32 *)loc != 0) 188 + goto invalid_relocation; 183 189 *(s32 *)loc = val; 184 190 if ((s64)val != *(s32 *)loc) 185 191 goto overflow; 186 192 break; 187 193 case R_X86_64_PC32: 194 + if (*(u32 *)loc != 0) 195 + goto invalid_relocation; 188 196 val -= (u64)loc; 189 197 *(u32 *)loc = val; 190 198 #if 0 ··· 207 199 } 208 200 } 209 201 return 0; 202 + 203 + invalid_relocation: 204 + pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n", 205 + (int)ELF64_R_TYPE(rel[i].r_info), loc, val); 206 + return -ENOEXEC; 210 207 211 208 overflow: 212 209 pr_err("overflow in relocation type %d val %Lx\n",
+5
arch/x86/kvm/lapic.c
··· 1992 1992 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP); 1993 1993 vcpu->arch.pv_eoi.msr_val = 0; 1994 1994 apic_update_ppr(apic); 1995 + if (vcpu->arch.apicv_active) { 1996 + kvm_x86_ops->apicv_post_state_restore(vcpu); 1997 + kvm_x86_ops->hwapic_irr_update(vcpu, -1); 1998 + kvm_x86_ops->hwapic_isr_update(vcpu, -1); 1999 + } 1995 2000 1996 2001 vcpu->arch.apic_arb_prio = 0; 1997 2002 vcpu->arch.apic_attention = 0;
-3
arch/x86/kvm/vmx.c
··· 5619 5619 5620 5620 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 5621 5621 5622 - if (kvm_vcpu_apicv_active(vcpu)) 5623 - memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); 5624 - 5625 5622 if (vmx->vpid != 0) 5626 5623 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 5627 5624
+1 -1
arch/x86/mm/mem_encrypt.c
··· 40 40 * section is later cleared. 41 41 */ 42 42 u64 sme_me_mask __section(.data) = 0; 43 - EXPORT_SYMBOL_GPL(sme_me_mask); 43 + EXPORT_SYMBOL(sme_me_mask); 44 44 45 45 /* Buffer used for early in-place encryption by BSP, no locking needed */ 46 46 static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+14 -3
arch/x86/mm/tlb.c
··· 85 85 86 86 switch_mm(NULL, &init_mm, NULL); 87 87 } 88 + EXPORT_SYMBOL_GPL(leave_mm); 88 89 89 90 void switch_mm(struct mm_struct *prev, struct mm_struct *next, 90 91 struct task_struct *tsk) ··· 196 195 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); 197 196 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); 198 197 write_cr3(build_cr3(next, new_asid)); 199 - trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 200 - TLB_FLUSH_ALL); 198 + 199 + /* 200 + * NB: This gets called via leave_mm() in the idle path 201 + * where RCU functions differently. Tracing normally 202 + * uses RCU, so we need to use the _rcuidle variant. 203 + * 204 + * (There is no good reason for this. The idle code should 205 + * be rearranged to call this before rcu_idle_enter().) 206 + */ 207 + trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 201 208 } else { 202 209 /* The new ASID is already up to date. */ 203 210 write_cr3(build_cr3_noflush(next, new_asid)); 204 - trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0); 211 + 212 + /* See above wrt _rcuidle. */ 213 + trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); 205 214 } 206 215 207 216 this_cpu_write(cpu_tlbstate.loaded_mm, next);
+3 -1
crypto/ccm.c
··· 363 363 unsigned int cryptlen = req->cryptlen; 364 364 u8 *authtag = pctx->auth_tag; 365 365 u8 *odata = pctx->odata; 366 - u8 *iv = req->iv; 366 + u8 *iv = pctx->idata; 367 367 int err; 368 368 369 369 cryptlen -= authsize; ··· 378 378 dst = pctx->src; 379 379 if (req->src != req->dst) 380 380 dst = pctx->dst; 381 + 382 + memcpy(iv, req->iv, 16); 381 383 382 384 skcipher_request_set_tfm(skreq, ctx->ctr); 383 385 skcipher_request_set_callback(skreq, pctx->flags,
+2
drivers/acpi/processor_idle.c
··· 710 710 static void acpi_idle_enter_bm(struct acpi_processor *pr, 711 711 struct acpi_processor_cx *cx, bool timer_bc) 712 712 { 713 + acpi_unlazy_tlb(smp_processor_id()); 714 + 713 715 /* 714 716 * Must be done before busmaster disable as we might need to 715 717 * access HPET !
+28
drivers/acpi/sleep.c
··· 160 160 return 0; 161 161 } 162 162 163 + static bool acpi_sleep_no_lps0; 164 + 165 + static int __init init_no_lps0(const struct dmi_system_id *d) 166 + { 167 + acpi_sleep_no_lps0 = true; 168 + return 0; 169 + } 170 + 163 171 static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { 164 172 { 165 173 .callback = init_old_suspend_ordering, ··· 351 343 DMI_MATCH(DMI_PRODUCT_NAME, "80E3"), 352 344 }, 353 345 }, 346 + /* 347 + * https://bugzilla.kernel.org/show_bug.cgi?id=196907 348 + * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power 349 + * S0 Idle firmware interface. 350 + */ 351 + { 352 + .callback = init_no_lps0, 353 + .ident = "Dell XPS13 9360", 354 + .matches = { 355 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 356 + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), 357 + }, 358 + }, 354 359 {}, 355 360 }; 356 361 ··· 506 485 } 507 486 #else /* !CONFIG_ACPI_SLEEP */ 508 487 #define acpi_target_sleep_state ACPI_STATE_S0 488 + #define acpi_sleep_no_lps0 (false) 509 489 static inline void acpi_sleep_dmi_check(void) {} 510 490 #endif /* CONFIG_ACPI_SLEEP */ 511 491 ··· 884 862 885 863 if (lps0_device_handle) 886 864 return 0; 865 + 866 + if (acpi_sleep_no_lps0) { 867 + acpi_handle_info(adev->handle, 868 + "Low Power S0 Idle interface disabled\n"); 869 + return 0; 870 + } 887 871 888 872 if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) 889 873 return 0;
+3 -3
drivers/firmware/efi/libstub/Makefile
··· 34 34 lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o 35 35 36 36 # include the stub's generic dependencies from lib/ when building for ARM/arm64 37 - arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c 37 + arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c 38 + arm-deps-$(CONFIG_ARM64) += sort.c 38 39 39 40 $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE 40 41 $(call if_changed_rule,cc_o_c) 41 42 42 43 lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \ 43 - $(patsubst %.c,lib-%.o,$(arm-deps)) 44 + $(patsubst %.c,lib-%.o,$(arm-deps-y)) 44 45 45 46 lib-$(CONFIG_ARM) += arm32-stub.o 46 47 lib-$(CONFIG_ARM64) += arm64-stub.o ··· 92 91 # explicitly by the decompressor linker script. 93 92 # 94 93 STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub 95 - STUBCOPY_RM-$(CONFIG_ARM) += -R ___ksymtab+sort -R ___kcrctab+sort 96 94 STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
+5 -2
drivers/firmware/efi/libstub/arm-stub.c
··· 350 350 * The easiest way to find adjacent regions is to sort the memory map 351 351 * before traversing it. 352 352 */ 353 - sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); 353 + if (IS_ENABLED(CONFIG_ARM64)) 354 + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, 355 + NULL); 354 356 355 357 for (l = 0; l < map_size; l += desc_size, prev = in) { 356 358 u64 paddr, size; ··· 369 367 * a 4k page size kernel to kexec a 64k page size kernel and 370 368 * vice versa. 371 369 */ 372 - if (!regions_are_adjacent(prev, in) || 370 + if ((IS_ENABLED(CONFIG_ARM64) && 371 + !regions_are_adjacent(prev, in)) || 373 372 !regions_have_compatible_memory_type_attrs(prev, in)) { 374 373 375 374 paddr = round_down(in->phys_addr, SZ_64K);
+6 -1
drivers/ide/ide-cd.c
··· 867 867 int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) 868 868 { 869 869 struct cdrom_info *info = drive->driver_data; 870 - struct cdrom_device_info *cdi = &info->devinfo; 870 + struct cdrom_device_info *cdi; 871 871 unsigned char cmd[BLK_MAX_CDB]; 872 872 873 873 ide_debug_log(IDE_DBG_FUNC, "enter"); 874 + 875 + if (!info) 876 + return -EIO; 877 + 878 + cdi = &info->devinfo; 874 879 875 880 memset(cmd, 0, BLK_MAX_CDB); 876 881 cmd[0] = GPCMD_TEST_UNIT_READY;
+5 -4
drivers/idle/intel_idle.c
··· 913 913 struct cpuidle_state *state = &drv->states[index]; 914 914 unsigned long eax = flg2MWAIT(state->flags); 915 915 unsigned int cstate; 916 + int cpu = smp_processor_id(); 916 917 917 918 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; 918 919 919 920 /* 920 - * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition 921 - * will probably flush the TLB. It's not guaranteed to flush 922 - * the TLB, though, so it's not clear that we can do anything 923 - * useful with this knowledge. 921 + * leave_mm() to avoid costly and often unnecessary wakeups 922 + * for flushing the user TLB's associated with the active mm. 924 923 */ 924 + if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) 925 + leave_mm(cpu); 925 926 926 927 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 927 928 tick_broadcast_enter();
+1
drivers/input/sparse-keymap.c
··· 255 255 256 256 case KE_VSW: 257 257 input_report_switch(dev, ke->sw.code, value); 258 + input_sync(dev); 258 259 break; 259 260 } 260 261 }
+1
drivers/input/touchscreen/ar1021_i2c.c
··· 117 117 input->open = ar1021_i2c_open; 118 118 input->close = ar1021_i2c_close; 119 119 120 + __set_bit(INPUT_PROP_DIRECT, input->propbit); 120 121 input_set_capability(input, EV_KEY, BTN_TOUCH); 121 122 input_set_abs_params(input, ABS_X, 0, AR1021_MAX_X, 0, 0); 122 123 input_set_abs_params(input, ABS_Y, 0, AR1021_MAX_Y, 0, 0);
+1
drivers/irqchip/irq-mvebu-gicp.c
··· 194 194 return -ENOMEM; 195 195 196 196 gicp->dev = &pdev->dev; 197 + spin_lock_init(&gicp->spi_lock); 197 198 198 199 gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 199 200 if (!gicp->res)
+2 -1
drivers/net/bonding/bond_main.c
··· 2046 2046 2047 2047 bond_for_each_slave_rcu(bond, slave, iter) { 2048 2048 slave->new_link = BOND_LINK_NOCHANGE; 2049 + slave->link_new_state = slave->link; 2049 2050 2050 2051 link_state = bond_check_dev_link(bond, slave->dev, 0); 2051 2052 ··· 3268 3267 hash ^= (hash >> 16); 3269 3268 hash ^= (hash >> 8); 3270 3269 3271 - return hash; 3270 + return hash >> 1; 3272 3271 } 3273 3272 3274 3273 /*-------------------------- Device entry points ----------------------------*/
+3 -3
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
··· 37 37 38 38 #define T4FW_VERSION_MAJOR 0x01 39 39 #define T4FW_VERSION_MINOR 0x10 40 - #define T4FW_VERSION_MICRO 0x2D 40 + #define T4FW_VERSION_MICRO 0x3F 41 41 #define T4FW_VERSION_BUILD 0x00 42 42 43 43 #define T4FW_MIN_VERSION_MAJOR 0x01 ··· 46 46 47 47 #define T5FW_VERSION_MAJOR 0x01 48 48 #define T5FW_VERSION_MINOR 0x10 49 - #define T5FW_VERSION_MICRO 0x2D 49 + #define T5FW_VERSION_MICRO 0x3F 50 50 #define T5FW_VERSION_BUILD 0x00 51 51 52 52 #define T5FW_MIN_VERSION_MAJOR 0x00 ··· 55 55 56 56 #define T6FW_VERSION_MAJOR 0x01 57 57 #define T6FW_VERSION_MINOR 0x10 58 - #define T6FW_VERSION_MICRO 0x2D 58 + #define T6FW_VERSION_MICRO 0x3F 59 59 #define T6FW_VERSION_BUILD 0x00 60 60 61 61 #define T6FW_MIN_VERSION_MAJOR 0x00
+4
drivers/net/ethernet/marvell/mvpp2.c
··· 6941 6941 for (i = 0; i < port->nqvecs; i++) { 6942 6942 struct mvpp2_queue_vector *qv = port->qvecs + i; 6943 6943 6944 + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) 6945 + irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); 6946 + 6944 6947 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); 6945 6948 if (err) 6946 6949 goto err; ··· 6973 6970 struct mvpp2_queue_vector *qv = port->qvecs + i; 6974 6971 6975 6972 irq_set_affinity_hint(qv->irq, NULL); 6973 + irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); 6976 6974 free_irq(qv->irq, qv); 6977 6975 } 6978 6976 }
+8 -5
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
··· 365 365 struct mlx5e_l2_hash_node *hn) 366 366 { 367 367 u8 action = hn->action; 368 + u8 mac_addr[ETH_ALEN]; 368 369 int l2_err = 0; 370 + 371 + ether_addr_copy(mac_addr, hn->ai.addr); 369 372 370 373 switch (action) { 371 374 case MLX5E_ACTION_ADD: 372 375 mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); 373 - if (!is_multicast_ether_addr(hn->ai.addr)) { 374 - l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr); 376 + if (!is_multicast_ether_addr(mac_addr)) { 377 + l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr); 375 378 hn->mpfs = !l2_err; 376 379 } 377 380 hn->action = MLX5E_ACTION_NONE; 378 381 break; 379 382 380 383 case MLX5E_ACTION_DEL: 381 - if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs) 382 - l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr); 384 + if (!is_multicast_ether_addr(mac_addr) && hn->mpfs) 385 + l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr); 383 386 mlx5e_del_l2_flow_rule(priv, &hn->ai); 384 387 mlx5e_del_l2_from_hash(hn); 385 388 break; ··· 390 387 391 388 if (l2_err) 392 389 netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", 393 - action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err); 390 + action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err); 394 391 } 395 392 396 393 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+2 -2
drivers/net/usb/asix_devices.c
··· 626 626 struct usbnet *dev = usb_get_intfdata(intf); 627 627 struct asix_common_private *priv = dev->driver_priv; 628 628 629 - if (priv->suspend) 629 + if (priv && priv->suspend) 630 630 priv->suspend(dev); 631 631 632 632 return usbnet_suspend(intf, message); ··· 678 678 struct usbnet *dev = usb_get_intfdata(intf); 679 679 struct asix_common_private *priv = dev->driver_priv; 680 680 681 - if (priv->resume) 681 + if (priv && priv->resume) 682 682 priv->resume(dev); 683 683 684 684 return usbnet_resume(intf);
+1 -1
drivers/net/usb/cdc_ether.c
··· 230 230 goto bad_desc; 231 231 } 232 232 233 - if (header.usb_cdc_ether_desc) { 233 + if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { 234 234 dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize); 235 235 /* because of Zaurus, we may be ignoring the host 236 236 * side link address we were given.
+2 -1
drivers/net/usb/qmi_wwan.c
··· 499 499 return 1; 500 500 } 501 501 if (rawip) { 502 + skb_reset_mac_header(skb); 502 503 skb->dev = dev->net; /* normally set by eth_type_trans */ 503 504 skb->protocol = proto; 504 505 return 1; ··· 682 681 } 683 682 684 683 /* errors aren't fatal - we can live with the dynamic address */ 685 - if (cdc_ether) { 684 + if (cdc_ether && cdc_ether->wMaxSegmentSize) { 686 685 dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize); 687 686 usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); 688 687 }
-3
drivers/scsi/scsi_lib.c
··· 2685 2685 2686 2686 } 2687 2687 sdev->sdev_state = state; 2688 - sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state"); 2689 2688 return 0; 2690 2689 2691 2690 illegal: ··· 3108 3109 case SDEV_BLOCK: 3109 3110 case SDEV_TRANSPORT_OFFLINE: 3110 3111 sdev->sdev_state = new_state; 3111 - sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state"); 3112 3112 break; 3113 3113 case SDEV_CREATED_BLOCK: 3114 3114 if (new_state == SDEV_TRANSPORT_OFFLINE || ··· 3115 3117 sdev->sdev_state = new_state; 3116 3118 else 3117 3119 sdev->sdev_state = SDEV_CREATED; 3118 - sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state"); 3119 3120 break; 3120 3121 case SDEV_CANCEL: 3121 3122 case SDEV_OFFLINE:
+1 -4
drivers/scsi/scsi_transport_srp.c
··· 556 556 */ 557 557 shost_for_each_device(sdev, shost) { 558 558 mutex_lock(&sdev->state_mutex); 559 - if (sdev->sdev_state == SDEV_OFFLINE) { 559 + if (sdev->sdev_state == SDEV_OFFLINE) 560 560 sdev->sdev_state = SDEV_RUNNING; 561 - sysfs_notify(&sdev->sdev_gendev.kobj, 562 - NULL, "state"); 563 - } 564 561 mutex_unlock(&sdev->state_mutex); 565 562 } 566 563 } else if (rport->state == SRP_RPORT_RUNNING) {
+2 -2
include/linux/compiler.h
··· 191 191 asm("%c0:\n\t" \ 192 192 ".pushsection .discard.reachable\n\t" \ 193 193 ".long %c0b - .\n\t" \ 194 - ".popsection\n\t" : : "i" (__LINE__)); \ 194 + ".popsection\n\t" : : "i" (__COUNTER__)); \ 195 195 }) 196 196 #define annotate_unreachable() ({ \ 197 197 asm("%c0:\n\t" \ 198 198 ".pushsection .discard.unreachable\n\t" \ 199 199 ".long %c0b - .\n\t" \ 200 - ".popsection\n\t" : : "i" (__LINE__)); \ 200 + ".popsection\n\t" : : "i" (__COUNTER__)); \ 201 201 }) 202 202 #define ASM_UNREACHABLE \ 203 203 "999:\n\t" \
+7
include/linux/skbuff.h
··· 3841 3841 #endif 3842 3842 } 3843 3843 3844 + static inline void ipvs_reset(struct sk_buff *skb) 3845 + { 3846 + #if IS_ENABLED(CONFIG_IP_VS) 3847 + skb->ipvs_property = 0; 3848 + #endif 3849 + } 3850 + 3844 3851 /* Note: This doesn't put any conntrack and bridge info in dst. */ 3845 3852 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, 3846 3853 bool copy)
+1 -3
include/net/act_api.h
··· 14 14 struct tcf_idrinfo { 15 15 spinlock_t lock; 16 16 struct idr action_idr; 17 - struct net *net; 18 17 }; 19 18 20 19 struct tc_action_ops; ··· 104 105 105 106 static inline 106 107 int tc_action_net_init(struct tc_action_net *tn, 107 - const struct tc_action_ops *ops, struct net *net) 108 + const struct tc_action_ops *ops) 108 109 { 109 110 int err = 0; 110 111 ··· 112 113 if (!tn->idrinfo) 113 114 return -ENOMEM; 114 115 tn->ops = ops; 115 - tn->idrinfo->net = net; 116 116 spin_lock_init(&tn->idrinfo->lock); 117 117 idr_init(&tn->idrinfo->action_idr); 118 118 return err;
+24
include/net/pkt_cls.h
··· 231 231 __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ 232 232 int nr_actions; 233 233 struct tc_action **actions; 234 + struct net *net; 234 235 #endif 235 236 /* Map to export classifier specific extension TLV types to the 236 237 * generic extensions API. Unsupported extensions must be set to 0. ··· 245 244 #ifdef CONFIG_NET_CLS_ACT 246 245 exts->type = 0; 247 246 exts->nr_actions = 0; 247 + exts->net = NULL; 248 248 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), 249 249 GFP_KERNEL); 250 250 if (!exts->actions) ··· 254 252 exts->action = action; 255 253 exts->police = police; 256 254 return 0; 255 + } 256 + 257 + /* Return false if the netns is being destroyed in cleanup_net(). Callers 258 + * need to do cleanup synchronously in this case, otherwise may race with 259 + * tc_action_net_exit(). Return true for other cases. 260 + */ 261 + static inline bool tcf_exts_get_net(struct tcf_exts *exts) 262 + { 263 + #ifdef CONFIG_NET_CLS_ACT 264 + exts->net = maybe_get_net(exts->net); 265 + return exts->net != NULL; 266 + #else 267 + return true; 268 + #endif 269 + } 270 + 271 + static inline void tcf_exts_put_net(struct tcf_exts *exts) 272 + { 273 + #ifdef CONFIG_NET_CLS_ACT 274 + if (exts->net) 275 + put_net(exts->net); 276 + #endif 257 277 } 258 278 259 279 static inline void tcf_exts_to_list(const struct tcf_exts *exts,
+2 -1
include/sound/seq_kernel.h
··· 49 49 #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200 50 50 51 51 /* max delivery path length */ 52 - #define SNDRV_SEQ_MAX_HOPS 10 52 + /* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */ 53 + #define SNDRV_SEQ_MAX_HOPS 8 53 54 54 55 /* max size of event size */ 55 56 #define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff
+2
include/sound/timer.h
··· 90 90 struct list_head ack_list_head; 91 91 struct list_head sack_list_head; /* slow ack list head */ 92 92 struct tasklet_struct task_queue; 93 + int max_instances; /* upper limit of timer instances */ 94 + int num_instances; /* current number of timer instances */ 93 95 }; 94 96 95 97 struct snd_timer_instance {
+3 -3
include/uapi/sound/asound.h
··· 94 94 SNDRV_HWDEP_IFACE_VX, /* Digigram VX cards */ 95 95 SNDRV_HWDEP_IFACE_MIXART, /* Digigram miXart cards */ 96 96 SNDRV_HWDEP_IFACE_USX2Y, /* Tascam US122, US224 & US428 usb */ 97 - SNDRV_HWDEP_IFACE_EMUX_WAVETABLE, /* EmuX wavetable */ 97 + SNDRV_HWDEP_IFACE_EMUX_WAVETABLE, /* EmuX wavetable */ 98 98 SNDRV_HWDEP_IFACE_BLUETOOTH, /* Bluetooth audio */ 99 99 SNDRV_HWDEP_IFACE_USX2Y_PCM, /* Tascam US122, US224 & US428 rawusb pcm */ 100 100 SNDRV_HWDEP_IFACE_PCXHR, /* Digigram PCXHR */ ··· 384 384 385 385 struct snd_pcm_hw_params { 386 386 unsigned int flags; 387 - struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - 387 + struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - 388 388 SNDRV_PCM_HW_PARAM_FIRST_MASK + 1]; 389 389 struct snd_mask mres[5]; /* reserved masks */ 390 390 struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL - ··· 857 857 #define SNDRV_CTL_ELEM_ACCESS_INACTIVE (1<<8) /* control does actually nothing, but may be updated */ 858 858 #define SNDRV_CTL_ELEM_ACCESS_LOCK (1<<9) /* write lock */ 859 859 #define SNDRV_CTL_ELEM_ACCESS_OWNER (1<<10) /* write lock owner */ 860 - #define SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK (1<<28) /* kernel use a TLV callback */ 860 + #define SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK (1<<28) /* kernel use a TLV callback */ 861 861 #define SNDRV_CTL_ELEM_ACCESS_USER (1<<29) /* user space element */ 862 862 /* bits 30 and 31 are obsoleted (for indirect access) */ 863 863
+4 -2
kernel/events/core.c
··· 901 901 cpuctx_entry = &cpuctx->cgrp_cpuctx_entry; 902 902 /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/ 903 903 if (add) { 904 + struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); 905 + 904 906 list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list)); 905 - if (perf_cgroup_from_task(current, ctx) == event->cgrp) 906 - cpuctx->cgrp = event->cgrp; 907 + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) 908 + cpuctx->cgrp = cgrp; 907 909 } else { 908 910 list_del(cpuctx_entry); 909 911 cpuctx->cgrp = NULL;
+20 -3
kernel/futex.c
··· 903 903 */ 904 904 raw_spin_lock_irq(&curr->pi_lock); 905 905 while (!list_empty(head)) { 906 - 907 906 next = head->next; 908 907 pi_state = list_entry(next, struct futex_pi_state, list); 909 908 key = pi_state->key; 910 909 hb = hash_futex(&key); 910 + 911 + /* 912 + * We can race against put_pi_state() removing itself from the 913 + * list (a waiter going away). put_pi_state() will first 914 + * decrement the reference count and then modify the list, so 915 + * its possible to see the list entry but fail this reference 916 + * acquire. 917 + * 918 + * In that case; drop the locks to let put_pi_state() make 919 + * progress and retry the loop. 920 + */ 921 + if (!atomic_inc_not_zero(&pi_state->refcount)) { 922 + raw_spin_unlock_irq(&curr->pi_lock); 923 + cpu_relax(); 924 + raw_spin_lock_irq(&curr->pi_lock); 925 + continue; 926 + } 911 927 raw_spin_unlock_irq(&curr->pi_lock); 912 928 913 929 spin_lock(&hb->lock); ··· 934 918 * task still owns the PI-state: 935 919 */ 936 920 if (head->next != next) { 921 + /* retain curr->pi_lock for the loop invariant */ 937 922 raw_spin_unlock(&pi_state->pi_mutex.wait_lock); 938 923 spin_unlock(&hb->lock); 924 + put_pi_state(pi_state); 939 925 continue; 940 926 } 941 927 ··· 945 927 WARN_ON(list_empty(&pi_state->list)); 946 928 list_del_init(&pi_state->list); 947 929 pi_state->owner = NULL; 948 - raw_spin_unlock(&curr->pi_lock); 949 930 950 - get_pi_state(pi_state); 931 + raw_spin_unlock(&curr->pi_lock); 951 932 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); 952 933 spin_unlock(&hb->lock); 953 934
+1 -5
kernel/sched/cpufreq_schedutil.c
··· 649 649 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); 650 650 651 651 memset(sg_cpu, 0, sizeof(*sg_cpu)); 652 + sg_cpu->cpu = cpu; 652 653 sg_cpu->sg_policy = sg_policy; 653 654 sg_cpu->flags = SCHED_CPUFREQ_RT; 654 655 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; ··· 715 714 716 715 static int __init sugov_register(void) 717 716 { 718 - int cpu; 719 - 720 - for_each_possible_cpu(cpu) 721 - per_cpu(sugov_cpu, cpu).cpu = cpu; 722 - 723 717 return cpufreq_register_governor(&schedutil_gov); 724 718 } 725 719 fs_initcall(sugov_register);
+10 -5
kernel/watchdog_hld.c
··· 13 13 #define pr_fmt(fmt) "NMI watchdog: " fmt 14 14 15 15 #include <linux/nmi.h> 16 + #include <linux/atomic.h> 16 17 #include <linux/module.h> 17 18 #include <linux/sched/debug.h> 18 19 ··· 23 22 static DEFINE_PER_CPU(bool, hard_watchdog_warn); 24 23 static DEFINE_PER_CPU(bool, watchdog_nmi_touch); 25 24 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); 25 + static DEFINE_PER_CPU(struct perf_event *, dead_event); 26 26 static struct cpumask dead_events_mask; 27 27 28 28 static unsigned long hardlockup_allcpu_dumped; 29 - static unsigned int watchdog_cpus; 29 + static atomic_t watchdog_cpus = ATOMIC_INIT(0); 30 30 31 31 void arch_touch_nmi_watchdog(void) 32 32 { ··· 191 189 if (hardlockup_detector_event_create()) 192 190 return; 193 191 194 - if (!watchdog_cpus++) 192 + /* use original value for check */ 193 + if (!atomic_fetch_inc(&watchdog_cpus)) 195 194 pr_info("Enabled. Permanently consumes one hw-PMU counter.\n"); 196 195 197 196 perf_event_enable(this_cpu_read(watchdog_ev)); ··· 207 204 208 205 if (event) { 209 206 perf_event_disable(event); 207 + this_cpu_write(watchdog_ev, NULL); 208 + this_cpu_write(dead_event, event); 210 209 cpumask_set_cpu(smp_processor_id(), &dead_events_mask); 211 - watchdog_cpus--; 210 + atomic_dec(&watchdog_cpus); 212 211 } 213 212 } 214 213 ··· 224 219 int cpu; 225 220 226 221 for_each_cpu(cpu, &dead_events_mask) { 227 - struct perf_event *event = per_cpu(watchdog_ev, cpu); 222 + struct perf_event *event = per_cpu(dead_event, cpu); 228 223 229 224 /* 230 225 * Required because for_each_cpu() reports unconditionally ··· 232 227 */ 233 228 if (event) 234 229 perf_event_release_kernel(event); 235 - per_cpu(watchdog_ev, cpu) = NULL; 230 + per_cpu(dead_event, cpu) = NULL; 236 231 } 237 232 cpumask_clear(&dead_events_mask); 238 233 }
+2 -1
kernel/workqueue_internal.h
··· 10 10 11 11 #include <linux/workqueue.h> 12 12 #include <linux/kthread.h> 13 + #include <linux/preempt.h> 13 14 14 15 struct worker_pool; 15 16 ··· 61 60 */ 62 61 static inline struct worker *current_wq_worker(void) 63 62 { 64 - if (current->flags & PF_WQ_WORKER) 63 + if (in_task() && (current->flags & PF_WQ_WORKER)) 65 64 return kthread_data(current); 66 65 return NULL; 67 66 }
+2 -2
lib/asn1_decoder.c
··· 228 228 hdr = 2; 229 229 230 230 /* Extract a tag from the data */ 231 - if (unlikely(dp >= datalen - 1)) 231 + if (unlikely(datalen - dp < 2)) 232 232 goto data_overrun_error; 233 233 tag = data[dp++]; 234 234 if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) ··· 274 274 int n = len - 0x80; 275 275 if (unlikely(n > 2)) 276 276 goto length_too_long; 277 - if (unlikely(dp >= datalen - n)) 277 + if (unlikely(n > datalen - dp)) 278 278 goto data_overrun_error; 279 279 hdr += n; 280 280 for (len = 0; n > 0; n--) {
+1
net/core/skbuff.c
··· 4869 4869 if (!xnet) 4870 4870 return; 4871 4871 4872 + ipvs_reset(skb); 4872 4873 skb_orphan(skb); 4873 4874 skb->mark = 0; 4874 4875 }
+1 -1
net/ipv4/tcp_input.c
··· 100 100 101 101 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) 102 102 #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) 103 - #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) 103 + #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK) 104 104 #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) 105 105 106 106 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
+9 -15
net/l2tp/l2tp_ip.c
··· 123 123 unsigned char *ptr, *optr; 124 124 struct l2tp_session *session; 125 125 struct l2tp_tunnel *tunnel = NULL; 126 + struct iphdr *iph; 126 127 int length; 127 128 128 129 if (!pskb_may_pull(skb, 4)) ··· 179 178 goto discard; 180 179 181 180 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 182 - tunnel = l2tp_tunnel_find(net, tunnel_id); 183 - if (tunnel) { 184 - sk = tunnel->sock; 185 - sock_hold(sk); 186 - } else { 187 - struct iphdr *iph = (struct iphdr *) skb_network_header(skb); 181 + iph = (struct iphdr *)skb_network_header(skb); 188 182 189 - read_lock_bh(&l2tp_ip_lock); 190 - sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, 191 - inet_iif(skb), tunnel_id); 192 - if (!sk) { 193 - read_unlock_bh(&l2tp_ip_lock); 194 - goto discard; 195 - } 196 - 197 - sock_hold(sk); 183 + read_lock_bh(&l2tp_ip_lock); 184 + sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb), 185 + tunnel_id); 186 + if (!sk) { 198 187 read_unlock_bh(&l2tp_ip_lock); 188 + goto discard; 199 189 } 190 + sock_hold(sk); 191 + read_unlock_bh(&l2tp_ip_lock); 200 192 201 193 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 202 194 goto discard_put;
+9 -15
net/l2tp/l2tp_ip6.c
··· 136 136 unsigned char *ptr, *optr; 137 137 struct l2tp_session *session; 138 138 struct l2tp_tunnel *tunnel = NULL; 139 + struct ipv6hdr *iph; 139 140 int length; 140 141 141 142 if (!pskb_may_pull(skb, 4)) ··· 193 192 goto discard; 194 193 195 194 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 196 - tunnel = l2tp_tunnel_find(net, tunnel_id); 197 - if (tunnel) { 198 - sk = tunnel->sock; 199 - sock_hold(sk); 200 - } else { 201 - struct ipv6hdr *iph = ipv6_hdr(skb); 195 + iph = ipv6_hdr(skb); 202 196 203 - read_lock_bh(&l2tp_ip6_lock); 204 - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, 205 - inet6_iif(skb), tunnel_id); 206 - if (!sk) { 207 - read_unlock_bh(&l2tp_ip6_lock); 208 - goto discard; 209 - } 210 - 211 - sock_hold(sk); 197 + read_lock_bh(&l2tp_ip6_lock); 198 + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, 199 + inet6_iif(skb), tunnel_id); 200 + if (!sk) { 212 201 read_unlock_bh(&l2tp_ip6_lock); 202 + goto discard; 213 203 } 204 + sock_hold(sk); 205 + read_unlock_bh(&l2tp_ip6_lock); 214 206 215 207 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 216 208 goto discard_put;
+1 -1
net/qrtr/qrtr.c
··· 1120 1120 1121 1121 return 0; 1122 1122 } 1123 - module_init(qrtr_proto_init); 1123 + postcore_initcall(qrtr_proto_init); 1124 1124 1125 1125 static void __exit qrtr_proto_fini(void) 1126 1126 {
-2
net/sched/act_api.c
··· 80 80 spin_lock_bh(&idrinfo->lock); 81 81 idr_remove_ext(&idrinfo->action_idr, p->tcfa_index); 82 82 spin_unlock_bh(&idrinfo->lock); 83 - put_net(idrinfo->net); 84 83 gen_kill_estimator(&p->tcfa_rate_est); 85 84 free_tcf(p); 86 85 } ··· 338 339 p->idrinfo = idrinfo; 339 340 p->ops = ops; 340 341 INIT_LIST_HEAD(&p->list); 341 - get_net(idrinfo->net); 342 342 *a = p; 343 343 return 0; 344 344 }
+1 -1
net/sched/act_bpf.c
··· 398 398 { 399 399 struct tc_action_net *tn = net_generic(net, bpf_net_id); 400 400 401 - return tc_action_net_init(tn, &act_bpf_ops, net); 401 + return tc_action_net_init(tn, &act_bpf_ops); 402 402 } 403 403 404 404 static void __net_exit bpf_exit_net(struct net *net)
+1 -1
net/sched/act_connmark.c
··· 206 206 { 207 207 struct tc_action_net *tn = net_generic(net, connmark_net_id); 208 208 209 - return tc_action_net_init(tn, &act_connmark_ops, net); 209 + return tc_action_net_init(tn, &act_connmark_ops); 210 210 } 211 211 212 212 static void __net_exit connmark_exit_net(struct net *net)
+1 -1
net/sched/act_csum.c
··· 626 626 { 627 627 struct tc_action_net *tn = net_generic(net, csum_net_id); 628 628 629 - return tc_action_net_init(tn, &act_csum_ops, net); 629 + return tc_action_net_init(tn, &act_csum_ops); 630 630 } 631 631 632 632 static void __net_exit csum_exit_net(struct net *net)
+1 -1
net/sched/act_gact.c
··· 232 232 { 233 233 struct tc_action_net *tn = net_generic(net, gact_net_id); 234 234 235 - return tc_action_net_init(tn, &act_gact_ops, net); 235 + return tc_action_net_init(tn, &act_gact_ops); 236 236 } 237 237 238 238 static void __net_exit gact_exit_net(struct net *net)
+1 -1
net/sched/act_ife.c
··· 855 855 { 856 856 struct tc_action_net *tn = net_generic(net, ife_net_id); 857 857 858 - return tc_action_net_init(tn, &act_ife_ops, net); 858 + return tc_action_net_init(tn, &act_ife_ops); 859 859 } 860 860 861 861 static void __net_exit ife_exit_net(struct net *net)
+2 -2
net/sched/act_ipt.c
··· 334 334 { 335 335 struct tc_action_net *tn = net_generic(net, ipt_net_id); 336 336 337 - return tc_action_net_init(tn, &act_ipt_ops, net); 337 + return tc_action_net_init(tn, &act_ipt_ops); 338 338 } 339 339 340 340 static void __net_exit ipt_exit_net(struct net *net) ··· 384 384 { 385 385 struct tc_action_net *tn = net_generic(net, xt_net_id); 386 386 387 - return tc_action_net_init(tn, &act_xt_ops, net); 387 + return tc_action_net_init(tn, &act_xt_ops); 388 388 } 389 389 390 390 static void __net_exit xt_exit_net(struct net *net)
+1 -1
net/sched/act_mirred.c
··· 340 340 { 341 341 struct tc_action_net *tn = net_generic(net, mirred_net_id); 342 342 343 - return tc_action_net_init(tn, &act_mirred_ops, net); 343 + return tc_action_net_init(tn, &act_mirred_ops); 344 344 } 345 345 346 346 static void __net_exit mirred_exit_net(struct net *net)
+1 -1
net/sched/act_nat.c
··· 307 307 { 308 308 struct tc_action_net *tn = net_generic(net, nat_net_id); 309 309 310 - return tc_action_net_init(tn, &act_nat_ops, net); 310 + return tc_action_net_init(tn, &act_nat_ops); 311 311 } 312 312 313 313 static void __net_exit nat_exit_net(struct net *net)
+1 -1
net/sched/act_pedit.c
··· 450 450 { 451 451 struct tc_action_net *tn = net_generic(net, pedit_net_id); 452 452 453 - return tc_action_net_init(tn, &act_pedit_ops, net); 453 + return tc_action_net_init(tn, &act_pedit_ops); 454 454 } 455 455 456 456 static void __net_exit pedit_exit_net(struct net *net)
+1 -1
net/sched/act_police.c
··· 331 331 { 332 332 struct tc_action_net *tn = net_generic(net, police_net_id); 333 333 334 - return tc_action_net_init(tn, &act_police_ops, net); 334 + return tc_action_net_init(tn, &act_police_ops); 335 335 } 336 336 337 337 static void __net_exit police_exit_net(struct net *net)
+1 -1
net/sched/act_sample.c
··· 240 240 { 241 241 struct tc_action_net *tn = net_generic(net, sample_net_id); 242 242 243 - return tc_action_net_init(tn, &act_sample_ops, net); 243 + return tc_action_net_init(tn, &act_sample_ops); 244 244 } 245 245 246 246 static void __net_exit sample_exit_net(struct net *net)
+1 -1
net/sched/act_simple.c
··· 201 201 { 202 202 struct tc_action_net *tn = net_generic(net, simp_net_id); 203 203 204 - return tc_action_net_init(tn, &act_simp_ops, net); 204 + return tc_action_net_init(tn, &act_simp_ops); 205 205 } 206 206 207 207 static void __net_exit simp_exit_net(struct net *net)
+1 -1
net/sched/act_skbedit.c
··· 238 238 { 239 239 struct tc_action_net *tn = net_generic(net, skbedit_net_id); 240 240 241 - return tc_action_net_init(tn, &act_skbedit_ops, net); 241 + return tc_action_net_init(tn, &act_skbedit_ops); 242 242 } 243 243 244 244 static void __net_exit skbedit_exit_net(struct net *net)
+1 -1
net/sched/act_skbmod.c
··· 263 263 { 264 264 struct tc_action_net *tn = net_generic(net, skbmod_net_id); 265 265 266 - return tc_action_net_init(tn, &act_skbmod_ops, net); 266 + return tc_action_net_init(tn, &act_skbmod_ops); 267 267 } 268 268 269 269 static void __net_exit skbmod_exit_net(struct net *net)
+1 -1
net/sched/act_tunnel_key.c
··· 322 322 { 323 323 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); 324 324 325 - return tc_action_net_init(tn, &act_tunnel_key_ops, net); 325 + return tc_action_net_init(tn, &act_tunnel_key_ops); 326 326 } 327 327 328 328 static void __net_exit tunnel_key_exit_net(struct net *net)
+1 -1
net/sched/act_vlan.c
··· 269 269 { 270 270 struct tc_action_net *tn = net_generic(net, vlan_net_id); 271 271 272 - return tc_action_net_init(tn, &act_vlan_ops, net); 272 + return tc_action_net_init(tn, &act_vlan_ops); 273 273 } 274 274 275 275 static void __net_exit vlan_exit_net(struct net *net)
+1
net/sched/cls_api.c
··· 1110 1110 exts->actions[i++] = act; 1111 1111 exts->nr_actions = i; 1112 1112 } 1113 + exts->net = net; 1113 1114 } 1114 1115 #else 1115 1116 if ((exts->action && tb[exts->action]) ||
+15 -5
net/sched/cls_basic.c
··· 87 87 return 0; 88 88 } 89 89 90 + static void __basic_delete_filter(struct basic_filter *f) 91 + { 92 + tcf_exts_destroy(&f->exts); 93 + tcf_em_tree_destroy(&f->ematches); 94 + tcf_exts_put_net(&f->exts); 95 + kfree(f); 96 + } 97 + 90 98 static void basic_delete_filter_work(struct work_struct *work) 91 99 { 92 100 struct basic_filter *f = container_of(work, struct basic_filter, work); 93 101 94 102 rtnl_lock(); 95 - tcf_exts_destroy(&f->exts); 96 - tcf_em_tree_destroy(&f->ematches); 103 + __basic_delete_filter(f); 97 104 rtnl_unlock(); 98 - 99 - kfree(f); 100 105 } 101 106 102 107 static void basic_delete_filter(struct rcu_head *head) ··· 121 116 list_del_rcu(&f->link); 122 117 tcf_unbind_filter(tp, &f->res); 123 118 idr_remove_ext(&head->handle_idr, f->handle); 124 - call_rcu(&f->rcu, basic_delete_filter); 119 + if (tcf_exts_get_net(&f->exts)) 120 + call_rcu(&f->rcu, basic_delete_filter); 121 + else 122 + __basic_delete_filter(f); 125 123 } 126 124 idr_destroy(&head->handle_idr); 127 125 kfree_rcu(head, rcu); ··· 138 130 list_del_rcu(&f->link); 139 131 tcf_unbind_filter(tp, &f->res); 140 132 idr_remove_ext(&head->handle_idr, f->handle); 133 + tcf_exts_get_net(&f->exts); 141 134 call_rcu(&f->rcu, basic_delete_filter); 142 135 *last = list_empty(&head->flist); 143 136 return 0; ··· 234 225 idr_replace_ext(&head->handle_idr, fnew, fnew->handle); 235 226 list_replace_rcu(&fold->link, &fnew->link); 236 227 tcf_unbind_filter(tp, &fold->res); 228 + tcf_exts_get_net(&fold->exts); 237 229 call_rcu(&fold->rcu, basic_delete_filter); 238 230 } else { 239 231 list_add_rcu(&fnew->link, &head->flist);
+6 -1
net/sched/cls_bpf.c
··· 261 261 static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) 262 262 { 263 263 tcf_exts_destroy(&prog->exts); 264 + tcf_exts_put_net(&prog->exts); 264 265 265 266 if (cls_bpf_is_ebpf(prog)) 266 267 bpf_prog_put(prog->filter); ··· 298 297 cls_bpf_stop_offload(tp, prog); 299 298 list_del_rcu(&prog->link); 300 299 tcf_unbind_filter(tp, &prog->res); 301 - call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu); 300 + if (tcf_exts_get_net(&prog->exts)) 301 + call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu); 302 + else 303 + __cls_bpf_delete_prog(prog); 302 304 } 303 305 304 306 static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last) ··· 530 526 idr_replace_ext(&head->handle_idr, prog, handle); 531 527 list_replace_rcu(&oldprog->link, &prog->link); 532 528 tcf_unbind_filter(tp, &oldprog->res); 529 + tcf_exts_get_net(&oldprog->exts); 533 530 call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu); 534 531 } else { 535 532 list_add_rcu(&prog->link, &head->plist);
+18 -6
net/sched/cls_cgroup.c
··· 60 60 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, 61 61 }; 62 62 63 + static void __cls_cgroup_destroy(struct cls_cgroup_head *head) 64 + { 65 + tcf_exts_destroy(&head->exts); 66 + tcf_em_tree_destroy(&head->ematches); 67 + tcf_exts_put_net(&head->exts); 68 + kfree(head); 69 + } 70 + 63 71 static void cls_cgroup_destroy_work(struct work_struct *work) 64 72 { 65 73 struct cls_cgroup_head *head = container_of(work, 66 74 struct cls_cgroup_head, 67 75 work); 68 76 rtnl_lock(); 69 - tcf_exts_destroy(&head->exts); 70 - tcf_em_tree_destroy(&head->ematches); 71 - kfree(head); 77 + __cls_cgroup_destroy(head); 72 78 rtnl_unlock(); 73 79 } 74 80 ··· 130 124 goto errout; 131 125 132 126 rcu_assign_pointer(tp->root, new); 133 - if (head) 127 + if (head) { 128 + tcf_exts_get_net(&head->exts); 134 129 call_rcu(&head->rcu, cls_cgroup_destroy_rcu); 130 + } 135 131 return 0; 136 132 errout: 137 133 tcf_exts_destroy(&new->exts); ··· 146 138 struct cls_cgroup_head *head = rtnl_dereference(tp->root); 147 139 148 140 /* Head can still be NULL due to cls_cgroup_init(). */ 149 - if (head) 150 - call_rcu(&head->rcu, cls_cgroup_destroy_rcu); 141 + if (head) { 142 + if (tcf_exts_get_net(&head->exts)) 143 + call_rcu(&head->rcu, cls_cgroup_destroy_rcu); 144 + else 145 + __cls_cgroup_destroy(head); 146 + } 151 147 } 152 148 153 149 static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last)
+18 -6
net/sched/cls_flow.c
··· 372 372 [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, 373 373 }; 374 374 375 + static void __flow_destroy_filter(struct flow_filter *f) 376 + { 377 + del_timer_sync(&f->perturb_timer); 378 + tcf_exts_destroy(&f->exts); 379 + tcf_em_tree_destroy(&f->ematches); 380 + tcf_exts_put_net(&f->exts); 381 + kfree(f); 382 + } 383 + 375 384 static void flow_destroy_filter_work(struct work_struct *work) 376 385 { 377 386 struct flow_filter *f = container_of(work, struct flow_filter, work); 378 387 379 388 rtnl_lock(); 380 - del_timer_sync(&f->perturb_timer); 381 - tcf_exts_destroy(&f->exts); 382 - tcf_em_tree_destroy(&f->ematches); 383 - kfree(f); 389 + __flow_destroy_filter(f); 384 390 rtnl_unlock(); 385 391 } 386 392 ··· 560 554 561 555 *arg = fnew; 562 556 563 - if (fold) 557 + if (fold) { 558 + tcf_exts_get_net(&fold->exts); 564 559 call_rcu(&fold->rcu, flow_destroy_filter); 560 + } 565 561 return 0; 566 562 567 563 err2: ··· 580 572 struct flow_filter *f = arg; 581 573 582 574 list_del_rcu(&f->list); 575 + tcf_exts_get_net(&f->exts); 583 576 call_rcu(&f->rcu, flow_destroy_filter); 584 577 *last = list_empty(&head->filters); 585 578 return 0; ··· 605 596 606 597 list_for_each_entry_safe(f, next, &head->filters, list) { 607 598 list_del_rcu(&f->list); 608 - call_rcu(&f->rcu, flow_destroy_filter); 599 + if (tcf_exts_get_net(&f->exts)) 600 + call_rcu(&f->rcu, flow_destroy_filter); 601 + else 602 + __flow_destroy_filter(f); 609 603 } 610 604 kfree_rcu(head, rcu); 611 605 }
+13 -3
net/sched/cls_flower.c
··· 193 193 return 0; 194 194 } 195 195 196 + static void __fl_destroy_filter(struct cls_fl_filter *f) 197 + { 198 + tcf_exts_destroy(&f->exts); 199 + tcf_exts_put_net(&f->exts); 200 + kfree(f); 201 + } 202 + 196 203 static void fl_destroy_filter_work(struct work_struct *work) 197 204 { 198 205 struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work); 199 206 200 207 rtnl_lock(); 201 - tcf_exts_destroy(&f->exts); 202 - kfree(f); 208 + __fl_destroy_filter(f); 203 209 rtnl_unlock(); 204 210 } 205 211 ··· 288 282 if (!tc_skip_hw(f->flags)) 289 283 fl_hw_destroy_filter(tp, f); 290 284 tcf_unbind_filter(tp, &f->res); 291 - call_rcu(&f->rcu, fl_destroy_filter); 285 + if (tcf_exts_get_net(&f->exts)) 286 + call_rcu(&f->rcu, fl_destroy_filter); 287 + else 288 + __fl_destroy_filter(f); 292 289 } 293 290 294 291 static void fl_destroy_sleepable(struct work_struct *work) ··· 961 952 idr_replace_ext(&head->handle_idr, fnew, fnew->handle); 962 953 list_replace_rcu(&fold->list, &fnew->list); 963 954 tcf_unbind_filter(tp, &fold->res); 955 + tcf_exts_get_net(&fold->exts); 964 956 call_rcu(&fold->rcu, fl_destroy_filter); 965 957 } else { 966 958 list_add_tail_rcu(&fnew->list, &head->filters);
+14 -3
net/sched/cls_fw.c
··· 125 125 return 0; 126 126 } 127 127 128 + static void __fw_delete_filter(struct fw_filter *f) 129 + { 130 + tcf_exts_destroy(&f->exts); 131 + tcf_exts_put_net(&f->exts); 132 + kfree(f); 133 + } 134 + 128 135 static void fw_delete_filter_work(struct work_struct *work) 129 136 { 130 137 struct fw_filter *f = container_of(work, struct fw_filter, work); 131 138 132 139 rtnl_lock(); 133 - tcf_exts_destroy(&f->exts); 134 - kfree(f); 140 + __fw_delete_filter(f); 135 141 rtnl_unlock(); 136 142 } 137 143 ··· 163 157 RCU_INIT_POINTER(head->ht[h], 164 158 rtnl_dereference(f->next)); 165 159 tcf_unbind_filter(tp, &f->res); 166 - call_rcu(&f->rcu, fw_delete_filter); 160 + if (tcf_exts_get_net(&f->exts)) 161 + call_rcu(&f->rcu, fw_delete_filter); 162 + else 163 + __fw_delete_filter(f); 167 164 } 168 165 } 169 166 kfree_rcu(head, rcu); ··· 191 182 if (pfp == f) { 192 183 RCU_INIT_POINTER(*fp, rtnl_dereference(f->next)); 193 184 tcf_unbind_filter(tp, &f->res); 185 + tcf_exts_get_net(&f->exts); 194 186 call_rcu(&f->rcu, fw_delete_filter); 195 187 ret = 0; 196 188 break; ··· 312 302 RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next)); 313 303 rcu_assign_pointer(*fp, fnew); 314 304 tcf_unbind_filter(tp, &f->res); 305 + tcf_exts_get_net(&f->exts); 315 306 call_rcu(&f->rcu, fw_delete_filter); 316 307 317 308 *arg = fnew;
+12 -3
net/sched/cls_matchall.c
··· 44 44 return 0; 45 45 } 46 46 47 + static void __mall_destroy(struct cls_mall_head *head) 48 + { 49 + tcf_exts_destroy(&head->exts); 50 + tcf_exts_put_net(&head->exts); 51 + kfree(head); 52 + } 53 + 47 54 static void mall_destroy_work(struct work_struct *work) 48 55 { 49 56 struct cls_mall_head *head = container_of(work, struct cls_mall_head, 50 57 work); 51 58 rtnl_lock(); 52 - tcf_exts_destroy(&head->exts); 53 - kfree(head); 59 + __mall_destroy(head); 54 60 rtnl_unlock(); 55 61 } 56 62 ··· 122 116 if (!tc_skip_hw(head->flags)) 123 117 mall_destroy_hw_filter(tp, head, (unsigned long) head); 124 118 125 - call_rcu(&head->rcu, mall_destroy_rcu); 119 + if (tcf_exts_get_net(&head->exts)) 120 + call_rcu(&head->rcu, mall_destroy_rcu); 121 + else 122 + __mall_destroy(head); 126 123 } 127 124 128 125 static void *mall_get(struct tcf_proto *tp, u32 handle)
+14 -3
net/sched/cls_route.c
··· 257 257 return 0; 258 258 } 259 259 260 + static void __route4_delete_filter(struct route4_filter *f) 261 + { 262 + tcf_exts_destroy(&f->exts); 263 + tcf_exts_put_net(&f->exts); 264 + kfree(f); 265 + } 266 + 260 267 static void route4_delete_filter_work(struct work_struct *work) 261 268 { 262 269 struct route4_filter *f = container_of(work, struct route4_filter, work); 263 270 264 271 rtnl_lock(); 265 - tcf_exts_destroy(&f->exts); 266 - kfree(f); 272 + __route4_delete_filter(f); 267 273 rtnl_unlock(); 268 274 } 269 275 ··· 303 297 next = rtnl_dereference(f->next); 304 298 RCU_INIT_POINTER(b->ht[h2], next); 305 299 tcf_unbind_filter(tp, &f->res); 306 - call_rcu(&f->rcu, route4_delete_filter); 300 + if (tcf_exts_get_net(&f->exts)) 301 + call_rcu(&f->rcu, route4_delete_filter); 302 + else 303 + __route4_delete_filter(f); 307 304 } 308 305 } 309 306 RCU_INIT_POINTER(head->table[h1], NULL); ··· 347 338 348 339 /* Delete it */ 349 340 tcf_unbind_filter(tp, &f->res); 341 + tcf_exts_get_net(&f->exts); 350 342 call_rcu(&f->rcu, route4_delete_filter); 351 343 352 344 /* Strip RTNL protected tree */ ··· 551 541 *arg = f; 552 542 if (fold) { 553 543 tcf_unbind_filter(tp, &fold->res); 544 + tcf_exts_get_net(&fold->exts); 554 545 call_rcu(&fold->rcu, route4_delete_filter); 555 546 } 556 547 return 0;
+12 -3
net/sched/cls_rsvp.h
··· 285 285 return -ENOBUFS; 286 286 } 287 287 288 + static void __rsvp_delete_filter(struct rsvp_filter *f) 289 + { 290 + tcf_exts_destroy(&f->exts); 291 + tcf_exts_put_net(&f->exts); 292 + kfree(f); 293 + } 294 + 288 295 static void rsvp_delete_filter_work(struct work_struct *work) 289 296 { 290 297 struct rsvp_filter *f = container_of(work, struct rsvp_filter, work); 291 298 292 299 rtnl_lock(); 293 - tcf_exts_destroy(&f->exts); 294 - kfree(f); 300 + __rsvp_delete_filter(f); 295 301 rtnl_unlock(); 296 302 } 297 303 ··· 316 310 * grace period, since converted-to-rcu actions are relying on that 317 311 * in cleanup() callback 318 312 */ 319 - call_rcu(&f->rcu, rsvp_delete_filter_rcu); 313 + if (tcf_exts_get_net(&f->exts)) 314 + call_rcu(&f->rcu, rsvp_delete_filter_rcu); 315 + else 316 + __rsvp_delete_filter(f); 320 317 } 321 318 322 319 static void rsvp_destroy(struct tcf_proto *tp)
+26 -7
net/sched/cls_tcindex.c
··· 142 142 return 0; 143 143 } 144 144 145 + static void __tcindex_destroy_rexts(struct tcindex_filter_result *r) 146 + { 147 + tcf_exts_destroy(&r->exts); 148 + tcf_exts_put_net(&r->exts); 149 + } 150 + 145 151 static void tcindex_destroy_rexts_work(struct work_struct *work) 146 152 { 147 153 struct tcindex_filter_result *r; 148 154 149 155 r = container_of(work, struct tcindex_filter_result, work); 150 156 rtnl_lock(); 151 - tcf_exts_destroy(&r->exts); 157 + __tcindex_destroy_rexts(r); 152 158 rtnl_unlock(); 153 159 } 154 160 ··· 167 161 tcf_queue_work(&r->work); 168 162 } 169 163 164 + static void __tcindex_destroy_fexts(struct tcindex_filter *f) 165 + { 166 + tcf_exts_destroy(&f->result.exts); 167 + tcf_exts_put_net(&f->result.exts); 168 + kfree(f); 169 + } 170 + 170 171 static void tcindex_destroy_fexts_work(struct work_struct *work) 171 172 { 172 173 struct tcindex_filter *f = container_of(work, struct tcindex_filter, 173 174 work); 174 175 175 176 rtnl_lock(); 176 - tcf_exts_destroy(&f->result.exts); 177 - kfree(f); 177 + __tcindex_destroy_fexts(f); 178 178 rtnl_unlock(); 179 179 } 180 180 ··· 225 213 * grace period, since converted-to-rcu actions are relying on that 226 214 * in cleanup() callback 227 215 */ 228 - if (f) 229 - call_rcu(&f->rcu, tcindex_destroy_fexts); 230 - else 231 - call_rcu(&r->rcu, tcindex_destroy_rexts); 216 + if (f) { 217 + if (tcf_exts_get_net(&f->result.exts)) 218 + call_rcu(&f->rcu, tcindex_destroy_fexts); 219 + else 220 + __tcindex_destroy_fexts(f); 221 + } else { 222 + if (tcf_exts_get_net(&r->exts)) 223 + call_rcu(&r->rcu, tcindex_destroy_rexts); 224 + else 225 + __tcindex_destroy_rexts(r); 226 + } 232 227 233 228 *last = false; 234 229 return 0;
+7 -1
net/sched/cls_u32.c
··· 399 399 bool free_pf) 400 400 { 401 401 tcf_exts_destroy(&n->exts); 402 + tcf_exts_put_net(&n->exts); 402 403 if (n->ht_down) 403 404 n->ht_down->refcnt--; 404 405 #ifdef CONFIG_CLS_U32_PERF ··· 477 476 RCU_INIT_POINTER(*kp, key->next); 478 477 479 478 tcf_unbind_filter(tp, &key->res); 479 + tcf_exts_get_net(&key->exts); 480 480 call_rcu(&key->rcu, u32_delete_key_freepf_rcu); 481 481 return 0; 482 482 } ··· 592 590 tcf_unbind_filter(tp, &n->res); 593 591 u32_remove_hw_knode(tp, n->handle); 594 592 idr_remove_ext(&ht->handle_idr, n->handle); 595 - call_rcu(&n->rcu, u32_delete_key_freepf_rcu); 593 + if (tcf_exts_get_net(&n->exts)) 594 + call_rcu(&n->rcu, u32_delete_key_freepf_rcu); 595 + else 596 + u32_destroy_key(n->tp, n, true); 596 597 } 597 598 } 598 599 } ··· 954 949 955 950 u32_replace_knode(tp, tp_c, new); 956 951 tcf_unbind_filter(tp, &n->res); 952 + tcf_exts_get_net(&n->exts); 957 953 call_rcu(&n->rcu, u32_delete_key_rcu); 958 954 return 0; 959 955 }
+2 -2
net/xfrm/xfrm_input.c
··· 266 266 goto lock; 267 267 } 268 268 269 - daddr = (xfrm_address_t *)(skb_network_header(skb) + 270 - XFRM_SPI_SKB_CB(skb)->daddroff); 271 269 family = XFRM_SPI_SKB_CB(skb)->family; 272 270 273 271 /* if tunnel is present override skb->mark value with tunnel i_key */ ··· 292 294 goto drop; 293 295 } 294 296 297 + daddr = (xfrm_address_t *)(skb_network_header(skb) + 298 + XFRM_SPI_SKB_CB(skb)->daddroff); 295 299 do { 296 300 if (skb->sp->len == XFRM_MAX_DEPTH) { 297 301 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+39 -40
net/xfrm/xfrm_policy.c
··· 1360 1360 struct net *net = xp_net(policy); 1361 1361 int nx; 1362 1362 int i, error; 1363 - xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 1364 - xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 1365 1363 xfrm_address_t tmp; 1366 1364 1367 1365 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) { 1368 1366 struct xfrm_state *x; 1369 - xfrm_address_t *remote = daddr; 1370 - xfrm_address_t *local = saddr; 1367 + xfrm_address_t *local; 1368 + xfrm_address_t *remote; 1371 1369 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 1372 1370 1373 - if (tmpl->mode == XFRM_MODE_TUNNEL || 1374 - tmpl->mode == XFRM_MODE_BEET) { 1375 - remote = &tmpl->id.daddr; 1376 - local = &tmpl->saddr; 1377 - if (xfrm_addr_any(local, tmpl->encap_family)) { 1378 - error = xfrm_get_saddr(net, fl->flowi_oif, 1379 - &tmp, remote, 1380 - tmpl->encap_family, 0); 1381 - if (error) 1382 - goto fail; 1383 - local = &tmp; 1384 - } 1371 + remote = &tmpl->id.daddr; 1372 + local = &tmpl->saddr; 1373 + if (xfrm_addr_any(local, tmpl->encap_family)) { 1374 + error = xfrm_get_saddr(net, fl->flowi_oif, 1375 + &tmp, remote, 1376 + tmpl->encap_family, 0); 1377 + if (error) 1378 + goto fail; 1379 + local = &tmp; 1385 1380 } 1386 1381 1387 1382 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); 1388 1383 1389 1384 if (x && x->km.state == XFRM_STATE_VALID) { 1390 1385 xfrm[nx++] = x; 1391 - daddr = remote; 1392 - saddr = local; 1393 1386 continue; 1394 1387 } 1395 1388 if (x) { ··· 1779 1786 put_online_cpus(); 1780 1787 } 1781 1788 1782 - static bool xfrm_pol_dead(struct xfrm_dst *xdst) 1789 + static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst, 1790 + struct xfrm_state * const xfrm[], 1791 + int num) 1783 1792 { 1784 - unsigned int num_pols = xdst->num_pols; 1785 - unsigned int pol_dead = 0, i; 1793 + const struct dst_entry *dst = &xdst->u.dst; 1794 + int i; 1786 1795 1787 - for (i = 0; i < num_pols; i++) 1788 - pol_dead |= xdst->pols[i]->walk.dead; 1796 + if (xdst->num_xfrms != num) 
1797 + return false; 1789 1798 1790 - /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */ 1791 - if (pol_dead) 1792 - xdst->u.dst.obsolete = DST_OBSOLETE_DEAD; 1799 + for (i = 0; i < num; i++) { 1800 + if (!dst || dst->xfrm != xfrm[i]) 1801 + return false; 1802 + dst = dst->child; 1803 + } 1793 1804 1794 - return pol_dead; 1805 + return xfrm_bundle_ok(xdst); 1795 1806 } 1796 1807 1797 1808 static struct xfrm_dst * ··· 1809 1812 struct dst_entry *dst; 1810 1813 int err; 1811 1814 1812 - xdst = this_cpu_read(xfrm_last_dst); 1813 - if (xdst && 1814 - xdst->u.dst.dev == dst_orig->dev && 1815 - xdst->num_pols == num_pols && 1816 - !xfrm_pol_dead(xdst) && 1817 - memcmp(xdst->pols, pols, 1818 - sizeof(struct xfrm_policy *) * num_pols) == 0 && 1819 - xfrm_bundle_ok(xdst)) { 1820 - dst_hold(&xdst->u.dst); 1821 - return xdst; 1822 - } 1823 - 1824 - old = xdst; 1825 1815 /* Try to instantiate a bundle */ 1826 1816 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 1827 1817 if (err <= 0) { ··· 1816 1832 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1817 1833 return ERR_PTR(err); 1818 1834 } 1835 + 1836 + xdst = this_cpu_read(xfrm_last_dst); 1837 + if (xdst && 1838 + xdst->u.dst.dev == dst_orig->dev && 1839 + xdst->num_pols == num_pols && 1840 + memcmp(xdst->pols, pols, 1841 + sizeof(struct xfrm_policy *) * num_pols) == 0 && 1842 + xfrm_xdst_can_reuse(xdst, xfrm, err)) { 1843 + dst_hold(&xdst->u.dst); 1844 + while (err > 0) 1845 + xfrm_state_put(xfrm[--err]); 1846 + return xdst; 1847 + } 1848 + 1849 + old = xdst; 1819 1850 1820 1851 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); 1821 1852 if (IS_ERR(dst)) {
+305
scripts/leaking_addresses.pl
··· 1 + #!/usr/bin/env perl 2 + # 3 + # (c) 2017 Tobin C. Harding <me@tobin.cc> 4 + # Licensed under the terms of the GNU GPL License version 2 5 + # 6 + # leaking_addresses.pl: Scan 64 bit kernel for potential leaking addresses. 7 + # - Scans dmesg output. 8 + # - Walks directory tree and parses each file (for each directory in @DIRS). 9 + # 10 + # You can configure the behaviour of the script; 11 + # 12 + # - By adding paths, for directories you do not want to walk; 13 + # absolute paths: @skip_walk_dirs_abs 14 + # directory names: @skip_walk_dirs_any 15 + # 16 + # - By adding paths, for files you do not want to parse; 17 + # absolute paths: @skip_parse_files_abs 18 + # file names: @skip_parse_files_any 19 + # 20 + # The use of @skip_xxx_xxx_any causes files to be skipped where ever they occur. 21 + # For example adding 'fd' to @skip_walk_dirs_any causes the fd/ directory to be 22 + # skipped for all PID sub-directories of /proc 23 + # 24 + # The same thing can be achieved by passing command line options to --dont-walk 25 + # and --dont-parse. If absolute paths are supplied to these options they are 26 + # appended to the @skip_xxx_xxx_abs arrays. If file names are supplied to these 27 + # options, they are appended to the @skip_xxx_xxx_any arrays. 28 + # 29 + # Use --debug to output path before parsing, this is useful to find files that 30 + # cause the script to choke. 31 + # 32 + # You may like to set kptr_restrict=2 before running script 33 + # (see Documentation/sysctl/kernel.txt). 34 + 35 + use warnings; 36 + use strict; 37 + use POSIX; 38 + use File::Basename; 39 + use File::Spec; 40 + use Cwd 'abs_path'; 41 + use Term::ANSIColor qw(:constants); 42 + use Getopt::Long qw(:config no_auto_abbrev); 43 + 44 + my $P = $0; 45 + my $V = '0.01'; 46 + 47 + # Directories to scan. 48 + my @DIRS = ('/proc', '/sys'); 49 + 50 + # Command line options. 
51 + my $help = 0; 52 + my $debug = 0; 53 + my @dont_walk = (); 54 + my @dont_parse = (); 55 + 56 + # Do not parse these files (absolute path). 57 + my @skip_parse_files_abs = ('/proc/kmsg', 58 + '/proc/kcore', 59 + '/proc/fs/ext4/sdb1/mb_groups', 60 + '/proc/1/fd/3', 61 + '/sys/kernel/debug/tracing/trace_pipe', 62 + '/sys/kernel/security/apparmor/revision'); 63 + 64 + # Do not parse thes files under any subdirectory. 65 + my @skip_parse_files_any = ('0', 66 + '1', 67 + '2', 68 + 'pagemap', 69 + 'events', 70 + 'access', 71 + 'registers', 72 + 'snapshot_raw', 73 + 'trace_pipe_raw', 74 + 'ptmx', 75 + 'trace_pipe'); 76 + 77 + # Do not walk these directories (absolute path). 78 + my @skip_walk_dirs_abs = (); 79 + 80 + # Do not walk these directories under any subdirectory. 81 + my @skip_walk_dirs_any = ('self', 82 + 'thread-self', 83 + 'cwd', 84 + 'fd', 85 + 'stderr', 86 + 'stdin', 87 + 'stdout'); 88 + 89 + sub help 90 + { 91 + my ($exitcode) = @_; 92 + 93 + print << "EOM"; 94 + Usage: $P [OPTIONS] 95 + Version: $V 96 + 97 + Options: 98 + 99 + --dont-walk=<dir> Don't walk tree starting at <dir>. 100 + --dont-parse=<file> Don't parse <file>. 101 + -d, --debug Display debugging output. 102 + -h, --help, --version Display this help and exit. 103 + 104 + If an absolute path is passed to --dont_XXX then this path is skipped. If a 105 + single filename is passed then this file/directory will be skipped when 106 + appearing under any subdirectory. 107 + 108 + Example: 109 + 110 + # Just scan dmesg output. 111 + scripts/leaking_addresses.pl --dont_walk_abs /proc --dont_walk_abs /sys 112 + 113 + Scans the running (64 bit) kernel for potential leaking addresses. 
114 + 115 + EOM 116 + exit($exitcode); 117 + } 118 + 119 + GetOptions( 120 + 'dont-walk=s' => \@dont_walk, 121 + 'dont-parse=s' => \@dont_parse, 122 + 'd|debug' => \$debug, 123 + 'h|help' => \$help, 124 + 'version' => \$help 125 + ) or help(1); 126 + 127 + help(0) if ($help); 128 + 129 + push_to_global(); 130 + 131 + parse_dmesg(); 132 + walk(@DIRS); 133 + 134 + exit 0; 135 + 136 + sub debug_arrays 137 + { 138 + print 'dirs_any: ' . join(", ", @skip_walk_dirs_any) . "\n"; 139 + print 'dirs_abs: ' . join(", ", @skip_walk_dirs_abs) . "\n"; 140 + print 'parse_any: ' . join(", ", @skip_parse_files_any) . "\n"; 141 + print 'parse_abs: ' . join(", ", @skip_parse_files_abs) . "\n"; 142 + } 143 + 144 + sub dprint 145 + { 146 + printf(STDERR @_) if $debug; 147 + } 148 + 149 + sub push_in_abs_any 150 + { 151 + my ($in, $abs, $any) = @_; 152 + 153 + foreach my $path (@$in) { 154 + if (File::Spec->file_name_is_absolute($path)) { 155 + push @$abs, $path; 156 + } elsif (index($path,'/') == -1) { 157 + push @$any, $path; 158 + } else { 159 + print 'path error: ' . $path; 160 + } 161 + } 162 + } 163 + 164 + # Push command line options to global arrays. 165 + sub push_to_global 166 + { 167 + push_in_abs_any(\@dont_walk, \@skip_walk_dirs_abs, \@skip_walk_dirs_any); 168 + push_in_abs_any(\@dont_parse, \@skip_parse_files_abs, \@skip_parse_files_any); 169 + } 170 + 171 + sub is_false_positive 172 + { 173 + my ($match) = @_; 174 + 175 + if ($match =~ '\b(0x)?(f|F){16}\b' or 176 + $match =~ '\b(0x)?0{16}\b') { 177 + return 1; 178 + } 179 + 180 + # vsyscall memory region, we should probably check against a range here. 181 + if ($match =~ '\bf{10}600000\b' or 182 + $match =~ '\bf{10}601000\b') { 183 + return 1; 184 + } 185 + 186 + return 0; 187 + } 188 + 189 + # True if argument potentially contains a kernel address. 190 + sub may_leak_address 191 + { 192 + my ($line) = @_; 193 + my $address = '\b(0x)?ffff[[:xdigit:]]{12}\b'; 194 + 195 + # Signal masks. 
196 + if ($line =~ '^SigBlk:' or 197 + $line =~ '^SigCgt:') { 198 + return 0; 199 + } 200 + 201 + if ($line =~ '\bKEY=[[:xdigit:]]{14} [[:xdigit:]]{16} [[:xdigit:]]{16}\b' or 202 + $line =~ '\b[[:xdigit:]]{14} [[:xdigit:]]{16} [[:xdigit:]]{16}\b') { 203 + return 0; 204 + } 205 + 206 + while (/($address)/g) { 207 + if (!is_false_positive($1)) { 208 + return 1; 209 + } 210 + } 211 + 212 + return 0; 213 + } 214 + 215 + sub parse_dmesg 216 + { 217 + open my $cmd, '-|', 'dmesg'; 218 + while (<$cmd>) { 219 + if (may_leak_address($_)) { 220 + print 'dmesg: ' . $_; 221 + } 222 + } 223 + close $cmd; 224 + } 225 + 226 + # True if we should skip this path. 227 + sub skip 228 + { 229 + my ($path, $paths_abs, $paths_any) = @_; 230 + 231 + foreach (@$paths_abs) { 232 + return 1 if (/^$path$/); 233 + } 234 + 235 + my($filename, $dirs, $suffix) = fileparse($path); 236 + foreach (@$paths_any) { 237 + return 1 if (/^$filename$/); 238 + } 239 + 240 + return 0; 241 + } 242 + 243 + sub skip_parse 244 + { 245 + my ($path) = @_; 246 + return skip($path, \@skip_parse_files_abs, \@skip_parse_files_any); 247 + } 248 + 249 + sub parse_file 250 + { 251 + my ($file) = @_; 252 + 253 + if (! -R $file) { 254 + return; 255 + } 256 + 257 + if (skip_parse($file)) { 258 + dprint "skipping file: $file\n"; 259 + return; 260 + } 261 + dprint "parsing: $file\n"; 262 + 263 + open my $fh, "<", $file or return; 264 + while ( <$fh> ) { 265 + if (may_leak_address($_)) { 266 + print $file . ': ' . $_; 267 + } 268 + } 269 + close $fh; 270 + } 271 + 272 + 273 + # True if we should skip walking this directory. 274 + sub skip_walk 275 + { 276 + my ($path) = @_; 277 + return skip($path, \@skip_walk_dirs_abs, \@skip_walk_dirs_any) 278 + } 279 + 280 + # Recursively walk directory tree. 
281 + sub walk 282 + { 283 + my @dirs = @_; 284 + my %seen; 285 + 286 + while (my $pwd = shift @dirs) { 287 + next if (skip_walk($pwd)); 288 + next if (!opendir(DIR, $pwd)); 289 + my @files = readdir(DIR); 290 + closedir(DIR); 291 + 292 + foreach my $file (@files) { 293 + next if ($file eq '.' or $file eq '..'); 294 + 295 + my $path = "$pwd/$file"; 296 + next if (-l $path); 297 + 298 + if (-d $path) { 299 + push @dirs, $path; 300 + } else { 301 + parse_file($path); 302 + } 303 + } 304 + } 305 + }
+2 -2
security/apparmor/ipc.c
··· 128 128 return SIGUNKNOWN; 129 129 else if (sig >= SIGRTMIN) 130 130 return sig - SIGRTMIN + 128; /* rt sigs mapped to 128 */ 131 - else if (sig <= MAXMAPPED_SIG) 131 + else if (sig < MAXMAPPED_SIG) 132 132 return sig_map[sig]; 133 133 return SIGUNKNOWN; 134 134 } ··· 163 163 audit_signal_mask(ab, aad(sa)->denied); 164 164 } 165 165 } 166 - if (aad(sa)->signal <= MAXMAPPED_SIG) 166 + if (aad(sa)->signal < MAXMAPPED_SIG) 167 167 audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]); 168 168 else 169 169 audit_log_format(ab, " signal=rtmin+%d",
+1
sound/core/hrtimer.c
··· 159 159 timer->hw = hrtimer_hw; 160 160 timer->hw.resolution = resolution; 161 161 timer->hw.ticks = NANO_SEC / resolution; 162 + timer->max_instances = 100; /* lower the limit */ 162 163 163 164 err = snd_timer_global_register(timer); 164 165 if (err < 0) {
+1 -3
sound/core/seq/oss/seq_oss_midi.c
··· 612 612 if (!dp->timer->running) 613 613 len = snd_seq_oss_timer_start(dp->timer); 614 614 if (ev->type == SNDRV_SEQ_EVENT_SYSEX) { 615 - if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE) 616 - snd_seq_oss_readq_puts(dp->readq, mdev->seq_device, 617 - ev->data.ext.ptr, ev->data.ext.len); 615 + snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev); 618 616 } else { 619 617 len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev); 620 618 if (len > 0)
+29
sound/core/seq/oss/seq_oss_readq.c
··· 118 118 } 119 119 120 120 /* 121 + * put MIDI sysex bytes; the event buffer may be chained, thus it has 122 + * to be expanded via snd_seq_dump_var_event(). 123 + */ 124 + struct readq_sysex_ctx { 125 + struct seq_oss_readq *readq; 126 + int dev; 127 + }; 128 + 129 + static int readq_dump_sysex(void *ptr, void *buf, int count) 130 + { 131 + struct readq_sysex_ctx *ctx = ptr; 132 + 133 + return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count); 134 + } 135 + 136 + int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, 137 + struct snd_seq_event *ev) 138 + { 139 + struct readq_sysex_ctx ctx = { 140 + .readq = q, 141 + .dev = dev 142 + }; 143 + 144 + if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) 145 + return 0; 146 + return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx); 147 + } 148 + 149 + /* 121 150 * copy an event to input queue: 122 151 * return zero if enqueued 123 152 */
+2
sound/core/seq/oss/seq_oss_readq.h
··· 44 44 void snd_seq_oss_readq_clear(struct seq_oss_readq *readq); 45 45 unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait); 46 46 int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len); 47 + int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, 48 + struct snd_seq_event *ev); 47 49 int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev); 48 50 int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode); 49 51 int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
+54 -13
sound/core/timer.c
··· 180 180 * 181 181 * call this with register_mutex down. 182 182 */ 183 - static void snd_timer_check_slave(struct snd_timer_instance *slave) 183 + static int snd_timer_check_slave(struct snd_timer_instance *slave) 184 184 { 185 185 struct snd_timer *timer; 186 186 struct snd_timer_instance *master; ··· 190 190 list_for_each_entry(master, &timer->open_list_head, open_list) { 191 191 if (slave->slave_class == master->slave_class && 192 192 slave->slave_id == master->slave_id) { 193 + if (master->timer->num_instances >= 194 + master->timer->max_instances) 195 + return -EBUSY; 193 196 list_move_tail(&slave->open_list, 194 197 &master->slave_list_head); 198 + master->timer->num_instances++; 195 199 spin_lock_irq(&slave_active_lock); 196 200 slave->master = master; 197 201 slave->timer = master->timer; 198 202 spin_unlock_irq(&slave_active_lock); 199 - return; 203 + return 0; 200 204 } 201 205 } 202 206 } 207 + return 0; 203 208 } 204 209 205 210 /* ··· 213 208 * 214 209 * call this with register_mutex down. 
215 210 */ 216 - static void snd_timer_check_master(struct snd_timer_instance *master) 211 + static int snd_timer_check_master(struct snd_timer_instance *master) 217 212 { 218 213 struct snd_timer_instance *slave, *tmp; 219 214 ··· 221 216 list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) { 222 217 if (slave->slave_class == master->slave_class && 223 218 slave->slave_id == master->slave_id) { 219 + if (master->timer->num_instances >= 220 + master->timer->max_instances) 221 + return -EBUSY; 224 222 list_move_tail(&slave->open_list, &master->slave_list_head); 223 + master->timer->num_instances++; 225 224 spin_lock_irq(&slave_active_lock); 226 225 spin_lock(&master->timer->lock); 227 226 slave->master = master; ··· 237 228 spin_unlock_irq(&slave_active_lock); 238 229 } 239 230 } 231 + return 0; 240 232 } 233 + 234 + static int snd_timer_close_locked(struct snd_timer_instance *timeri); 241 235 242 236 /* 243 237 * open a timer instance ··· 252 240 { 253 241 struct snd_timer *timer; 254 242 struct snd_timer_instance *timeri = NULL; 243 + int err; 255 244 256 245 if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) { 257 246 /* open a slave instance */ ··· 272 259 timeri->slave_id = tid->device; 273 260 timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; 274 261 list_add_tail(&timeri->open_list, &snd_timer_slave_list); 275 - snd_timer_check_slave(timeri); 262 + err = snd_timer_check_slave(timeri); 263 + if (err < 0) { 264 + snd_timer_close_locked(timeri); 265 + timeri = NULL; 266 + } 276 267 mutex_unlock(&register_mutex); 277 268 *ti = timeri; 278 - return 0; 269 + return err; 279 270 } 280 271 281 272 /* open a master instance */ ··· 304 287 mutex_unlock(&register_mutex); 305 288 return -EBUSY; 306 289 } 290 + } 291 + if (timer->num_instances >= timer->max_instances) { 292 + mutex_unlock(&register_mutex); 293 + return -EBUSY; 307 294 } 308 295 timeri = snd_timer_instance_new(owner, timer); 309 296 if (!timeri) { ··· 335 314 } 336 315 337 316 
list_add_tail(&timeri->open_list, &timer->open_list_head); 338 - snd_timer_check_master(timeri); 317 + timer->num_instances++; 318 + err = snd_timer_check_master(timeri); 319 + if (err < 0) { 320 + snd_timer_close_locked(timeri); 321 + timeri = NULL; 322 + } 339 323 mutex_unlock(&register_mutex); 340 324 *ti = timeri; 341 - return 0; 325 + return err; 342 326 } 343 327 EXPORT_SYMBOL(snd_timer_open); 344 328 345 329 /* 346 330 * close a timer instance 331 + * call this with register_mutex down. 347 332 */ 348 - int snd_timer_close(struct snd_timer_instance *timeri) 333 + static int snd_timer_close_locked(struct snd_timer_instance *timeri) 349 334 { 350 335 struct snd_timer *timer = NULL; 351 336 struct snd_timer_instance *slave, *tmp; 352 337 353 - if (snd_BUG_ON(!timeri)) 354 - return -ENXIO; 355 - 356 - mutex_lock(&register_mutex); 357 338 list_del(&timeri->open_list); 358 339 359 340 /* force to stop the timer */ ··· 363 340 364 341 timer = timeri->timer; 365 342 if (timer) { 343 + timer->num_instances--; 366 344 /* wait, until the active callback is finished */ 367 345 spin_lock_irq(&timer->lock); 368 346 while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { ··· 379 355 list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head, 380 356 open_list) { 381 357 list_move_tail(&slave->open_list, &snd_timer_slave_list); 358 + timer->num_instances--; 382 359 slave->master = NULL; 383 360 slave->timer = NULL; 384 361 list_del_init(&slave->ack_list); ··· 407 382 module_put(timer->module); 408 383 } 409 384 410 - mutex_unlock(&register_mutex); 411 385 return 0; 386 + } 387 + 388 + /* 389 + * close a timer instance 390 + */ 391 + int snd_timer_close(struct snd_timer_instance *timeri) 392 + { 393 + int err; 394 + 395 + if (snd_BUG_ON(!timeri)) 396 + return -ENXIO; 397 + 398 + mutex_lock(&register_mutex); 399 + err = snd_timer_close_locked(timeri); 400 + mutex_unlock(&register_mutex); 401 + return err; 412 402 } 413 403 EXPORT_SYMBOL(snd_timer_close); 414 404 ··· 896 856 
spin_lock_init(&timer->lock); 897 857 tasklet_init(&timer->task_queue, snd_timer_tasklet, 898 858 (unsigned long)timer); 859 + timer->max_instances = 1000; /* default limit per timer */ 899 860 if (card != NULL) { 900 861 timer->module = card->module; 901 862 err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
+5
sound/pci/hda/patch_realtek.c
··· 6544 6544 {0x14, 0x90170110}, 6545 6545 {0x1b, 0x90a70130}, 6546 6546 {0x21, 0x03211020}), 6547 + SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 6548 + {0x12, 0xb7a60130}, 6549 + {0x13, 0xb8a61140}, 6550 + {0x16, 0x90170110}, 6551 + {0x21, 0x04211020}), 6547 6552 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 6548 6553 {0x12, 0x90a60130}, 6549 6554 {0x14, 0x90170110},
+1
sound/usb/quirks.c
··· 1375 1375 case 0x199: 1376 1376 return SNDRV_PCM_FMTBIT_DSD_U32_LE; 1377 1377 case 0x19b: 1378 + case 0x203: 1378 1379 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1379 1380 default: 1380 1381 break;
-1
tools/arch/x86/include/asm/disabled-features.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 1 #ifndef _ASM_X86_DISABLED_FEATURES_H 3 2 #define _ASM_X86_DISABLED_FEATURES_H 4 3
-1
tools/arch/x86/include/asm/required-features.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 1 #ifndef _ASM_X86_REQUIRED_FEATURES_H 3 2 #define _ASM_X86_REQUIRED_FEATURES_H 4 3
+1
tools/arch/x86/include/uapi/asm/unistd.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 #ifndef _UAPI_ASM_X86_UNISTD_H 2 3 #define _UAPI_ASM_X86_UNISTD_H 3 4
+4 -1
tools/arch/x86/lib/memcpy_64.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 1 /* Copyright 2002 Andi Kleen */ 3 2 4 3 #include <linux/linkage.h> 5 4 #include <asm/errno.h> 6 5 #include <asm/cpufeatures.h> 7 6 #include <asm/alternative-asm.h> 7 + #include <asm/export.h> 8 8 9 9 /* 10 10 * We build a jump to memcpy_orig by default which gets NOPped out on ··· 41 41 ret 42 42 ENDPROC(memcpy) 43 43 ENDPROC(__memcpy) 44 + EXPORT_SYMBOL(memcpy) 45 + EXPORT_SYMBOL(__memcpy) 44 46 45 47 /* 46 48 * memcpy_erms() - enhanced fast string memcpy. This is faster and ··· 277 275 xorq %rax, %rax 278 276 ret 279 277 ENDPROC(memcpy_mcsafe_unrolled) 278 + EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled) 280 279 281 280 .section .fixup, "ax" 282 281 /* Return -EFAULT for any failure */
+1
tools/include/asm-generic/bitops/__fls.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 #ifndef _ASM_GENERIC_BITOPS___FLS_H_ 2 3 #define _ASM_GENERIC_BITOPS___FLS_H_ 3 4
+1
tools/include/asm-generic/bitops/arch_hweight.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 #ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ 2 3 #define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ 3 4
+1
tools/include/asm-generic/bitops/const_hweight.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 #ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ 2 3 #define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ 3 4
+1
tools/include/asm-generic/bitops/fls.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 #ifndef _ASM_GENERIC_BITOPS_FLS_H_ 2 3 #define _ASM_GENERIC_BITOPS_FLS_H_ 3 4
+1
tools/include/asm-generic/bitops/fls64.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 #ifndef _ASM_GENERIC_BITOPS_FLS64_H_ 2 3 #define _ASM_GENERIC_BITOPS_FLS64_H_ 3 4
+7
tools/include/asm/export.h
··· 1 + #ifndef _TOOLS_ASM_EXPORT_H 2 + #define _TOOLS_ASM_EXPORT_H 3 + 4 + #define EXPORT_SYMBOL(x) 5 + #define EXPORT_SYMBOL_GPL(x) 6 + 7 + #endif /* _TOOLS_ASM_EXPORT_H */
-1
tools/include/linux/hash.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 1 #ifndef _LINUX_HASH_H 3 2 #define _LINUX_HASH_H 4 3 /* Fast hashing routine for ints, longs and pointers.
+1
tools/include/uapi/asm-generic/ioctls.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 #ifndef __ASM_GENERIC_IOCTLS_H 2 3 #define __ASM_GENERIC_IOCTLS_H 3 4
+1
tools/include/uapi/asm-generic/mman-common.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 #ifndef __ASM_GENERIC_MMAN_COMMON_H 2 3 #define __ASM_GENERIC_MMAN_COMMON_H 3 4
+2 -2
tools/include/uapi/asm-generic/mman.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 2 #ifndef __ASM_GENERIC_MMAN_H 3 3 #define __ASM_GENERIC_MMAN_H 4 4 5 - #include <uapi/asm-generic/mman-common.h> 5 + #include <asm-generic/mman-common.h> 6 6 7 7 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 8 8 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1
tools/include/uapi/linux/bpf.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 2 3 * 3 4 * This program is free software; you can redistribute it and/or
+1 -1
tools/include/uapi/linux/bpf_common.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 2 #ifndef _UAPI__LINUX_BPF_COMMON_H__ 3 3 #define _UAPI__LINUX_BPF_COMMON_H__ 4 4
+1 -1
tools/include/uapi/linux/fcntl.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 2 #ifndef _UAPI_LINUX_FCNTL_H 3 3 #define _UAPI_LINUX_FCNTL_H 4 4
+1 -1
tools/include/uapi/linux/hw_breakpoint.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 2 #ifndef _UAPI_LINUX_HW_BREAKPOINT_H 3 3 #define _UAPI_LINUX_HW_BREAKPOINT_H 4 4
+1
tools/include/uapi/linux/kvm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 #ifndef __LINUX_KVM_H 2 3 #define __LINUX_KVM_H 3 4
+1 -1
tools/include/uapi/linux/mman.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 2 #ifndef _UAPI_LINUX_MMAN_H 3 3 #define _UAPI_LINUX_MMAN_H 4 4
+1
tools/include/uapi/linux/perf_event.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 /* 2 3 * Performance events: 3 4 *
+1
tools/include/uapi/linux/sched.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 #ifndef _UAPI_LINUX_SCHED_H 2 3 #define _UAPI_LINUX_SCHED_H 3 4
+1 -1
tools/include/uapi/linux/stat.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 2 #ifndef _UAPI_LINUX_STAT_H 3 3 #define _UAPI_LINUX_STAT_H 4 4
+1
tools/include/uapi/linux/vhost.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 2 #ifndef _LINUX_VHOST_H 2 3 #define _LINUX_VHOST_H 3 4 /* Userspace interface for in-kernel virtio accelerators. */
+1
tools/include/uapi/sound/asound.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ 1 2 /* 2 3 * Advanced Linux Sound Architecture - ALSA - Driver 3 4 * Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>,
+1
tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
··· 1 1 #!/bin/awk -f 2 + # SPDX-License-Identifier: GPL-2.0 2 3 # gen-insn-attr-x86.awk: Instruction attribute table generator 3 4 # Written by Masami Hiramatsu <mhiramat@redhat.com> 4 5 #
+11 -1
tools/perf/util/annotate.c
··· 606 606 int symbol__alloc_hist(struct symbol *sym) 607 607 { 608 608 struct annotation *notes = symbol__annotation(sym); 609 - const size_t size = symbol__size(sym); 609 + size_t size = symbol__size(sym); 610 610 size_t sizeof_sym_hist; 611 + 612 + /* 613 + * Add buffer of one element for zero length symbol. 614 + * When sample is taken from first instruction of 615 + * zero length symbol, perf still resolves it and 616 + * shows symbol name in perf report and allows to 617 + * annotate it. 618 + */ 619 + if (size == 0) 620 + size = 1; 611 621 612 622 /* Check for overflow when calculating sizeof_sym_hist */ 613 623 if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry))
+6 -2
tools/perf/util/parse-events.l
··· 154 154 yycolumn += yyleng; \ 155 155 } while (0); 156 156 157 + #define USER_REJECT \ 158 + yycolumn -= yyleng; \ 159 + REJECT 160 + 157 161 %} 158 162 159 163 %x mem ··· 339 335 {num_hex} { return value(yyscanner, 16); } 340 336 341 337 {modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); } 342 - {bpf_object} { if (!isbpf(yyscanner)) REJECT; return str(yyscanner, PE_BPF_OBJECT); } 343 - {bpf_source} { if (!isbpf(yyscanner)) REJECT; return str(yyscanner, PE_BPF_SOURCE); } 338 + {bpf_object} { if (!isbpf(yyscanner)) USER_REJECT; return str(yyscanner, PE_BPF_OBJECT); } 339 + {bpf_source} { if (!isbpf(yyscanner)) USER_REJECT; return str(yyscanner, PE_BPF_SOURCE); } 344 340 {name} { return pmu_str_check(yyscanner); } 345 341 "/" { BEGIN(config); return '/'; } 346 342 - { return '-'; }
+14 -17
virt/kvm/arm/arm.c
··· 1326 1326 { 1327 1327 int cpu; 1328 1328 1329 - if (is_kernel_in_hyp_mode()) 1330 - return; 1331 - 1332 1329 free_hyp_pgds(); 1333 1330 for_each_possible_cpu(cpu) 1334 1331 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); 1335 1332 hyp_cpu_pm_exit(); 1336 - } 1337 - 1338 - static int init_vhe_mode(void) 1339 - { 1340 - kvm_info("VHE mode initialized successfully\n"); 1341 - return 0; 1342 1333 } 1343 1334 1344 1335 /** ··· 1412 1421 } 1413 1422 } 1414 1423 1415 - kvm_info("Hyp mode initialized successfully\n"); 1416 - 1417 1424 return 0; 1418 1425 1419 1426 out_err: ··· 1445 1456 { 1446 1457 int err; 1447 1458 int ret, cpu; 1459 + bool in_hyp_mode; 1448 1460 1449 1461 if (!is_hyp_mode_available()) { 1450 1462 kvm_err("HYP mode not available\n"); ··· 1464 1474 if (err) 1465 1475 return err; 1466 1476 1467 - if (is_kernel_in_hyp_mode()) 1468 - err = init_vhe_mode(); 1469 - else 1477 + in_hyp_mode = is_kernel_in_hyp_mode(); 1478 + 1479 + if (!in_hyp_mode) { 1470 1480 err = init_hyp_mode(); 1471 - if (err) 1472 - goto out_err; 1481 + if (err) 1482 + goto out_err; 1483 + } 1473 1484 1474 1485 err = init_subsystems(); 1475 1486 if (err) 1476 1487 goto out_hyp; 1477 1488 1489 + if (in_hyp_mode) 1490 + kvm_info("VHE mode initialized successfully\n"); 1491 + else 1492 + kvm_info("Hyp mode initialized successfully\n"); 1493 + 1478 1494 return 0; 1479 1495 1480 1496 out_hyp: 1481 - teardown_hyp_mode(); 1497 + if (!in_hyp_mode) 1498 + teardown_hyp_mode(); 1482 1499 out_err: 1483 1500 teardown_common_resources(); 1484 1501 return err;
+48 -25
virt/kvm/arm/vgic/vgic-its.c
··· 1466 1466 { 1467 1467 mutex_lock(&its->cmd_lock); 1468 1468 1469 + /* 1470 + * It is UNPREDICTABLE to enable the ITS if any of the CBASER or 1471 + * device/collection BASER are invalid 1472 + */ 1473 + if (!its->enabled && (val & GITS_CTLR_ENABLE) && 1474 + (!(its->baser_device_table & GITS_BASER_VALID) || 1475 + !(its->baser_coll_table & GITS_BASER_VALID) || 1476 + !(its->cbaser & GITS_CBASER_VALID))) 1477 + goto out; 1478 + 1469 1479 its->enabled = !!(val & GITS_CTLR_ENABLE); 1470 1480 1471 1481 /* ··· 1484 1474 */ 1485 1475 vgic_its_process_commands(kvm, its); 1486 1476 1477 + out: 1487 1478 mutex_unlock(&its->cmd_lock); 1488 1479 } 1489 1480 ··· 1812 1801 static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz, 1813 1802 int start_id, entry_fn_t fn, void *opaque) 1814 1803 { 1815 - void *entry = kzalloc(esz, GFP_KERNEL); 1816 1804 struct kvm *kvm = its->dev->kvm; 1817 1805 unsigned long len = size; 1818 1806 int id = start_id; 1819 1807 gpa_t gpa = base; 1808 + char entry[esz]; 1820 1809 int ret; 1810 + 1811 + memset(entry, 0, esz); 1821 1812 1822 1813 while (len > 0) { 1823 1814 int next_offset; ··· 1827 1814 1828 1815 ret = kvm_read_guest(kvm, gpa, entry, esz); 1829 1816 if (ret) 1830 - goto out; 1817 + return ret; 1831 1818 1832 1819 next_offset = fn(its, id, entry, opaque); 1833 - if (next_offset <= 0) { 1834 - ret = next_offset; 1835 - goto out; 1836 - } 1820 + if (next_offset <= 0) 1821 + return next_offset; 1837 1822 1838 1823 byte_offset = next_offset * esz; 1839 1824 id += next_offset; 1840 1825 gpa += byte_offset; 1841 1826 len -= byte_offset; 1842 1827 } 1843 - ret = 1; 1844 - 1845 - out: 1846 - kfree(entry); 1847 - return ret; 1828 + return 1; 1848 1829 } 1849 1830 1850 1831 /** ··· 1947 1940 return 0; 1948 1941 } 1949 1942 1943 + /** 1944 + * vgic_its_restore_itt - restore the ITT of a device 1945 + * 1946 + * @its: its handle 1947 + * @dev: device handle 1948 + * 1949 + * Return 0 on success, < 0 on error 1950 + */ 1950 
1951 static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev) 1951 1952 { 1952 1953 const struct vgic_its_abi *abi = vgic_its_get_abi(its); ··· 1965 1950 1966 1951 ret = scan_its_table(its, base, max_size, ite_esz, 0, 1967 1952 vgic_its_restore_ite, dev); 1953 + 1954 + /* scan_its_table returns +1 if all ITEs are invalid */ 1955 + if (ret > 0) 1956 + ret = 0; 1968 1957 1969 1958 return ret; 1970 1959 } ··· 2067 2048 static int vgic_its_save_device_tables(struct vgic_its *its) 2068 2049 { 2069 2050 const struct vgic_its_abi *abi = vgic_its_get_abi(its); 2051 + u64 baser = its->baser_device_table; 2070 2052 struct its_device *dev; 2071 2053 int dte_esz = abi->dte_esz; 2072 - u64 baser; 2073 2054 2074 - baser = its->baser_device_table; 2055 + if (!(baser & GITS_BASER_VALID)) 2056 + return 0; 2075 2057 2076 2058 list_sort(NULL, &its->device_list, vgic_its_device_cmp); 2077 2059 ··· 2127 2107 ret = scan_its_table(its, gpa, SZ_64K, dte_esz, 2128 2108 l2_start_id, vgic_its_restore_dte, NULL); 2129 2109 2130 - if (ret <= 0) 2131 - return ret; 2132 - 2133 - return 1; 2110 + return ret; 2134 2111 } 2135 2112 2136 2113 /** ··· 2157 2140 vgic_its_restore_dte, NULL); 2158 2141 } 2159 2142 2143 + /* scan_its_table returns +1 if all entries are invalid */ 2160 2144 if (ret > 0) 2161 - ret = -EINVAL; 2145 + ret = 0; 2162 2146 2163 2147 return ret; 2164 2148 } ··· 2216 2198 static int vgic_its_save_collection_table(struct vgic_its *its) 2217 2199 { 2218 2200 const struct vgic_its_abi *abi = vgic_its_get_abi(its); 2201 + u64 baser = its->baser_coll_table; 2202 + gpa_t gpa = BASER_ADDRESS(baser); 2219 2203 struct its_collection *collection; 2220 2204 u64 val; 2221 - gpa_t gpa; 2222 2205 size_t max_size, filled = 0; 2223 2206 int ret, cte_esz = abi->cte_esz; 2224 2207 2225 - gpa = BASER_ADDRESS(its->baser_coll_table); 2226 - if (!gpa) 2208 + if (!(baser & GITS_BASER_VALID)) 2227 2209 return 0; 2228 2210 2229 - max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) 
* SZ_64K; 2211 + max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K; 2230 2212 2231 2213 list_for_each_entry(collection, &its->collection_list, coll_list) { 2232 2214 ret = vgic_its_save_cte(its, collection, gpa, cte_esz); ··· 2257 2239 static int vgic_its_restore_collection_table(struct vgic_its *its) 2258 2240 { 2259 2241 const struct vgic_its_abi *abi = vgic_its_get_abi(its); 2242 + u64 baser = its->baser_coll_table; 2260 2243 int cte_esz = abi->cte_esz; 2261 2244 size_t max_size, read = 0; 2262 2245 gpa_t gpa; 2263 2246 int ret; 2264 2247 2265 - if (!(its->baser_coll_table & GITS_BASER_VALID)) 2248 + if (!(baser & GITS_BASER_VALID)) 2266 2249 return 0; 2267 2250 2268 - gpa = BASER_ADDRESS(its->baser_coll_table); 2251 + gpa = BASER_ADDRESS(baser); 2269 2252 2270 - max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K; 2253 + max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K; 2271 2254 2272 2255 while (read < max_size) { 2273 2256 ret = vgic_its_restore_cte(its, gpa, cte_esz); ··· 2277 2258 gpa += cte_esz; 2278 2259 read += cte_esz; 2279 2260 } 2261 + 2262 + if (ret > 0) 2263 + return 0; 2264 + 2280 2265 return ret; 2281 2266 } 2282 2267