Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 4.9-rc3 into tty-next

We want the serial/tty fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+2601 -1917
+3 -2
CREDITS
··· 1864 1864 1865 1865 N: Martin Kepplinger 1866 1866 E: martink@posteo.de 1867 - E: martin.kepplinger@theobroma-systems.com 1867 + E: martin.kepplinger@ginzinger.com 1868 1868 W: http://www.martinkepplinger.com 1869 1869 D: mma8452 accelerators iio driver 1870 - D: Kernel cleanups 1870 + D: pegasus_notetaker input driver 1871 + D: Kernel fixes and cleanups 1871 1872 S: Garnisonstraße 26 1872 1873 S: 4020 Linz 1873 1874 S: Austria
+1
Documentation/device-mapper/dm-raid.txt
··· 309 309 with a reshape in progress. 310 310 1.9.0 Add support for RAID level takeover/reshape/region size 311 311 and set size reduction. 312 + 1.9.1 Fix activation of existing RAID 4/10 mapped devices
+8 -8
Documentation/devicetree/bindings/clock/uniphier-clock.txt
··· 24 24 reg = <0x61840000 0x4000>; 25 25 26 26 clock { 27 - compatible = "socionext,uniphier-ld20-clock"; 27 + compatible = "socionext,uniphier-ld11-clock"; 28 28 #clock-cells = <1>; 29 29 }; 30 30 ··· 43 43 21: USB3 ch1 PHY1 44 44 45 45 46 - Media I/O (MIO) clock 47 - --------------------- 46 + Media I/O (MIO) clock, SD clock 47 + ------------------------------- 48 48 49 49 Required properties: 50 50 - compatible: should be one of the following: ··· 52 52 "socionext,uniphier-ld4-mio-clock" - for LD4 SoC. 53 53 "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC. 54 54 "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC. 55 - "socionext,uniphier-pro5-mio-clock" - for Pro5 SoC. 56 - "socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC. 55 + "socionext,uniphier-pro5-sd-clock" - for Pro5 SoC. 56 + "socionext,uniphier-pxs2-sd-clock" - for PXs2/LD6b SoC. 57 57 "socionext,uniphier-ld11-mio-clock" - for LD11 SoC. 58 - "socionext,uniphier-ld20-mio-clock" - for LD20 SoC. 58 + "socionext,uniphier-ld20-sd-clock" - for LD20 SoC. 59 59 - #clock-cells: should be 1. 60 60 61 61 Example: ··· 66 66 reg = <0x59810000 0x800>; 67 67 68 68 clock { 69 - compatible = "socionext,uniphier-ld20-mio-clock"; 69 + compatible = "socionext,uniphier-ld11-mio-clock"; 70 70 #clock-cells = <1>; 71 71 }; 72 72 ··· 112 112 reg = <0x59820000 0x200>; 113 113 114 114 clock { 115 - compatible = "socionext,uniphier-ld20-peri-clock"; 115 + compatible = "socionext,uniphier-ld11-peri-clock"; 116 116 #clock-cells = <1>; 117 117 }; 118 118
+31 -31
Documentation/devicetree/bindings/reset/uniphier-reset.txt
··· 6 6 7 7 Required properties: 8 8 - compatible: should be one of the following: 9 - "socionext,uniphier-sld3-reset" - for PH1-sLD3 SoC. 10 - "socionext,uniphier-ld4-reset" - for PH1-LD4 SoC. 11 - "socionext,uniphier-pro4-reset" - for PH1-Pro4 SoC. 12 - "socionext,uniphier-sld8-reset" - for PH1-sLD8 SoC. 13 - "socionext,uniphier-pro5-reset" - for PH1-Pro5 SoC. 14 - "socionext,uniphier-pxs2-reset" - for ProXstream2/PH1-LD6b SoC. 15 - "socionext,uniphier-ld11-reset" - for PH1-LD11 SoC. 16 - "socionext,uniphier-ld20-reset" - for PH1-LD20 SoC. 9 + "socionext,uniphier-sld3-reset" - for sLD3 SoC. 10 + "socionext,uniphier-ld4-reset" - for LD4 SoC. 11 + "socionext,uniphier-pro4-reset" - for Pro4 SoC. 12 + "socionext,uniphier-sld8-reset" - for sLD8 SoC. 13 + "socionext,uniphier-pro5-reset" - for Pro5 SoC. 14 + "socionext,uniphier-pxs2-reset" - for PXs2/LD6b SoC. 15 + "socionext,uniphier-ld11-reset" - for LD11 SoC. 16 + "socionext,uniphier-ld20-reset" - for LD20 SoC. 17 17 - #reset-cells: should be 1. 18 18 19 19 Example: 20 20 21 21 sysctrl@61840000 { 22 - compatible = "socionext,uniphier-ld20-sysctrl", 22 + compatible = "socionext,uniphier-ld11-sysctrl", 23 23 "simple-mfd", "syscon"; 24 24 reg = <0x61840000 0x4000>; 25 25 26 26 reset { 27 - compatible = "socionext,uniphier-ld20-reset"; 27 + compatible = "socionext,uniphier-ld11-reset"; 28 28 #reset-cells = <1>; 29 29 }; 30 30 ··· 32 32 }; 33 33 34 34 35 - Media I/O (MIO) reset 36 - --------------------- 35 + Media I/O (MIO) reset, SD reset 36 + ------------------------------- 37 37 38 38 Required properties: 39 39 - compatible: should be one of the following: 40 - "socionext,uniphier-sld3-mio-reset" - for PH1-sLD3 SoC. 41 - "socionext,uniphier-ld4-mio-reset" - for PH1-LD4 SoC. 42 - "socionext,uniphier-pro4-mio-reset" - for PH1-Pro4 SoC. 43 - "socionext,uniphier-sld8-mio-reset" - for PH1-sLD8 SoC. 44 - "socionext,uniphier-pro5-mio-reset" - for PH1-Pro5 SoC. 
45 - "socionext,uniphier-pxs2-mio-reset" - for ProXstream2/PH1-LD6b SoC. 46 - "socionext,uniphier-ld11-mio-reset" - for PH1-LD11 SoC. 47 - "socionext,uniphier-ld20-mio-reset" - for PH1-LD20 SoC. 40 + "socionext,uniphier-sld3-mio-reset" - for sLD3 SoC. 41 + "socionext,uniphier-ld4-mio-reset" - for LD4 SoC. 42 + "socionext,uniphier-pro4-mio-reset" - for Pro4 SoC. 43 + "socionext,uniphier-sld8-mio-reset" - for sLD8 SoC. 44 + "socionext,uniphier-pro5-sd-reset" - for Pro5 SoC. 45 + "socionext,uniphier-pxs2-sd-reset" - for PXs2/LD6b SoC. 46 + "socionext,uniphier-ld11-mio-reset" - for LD11 SoC. 47 + "socionext,uniphier-ld20-sd-reset" - for LD20 SoC. 48 48 - #reset-cells: should be 1. 49 49 50 50 Example: 51 51 52 52 mioctrl@59810000 { 53 - compatible = "socionext,uniphier-ld20-mioctrl", 53 + compatible = "socionext,uniphier-ld11-mioctrl", 54 54 "simple-mfd", "syscon"; 55 55 reg = <0x59810000 0x800>; 56 56 57 57 reset { 58 - compatible = "socionext,uniphier-ld20-mio-reset"; 58 + compatible = "socionext,uniphier-ld11-mio-reset"; 59 59 #reset-cells = <1>; 60 60 }; 61 61 ··· 68 68 69 69 Required properties: 70 70 - compatible: should be one of the following: 71 - "socionext,uniphier-ld4-peri-reset" - for PH1-LD4 SoC. 72 - "socionext,uniphier-pro4-peri-reset" - for PH1-Pro4 SoC. 73 - "socionext,uniphier-sld8-peri-reset" - for PH1-sLD8 SoC. 74 - "socionext,uniphier-pro5-peri-reset" - for PH1-Pro5 SoC. 75 - "socionext,uniphier-pxs2-peri-reset" - for ProXstream2/PH1-LD6b SoC. 76 - "socionext,uniphier-ld11-peri-reset" - for PH1-LD11 SoC. 77 - "socionext,uniphier-ld20-peri-reset" - for PH1-LD20 SoC. 71 + "socionext,uniphier-ld4-peri-reset" - for LD4 SoC. 72 + "socionext,uniphier-pro4-peri-reset" - for Pro4 SoC. 73 + "socionext,uniphier-sld8-peri-reset" - for sLD8 SoC. 74 + "socionext,uniphier-pro5-peri-reset" - for Pro5 SoC. 75 + "socionext,uniphier-pxs2-peri-reset" - for PXs2/LD6b SoC. 76 + "socionext,uniphier-ld11-peri-reset" - for LD11 SoC. 
77 + "socionext,uniphier-ld20-peri-reset" - for LD20 SoC. 78 78 - #reset-cells: should be 1. 79 79 80 80 Example: 81 81 82 82 perictrl@59820000 { 83 - compatible = "socionext,uniphier-ld20-perictrl", 83 + compatible = "socionext,uniphier-ld11-perictrl", 84 84 "simple-mfd", "syscon"; 85 85 reg = <0x59820000 0x200>; 86 86 87 87 reset { 88 - compatible = "socionext,uniphier-ld20-peri-reset"; 88 + compatible = "socionext,uniphier-ld11-peri-reset"; 89 89 #reset-cells = <1>; 90 90 }; 91 91
+3 -1
Documentation/devicetree/bindings/serial/cdns,uart.txt
··· 1 1 Binding for Cadence UART Controller 2 2 3 3 Required properties: 4 - - compatible : should be "cdns,uart-r1p8", or "xlnx,xuartps" 4 + - compatible : 5 + Use "xlnx,xuartps","cdns,uart-r1p8" for Zynq-7xxx SoC. 6 + Use "xlnx,zynqmp-uart","cdns,uart-r1p12" for Zynq Ultrascale+ MPSoC. 5 7 - reg: Should contain UART controller registers location and length. 6 8 - interrupts: Should contain UART controller interrupts. 7 9 - clocks: Must contain phandles to the UART clocks
+8
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
··· 9 9 - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART. 10 10 - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART. 11 11 - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART. 12 + - "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART. 13 + - "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART. 14 + - "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART. 15 + - "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART. 16 + - "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART. 17 + - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART. 18 + - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART. 19 + - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART. 12 20 - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART. 13 21 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART. 14 22 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
+1 -4
Documentation/devicetree/bindings/usb/dwc2.txt
··· 28 28 - g-use-dma: enable dma usage in gadget driver. 29 29 - g-rx-fifo-size: size of rx fifo size in gadget mode. 30 30 - g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode. 31 - 32 - Deprecated properties: 33 - - g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) 34 - in gadget mode. 31 + - g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode. 35 32 36 33 Example: 37 34
+7 -4
Documentation/gpio/board.txt
··· 6 6 description of the deprecated integer-based GPIO interface please refer to 7 7 gpio-legacy.txt (actually, there is no real mapping possible with the old 8 8 interface; you just fetch an integer from somewhere and request the 9 - corresponding GPIO. 9 + corresponding GPIO). 10 10 11 11 All platforms can enable the GPIO library, but if the platform strictly 12 12 requires GPIO functionality to be present, it needs to select GPIOLIB from its ··· 162 162 163 163 Since the "led" GPIOs are mapped as active-high, this example will switch their 164 164 signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped 165 - as active-low, its actual signal will be 0 after this code. Contrary to the legacy 166 - integer GPIO interface, the active-low property is handled during mapping and is 167 - thus transparent to GPIO consumers. 165 + as active-low, its actual signal will be 0 after this code. Contrary to the 166 + legacy integer GPIO interface, the active-low property is handled during 167 + mapping and is thus transparent to GPIO consumers. 168 + 169 + A set of functions such as gpiod_set_value() is available to work with 170 + the new descriptor-oriented interface.
+8
MAINTAINERS
··· 1442 1442 F: arch/arm/configs/mvebu_*_defconfig 1443 1443 1444 1444 ARM/Marvell Berlin SoC support 1445 + M: Jisheng Zhang <jszhang@marvell.com> 1445 1446 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1446 1447 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1447 1448 S: Maintained ··· 5288 5287 S: Maintained 5289 5288 F: scripts/get_maintainer.pl 5290 5289 5290 + GENWQE (IBM Generic Workqueue Card) 5291 + M: Frank Haverkamp <haver@linux.vnet.ibm.com> 5292 + M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com> 5293 + S: Supported 5294 + F: drivers/misc/genwqe/ 5295 + 5291 5296 GFS2 FILE SYSTEM 5292 5297 M: Steven Whitehouse <swhiteho@redhat.com> 5293 5298 M: Bob Peterson <rpeterso@redhat.com> ··· 8107 8100 F: drivers/media/dvb-frontends/mn88473* 8108 8101 8109 8102 MODULE SUPPORT 8103 + M: Jessica Yu <jeyu@redhat.com> 8110 8104 M: Rusty Russell <rusty@rustcorp.com.au> 8111 8105 S: Maintained 8112 8106 F: include/linux/module.h
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 9 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc2 4 + EXTRAVERSION = -rc3 5 5 NAME = Psychotic Stoned Sheep 6 6 7 7 # *DOCUMENTATION*
+11 -16
arch/arc/Kconfig
··· 41 41 select PERF_USE_VMALLOC 42 42 select HAVE_DEBUG_STACKOVERFLOW 43 43 select HAVE_GENERIC_DMA_COHERENT 44 + select HAVE_KERNEL_GZIP 45 + select HAVE_KERNEL_LZMA 44 46 45 47 config MIGHT_HAVE_PCI 46 48 bool ··· 188 186 config ARC_HAS_COH_CACHES 189 187 def_bool n 190 188 191 - config ARC_MCIP 192 - bool "ARConnect Multicore IP (MCIP) Support " 193 - depends on ISA_ARCV2 194 - help 195 - This IP block enables SMP in ARC-HS38 cores. 196 - It provides for cross-core interrupts, multi-core debug 197 - hardware semaphores, shared memory,.... 198 - 199 189 config NR_CPUS 200 190 int "Maximum number of CPUs (2-4096)" 201 191 range 2 4096 ··· 204 210 entry point and spin wait for Master's signal. 205 211 206 212 endif #SMP 213 + 214 + config ARC_MCIP 215 + bool "ARConnect Multicore IP (MCIP) Support " 216 + depends on ISA_ARCV2 217 + default y if SMP 218 + help 219 + This IP block enables SMP in ARC-HS38 cores. 220 + It provides for cross-core interrupts, multi-core debug 221 + hardware semaphores, shared memory,.... 207 222 208 223 menuconfig ARC_CACHE 209 224 bool "Enable Cache Support" ··· 539 536 config ARC_DBG_TLB_PARANOIA 540 537 bool "Paranoia Checks in Low Level TLB Handlers" 541 538 default n 542 - 543 - config ARC_DBG_TLB_MISS_COUNT 544 - bool "Profile TLB Misses" 545 - default n 546 - select DEBUG_FS 547 - help 548 - Counts number of I and D TLB Misses and exports them via Debugfs 549 - The counters can be cleared via Debugfs as well 550 539 551 540 endif 552 541
-3
arch/arc/Makefile
··· 50 50 51 51 cflags-$(atleast_gcc44) += -fsection-anchors 52 52 53 - cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock 54 - cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape 55 - 56 53 ifdef CONFIG_ISA_ARCV2 57 54 58 55 ifndef CONFIG_ARC_HAS_LL64
+14 -2
arch/arc/boot/Makefile
··· 14 14 15 15 suffix-y := bin 16 16 suffix-$(CONFIG_KERNEL_GZIP) := gz 17 + suffix-$(CONFIG_KERNEL_LZMA) := lzma 17 18 18 - targets += uImage uImage.bin uImage.gz 19 - extra-y += vmlinux.bin vmlinux.bin.gz 19 + targets += uImage 20 + targets += uImage.bin 21 + targets += uImage.gz 22 + targets += uImage.lzma 23 + extra-y += vmlinux.bin 24 + extra-y += vmlinux.bin.gz 25 + extra-y += vmlinux.bin.lzma 20 26 21 27 $(obj)/vmlinux.bin: vmlinux FORCE 22 28 $(call if_changed,objcopy) ··· 30 24 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 31 25 $(call if_changed,gzip) 32 26 27 + $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE 28 + $(call if_changed,lzma) 29 + 33 30 $(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE 34 31 $(call if_changed,uimage,none) 35 32 36 33 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE 37 34 $(call if_changed,uimage,gzip) 35 + 36 + $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE 37 + $(call if_changed,uimage,lzma) 38 38 39 39 $(obj)/uImage: $(obj)/uImage.$(suffix-y) 40 40 @ln -sf $(notdir $<) $@
+2 -1
arch/arc/include/asm/arcregs.h
··· 349 349 struct cpuinfo_arc_bpu bpu; 350 350 struct bcr_identity core; 351 351 struct bcr_isa isa; 352 + const char *details, *name; 352 353 unsigned int vec_base; 353 354 struct cpuinfo_arc_ccm iccm, dccm; 354 355 struct { 355 - unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3, 356 + unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, 356 357 fpu_sp:1, fpu_dp:1, pad2:6, 357 358 debug:1, ap:1, smart:1, rtt:1, pad3:4, 358 359 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
+1 -1
arch/arc/include/asm/cache.h
··· 53 53 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); 54 54 extern void read_decode_cache_bcr(void); 55 55 56 - extern int ioc_exists; 56 + extern int ioc_enable; 57 57 extern unsigned long perip_base, perip_end; 58 58 59 59 #endif /* !__ASSEMBLY__ */
+1 -1
arch/arc/include/asm/elf.h
··· 54 54 * the loader. We need to make sure that it is out of the way of the program 55 55 * that it will "exec", and that there is sufficient room for the brk. 56 56 */ 57 - #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) 57 + #define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3) 58 58 59 59 /* 60 60 * When the program starts, a1 contains a pointer to a function to be
+16
arch/arc/include/asm/mcip.h
··· 55 55 #define IDU_M_DISTRI_DEST 0x2 56 56 }; 57 57 58 + struct mcip_bcr { 59 + #ifdef CONFIG_CPU_BIG_ENDIAN 60 + unsigned int pad3:8, 61 + idu:1, llm:1, num_cores:6, 62 + iocoh:1, gfrc:1, dbg:1, pad2:1, 63 + msg:1, sem:1, ipi:1, pad:1, 64 + ver:8; 65 + #else 66 + unsigned int ver:8, 67 + pad:1, ipi:1, sem:1, msg:1, 68 + pad2:1, dbg:1, gfrc:1, iocoh:1, 69 + num_cores:6, llm:1, idu:1, 70 + pad3:8; 71 + #endif 72 + }; 73 + 58 74 /* 59 75 * MCIP programming model 60 76 *
+1
arch/arc/include/asm/module.h
··· 18 18 struct mod_arch_specific { 19 19 void *unw_info; 20 20 int unw_sec_idx; 21 + const char *secstr; 21 22 }; 22 23 #endif 23 24
+1 -5
arch/arc/include/asm/setup.h
··· 27 27 const char *str; 28 28 }; 29 29 30 - struct cpuinfo_data { 31 - struct id_to_str info; 32 - int up_range; 33 - }; 34 - 35 30 extern int root_mountflags, end_mem; 36 31 37 32 void setup_processor(void); ··· 38 43 #define IS_USED_RUN(v) ((v) ? "" : "(not used) ") 39 44 #define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg)) 40 45 #define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg)) 46 + #define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2)) 41 47 42 48 #endif /* __ASMARC_SETUP_H */
+1
arch/arc/include/asm/syscalls.h
··· 17 17 int sys_cacheflush(uint32_t, uint32_t uint32_t); 18 18 int sys_arc_settls(void *); 19 19 int sys_arc_gettls(void); 20 + int sys_arc_usr_cmpxchg(int *, int, int); 20 21 21 22 #include <asm-generic/syscalls.h> 22 23
+5 -4
arch/arc/include/uapi/asm/unistd.h
··· 27 27 28 28 #define NR_syscalls __NR_syscalls 29 29 30 + /* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ 31 + #define __NR_sysfs (__NR_arch_specific_syscall + 3) 32 + 30 33 /* ARC specific syscall */ 31 34 #define __NR_cacheflush (__NR_arch_specific_syscall + 0) 32 35 #define __NR_arc_settls (__NR_arch_specific_syscall + 1) 33 36 #define __NR_arc_gettls (__NR_arch_specific_syscall + 2) 37 + #define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4) 34 38 35 39 __SYSCALL(__NR_cacheflush, sys_cacheflush) 36 40 __SYSCALL(__NR_arc_settls, sys_arc_settls) 37 41 __SYSCALL(__NR_arc_gettls, sys_arc_gettls) 38 - 39 - 40 - /* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ 41 - #define __NR_sysfs (__NR_arch_specific_syscall + 3) 42 + __SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg) 42 43 __SYSCALL(__NR_sysfs, sys_sysfs) 43 44 44 45 #undef __SYSCALL
+11 -20
arch/arc/kernel/mcip.c
··· 15 15 #include <asm/mcip.h> 16 16 #include <asm/setup.h> 17 17 18 - static char smp_cpuinfo_buf[128]; 19 - static int idu_detected; 20 - 21 18 static DEFINE_RAW_SPINLOCK(mcip_lock); 19 + 20 + #ifdef CONFIG_SMP 21 + 22 + static char smp_cpuinfo_buf[128]; 22 23 23 24 static void mcip_setup_per_cpu(int cpu) 24 25 { ··· 87 86 88 87 static void mcip_probe_n_setup(void) 89 88 { 90 - struct mcip_bcr { 91 - #ifdef CONFIG_CPU_BIG_ENDIAN 92 - unsigned int pad3:8, 93 - idu:1, llm:1, num_cores:6, 94 - iocoh:1, gfrc:1, dbg:1, pad2:1, 95 - msg:1, sem:1, ipi:1, pad:1, 96 - ver:8; 97 - #else 98 - unsigned int ver:8, 99 - pad:1, ipi:1, sem:1, msg:1, 100 - pad2:1, dbg:1, gfrc:1, iocoh:1, 101 - num_cores:6, llm:1, idu:1, 102 - pad3:8; 103 - #endif 104 - } mp; 89 + struct mcip_bcr mp; 105 90 106 91 READ_BCR(ARC_REG_MCIP_BCR, mp); 107 92 ··· 101 114 IS_AVAIL1(mp.gfrc, "GFRC")); 102 115 103 116 cpuinfo_arc700[0].extn.gfrc = mp.gfrc; 104 - idu_detected = mp.idu; 105 117 106 118 if (mp.dbg) { 107 119 __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf); ··· 115 129 .ipi_send = mcip_ipi_send, 116 130 .ipi_clear = mcip_ipi_clear, 117 131 }; 132 + 133 + #endif 118 134 119 135 /*************************************************************************** 120 136 * ARCv2 Interrupt Distribution Unit (IDU) ··· 283 295 /* Read IDU BCR to confirm nr_irqs */ 284 296 int nr_irqs = of_irq_count(intc); 285 297 int i, irq; 298 + struct mcip_bcr mp; 286 299 287 - if (!idu_detected) 300 + READ_BCR(ARC_REG_MCIP_BCR, mp); 301 + 302 + if (!mp.idu) 288 303 panic("IDU not detected, but DeviceTree using it"); 289 304 290 305 pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
+28 -23
arch/arc/kernel/module.c
··· 30 30 char *secstr, struct module *mod) 31 31 { 32 32 #ifdef CONFIG_ARC_DW2_UNWIND 33 - int i; 34 - 35 33 mod->arch.unw_sec_idx = 0; 36 34 mod->arch.unw_info = NULL; 37 - 38 - for (i = 1; i < hdr->e_shnum; i++) { 39 - if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) { 40 - mod->arch.unw_sec_idx = i; 41 - break; 42 - } 43 - } 35 + mod->arch.secstr = secstr; 44 36 #endif 45 37 return 0; 46 38 } ··· 51 59 unsigned int relsec, /* sec index for relo sec */ 52 60 struct module *module) 53 61 { 54 - int i, n; 62 + int i, n, relo_type; 55 63 Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr; 56 64 Elf32_Sym *sym_entry, *sym_sec; 57 - Elf32_Addr relocation; 58 - Elf32_Addr location; 59 - Elf32_Addr sec_to_patch; 60 - int relo_type; 65 + Elf32_Addr relocation, location, tgt_addr; 66 + unsigned int tgtsec; 61 67 62 - sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr; 68 + /* 69 + * @relsec has relocations e.g. .rela.init.text 70 + * @tgtsec is section to patch e.g. .init.text 71 + */ 72 + tgtsec = sechdrs[relsec].sh_info; 73 + tgt_addr = sechdrs[tgtsec].sh_addr; 63 74 sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr; 64 75 n = sechdrs[relsec].sh_size / sizeof(*rel_entry); 65 76 66 - pr_debug("\n========== Module Sym reloc ===========================\n"); 67 - pr_debug("Section to fixup %x\n", sec_to_patch); 77 + pr_debug("\nSection to fixup %s @%x\n", 78 + module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr); 68 79 pr_debug("=========================================================\n"); 69 - pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n"); 80 + pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n"); 70 81 pr_debug("=========================================================\n"); 71 82 72 83 /* Loop thru entries in relocation section */ 73 84 for (i = 0; i < n; i++) { 85 + const char *s; 74 86 75 87 /* This is where to make the change */ 76 - location = sec_to_patch + rel_entry[i].r_offset; 88 + location = tgt_addr + 
rel_entry[i].r_offset; 77 89 78 90 /* This is the symbol it is referring to. Note that all 79 91 undefined symbols have been resolved. */ ··· 85 89 86 90 relocation = sym_entry->st_value + rel_entry[i].r_addend; 87 91 88 - pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n", 89 - rel_entry[i].r_offset, rel_entry[i].r_addend, 90 - sym_entry->st_value, location, relocation, 91 - strtab + sym_entry->st_name); 92 + if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) { 93 + s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name; 94 + } else { 95 + s = strtab + sym_entry->st_name; 96 + } 97 + 98 + pr_debug(" %x\t%x\t%x %x %x [%s]\n", 99 + rel_entry[i].r_offset, rel_entry[i].r_addend, 100 + sym_entry->st_value, location, relocation, s); 92 101 93 102 /* This assumes modules are built with -mlong-calls 94 103 * so any branches/jumps are absolute 32 bit jmps ··· 112 111 goto relo_err; 113 112 114 113 } 114 + 115 + if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0) 116 + module->arch.unw_sec_idx = tgtsec; 117 + 115 118 return 0; 116 119 117 120 relo_err:
+33
arch/arc/kernel/process.c
··· 41 41 return task_thread_info(current)->thr_ptr; 42 42 } 43 43 44 + SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) 45 + { 46 + int uval; 47 + int ret; 48 + 49 + /* 50 + * This is only for old cores lacking LLOCK/SCOND, which by definition 51 + * can't possibly be SMP. Thus doesn't need to be SMP safe. 52 + * And this also helps reduce the overhead for serializing in 53 + * the UP case 54 + */ 55 + WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP)); 56 + 57 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 58 + return -EFAULT; 59 + 60 + preempt_disable(); 61 + 62 + ret = __get_user(uval, uaddr); 63 + if (ret) 64 + goto done; 65 + 66 + if (uval != expected) 67 + ret = -EAGAIN; 68 + else 69 + ret = __put_user(new, uaddr); 70 + 71 + done: 72 + preempt_enable(); 73 + 74 + return ret; 75 + } 76 + 44 77 void arch_cpu_idle(void) 45 78 { 46 79 /* sleep, but enable all interrupts before committing */
+64 -51
arch/arc/kernel/setup.c
··· 40 40 41 41 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; 42 42 43 + static const struct id_to_str arc_cpu_rel[] = { 44 + #ifdef CONFIG_ISA_ARCOMPACT 45 + { 0x34, "R4.10"}, 46 + { 0x35, "R4.11"}, 47 + #else 48 + { 0x51, "R2.0" }, 49 + { 0x52, "R2.1" }, 50 + { 0x53, "R3.0" }, 51 + #endif 52 + { 0x00, NULL } 53 + }; 54 + 55 + static const struct id_to_str arc_cpu_nm[] = { 56 + #ifdef CONFIG_ISA_ARCOMPACT 57 + { 0x20, "ARC 600" }, 58 + { 0x30, "ARC 770" }, /* 750 identified seperately */ 59 + #else 60 + { 0x40, "ARC EM" }, 61 + { 0x50, "ARC HS38" }, 62 + #endif 63 + { 0x00, "Unknown" } 64 + }; 65 + 43 66 static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu) 44 67 { 45 68 if (is_isa_arcompact()) { ··· 115 92 struct bcr_timer timer; 116 93 struct bcr_generic bcr; 117 94 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 95 + const struct id_to_str *tbl; 96 + 118 97 FIX_PTR(cpu); 119 98 120 99 READ_BCR(AUX_IDENTITY, cpu->core); 121 100 READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); 101 + 102 + for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) { 103 + if (cpu->core.family == tbl->id) { 104 + cpu->details = tbl->str; 105 + break; 106 + } 107 + } 108 + 109 + for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) { 110 + if ((cpu->core.family & 0xF0) == tbl->id) 111 + break; 112 + } 113 + cpu->name = tbl->str; 122 114 123 115 READ_BCR(ARC_REG_TIMERS_BCR, timer); 124 116 cpu->extn.timer0 = timer.t0; ··· 149 111 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */ 150 112 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0; 151 113 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */ 114 + cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 : 115 + IS_ENABLED(CONFIG_ARC_HAS_SWAPE); 116 + 152 117 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem); 153 118 154 119 /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */ ··· 201 160 cpu->extn.rtt = bcr.ver ? 
1 : 0; 202 161 203 162 cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; 163 + 164 + /* some hacks for lack of feature BCR info in old ARC700 cores */ 165 + if (is_isa_arcompact()) { 166 + if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */ 167 + cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC); 168 + else 169 + cpu->isa.atomic = cpu->isa.atomic1; 170 + 171 + cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); 172 + 173 + /* there's no direct way to distinguish 750 vs. 770 */ 174 + if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3)) 175 + cpu->name = "ARC750"; 176 + } 204 177 } 205 - 206 - static const struct cpuinfo_data arc_cpu_tbl[] = { 207 - #ifdef CONFIG_ISA_ARCOMPACT 208 - { {0x20, "ARC 600" }, 0x2F}, 209 - { {0x30, "ARC 700" }, 0x33}, 210 - { {0x34, "ARC 700 R4.10"}, 0x34}, 211 - { {0x35, "ARC 700 R4.11"}, 0x35}, 212 - #else 213 - { {0x50, "ARC HS38 R2.0"}, 0x51}, 214 - { {0x52, "ARC HS38 R2.1"}, 0x52}, 215 - { {0x53, "ARC HS38 R3.0"}, 0x53}, 216 - #endif 217 - { {0x00, NULL } } 218 - }; 219 - 220 178 221 179 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) 222 180 { 223 181 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; 224 182 struct bcr_identity *core = &cpu->core; 225 - const struct cpuinfo_data *tbl; 226 - char *isa_nm; 227 - int i, be, atomic; 228 - int n = 0; 183 + int i, n = 0; 229 184 230 185 FIX_PTR(cpu); 231 - 232 - if (is_isa_arcompact()) { 233 - isa_nm = "ARCompact"; 234 - be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); 235 - 236 - atomic = cpu->isa.atomic1; 237 - if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */ 238 - atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC); 239 - } else { 240 - isa_nm = "ARCv2"; 241 - be = cpu->isa.be; 242 - atomic = cpu->isa.atomic; 243 - } 244 186 245 187 n += scnprintf(buf + n, len - n, 246 188 "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n", 247 189 core->family, core->cpu_id, core->chip_id); 248 190 249 - for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) { 
250 - if ((core->family >= tbl->info.id) && 251 - (core->family <= tbl->up_range)) { 252 - n += scnprintf(buf + n, len - n, 253 - "processor [%d]\t: %s (%s ISA) %s\n", 254 - cpu_id, tbl->info.str, isa_nm, 255 - IS_AVAIL1(be, "[Big-Endian]")); 256 - break; 257 - } 258 - } 259 - 260 - if (tbl->info.id == 0) 261 - n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n"); 191 + n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n", 192 + cpu_id, cpu->name, cpu->details, 193 + is_isa_arcompact() ? "ARCompact" : "ARCv2", 194 + IS_AVAIL1(cpu->isa.be, "[Big-Endian]")); 262 195 263 196 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ", 264 197 IS_AVAIL1(cpu->extn.timer0, "Timer0 "), ··· 241 226 CONFIG_ARC_HAS_RTC)); 242 227 243 228 n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", 244 - IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC), 229 + IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC), 245 230 IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64), 246 231 IS_AVAIL1(cpu->isa.unalign, "unalign (not used)")); 247 232 ··· 268 253 IS_AVAIL1(cpu->extn.swap, "swap "), 269 254 IS_AVAIL1(cpu->extn.minmax, "minmax "), 270 255 IS_AVAIL1(cpu->extn.crc, "crc "), 271 - IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE)); 256 + IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE)); 272 257 273 258 if (cpu->bpu.ver) 274 259 n += scnprintf(buf + n, len - n, ··· 287 272 288 273 FIX_PTR(cpu); 289 274 290 - n += scnprintf(buf + n, len - n, 291 - "Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n", 292 - cpu->vec_base, perip_base, perip_end); 275 + n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base); 293 276 294 277 if (cpu->extn.fpu_sp || cpu->extn.fpu_dp) 295 278 n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n", ··· 520 507 * way to pass it w/o having to kmalloc/free a 2 byte string. 521 508 * Encode cpu-id as 0xFFcccc, which is decoded by show routine. 522 509 */ 523 - return *pos < num_possible_cpus() ? 
cpu_to_ptr(*pos) : NULL; 510 + return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; 524 511 } 525 512 526 513 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-110
arch/arc/kernel/troubleshoot.c
··· 237 237 if (!user_mode(regs)) 238 238 show_stacktrace(current, regs); 239 239 } 240 - 241 - #ifdef CONFIG_DEBUG_FS 242 - 243 - #include <linux/module.h> 244 - #include <linux/fs.h> 245 - #include <linux/mount.h> 246 - #include <linux/pagemap.h> 247 - #include <linux/init.h> 248 - #include <linux/namei.h> 249 - #include <linux/debugfs.h> 250 - 251 - static struct dentry *test_dentry; 252 - static struct dentry *test_dir; 253 - static struct dentry *test_u32_dentry; 254 - 255 - static u32 clr_on_read = 1; 256 - 257 - #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT 258 - u32 numitlb, numdtlb, num_pte_not_present; 259 - 260 - static int fill_display_data(char *kbuf) 261 - { 262 - size_t num = 0; 263 - num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb); 264 - num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb); 265 - num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present); 266 - 267 - if (clr_on_read) 268 - numitlb = numdtlb = num_pte_not_present = 0; 269 - 270 - return num; 271 - } 272 - 273 - static int tlb_stats_open(struct inode *inode, struct file *file) 274 - { 275 - file->private_data = (void *)__get_free_page(GFP_KERNEL); 276 - return 0; 277 - } 278 - 279 - /* called on user read(): display the counters */ 280 - static ssize_t tlb_stats_output(struct file *file, /* file descriptor */ 281 - char __user *user_buf, /* user buffer */ 282 - size_t len, /* length of buffer */ 283 - loff_t *offset) /* offset in the file */ 284 - { 285 - size_t num; 286 - char *kbuf = (char *)file->private_data; 287 - 288 - /* All of the data can he shoved in one iteration */ 289 - if (*offset != 0) 290 - return 0; 291 - 292 - num = fill_display_data(kbuf); 293 - 294 - /* simple_read_from_buffer() is helper for copy to user space 295 - It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset 296 - @3 (offset) into the user space address starting at @1 (user_buf). 
297 - @5 (len) is max size of user buffer 298 - */ 299 - return simple_read_from_buffer(user_buf, num, offset, kbuf, len); 300 - } 301 - 302 - /* called on user write : clears the counters */ 303 - static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf, 304 - size_t length, loff_t *offset) 305 - { 306 - numitlb = numdtlb = num_pte_not_present = 0; 307 - return length; 308 - } 309 - 310 - static int tlb_stats_close(struct inode *inode, struct file *file) 311 - { 312 - free_page((unsigned long)(file->private_data)); 313 - return 0; 314 - } 315 - 316 - static const struct file_operations tlb_stats_file_ops = { 317 - .read = tlb_stats_output, 318 - .write = tlb_stats_clear, 319 - .open = tlb_stats_open, 320 - .release = tlb_stats_close 321 - }; 322 - #endif 323 - 324 - static int __init arc_debugfs_init(void) 325 - { 326 - test_dir = debugfs_create_dir("arc", NULL); 327 - 328 - #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT 329 - test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL, 330 - &tlb_stats_file_ops); 331 - #endif 332 - 333 - test_u32_dentry = 334 - debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read); 335 - 336 - return 0; 337 - } 338 - 339 - module_init(arc_debugfs_init); 340 - 341 - static void __exit arc_debugfs_exit(void) 342 - { 343 - debugfs_remove(test_u32_dentry); 344 - debugfs_remove(test_dentry); 345 - debugfs_remove(test_dir); 346 - } 347 - module_exit(arc_debugfs_exit); 348 - 349 - #endif
+9 -10
arch/arc/mm/cache.c
··· 22 22 #include <asm/setup.h> 23 23 24 24 static int l2_line_sz; 25 - int ioc_exists; 26 - volatile int slc_enable = 1, ioc_enable = 1; 25 + static int ioc_exists; 26 + int slc_enable = 1, ioc_enable = 1; 27 27 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ 28 28 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ 29 29 ··· 53 53 PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); 54 54 PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); 55 55 56 - if (!is_isa_arcv2()) 57 - return buf; 58 - 59 56 p = &cpuinfo_arc700[c].slc; 60 57 if (p->ver) 61 58 n += scnprintf(buf + n, len - n, 62 59 "SLC\t\t: %uK, %uB Line%s\n", 63 60 p->sz_k, p->line_len, IS_USED_RUN(slc_enable)); 64 61 65 - if (ioc_exists) 66 - n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n", 67 - IS_DISABLED_RUN(ioc_enable)); 62 + n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", 63 + perip_base, 64 + IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency ")); 68 65 69 66 return buf; 70 67 } ··· 110 113 } 111 114 112 115 READ_BCR(ARC_REG_CLUSTER_BCR, cbcr); 113 - if (cbcr.c && ioc_enable) 116 + if (cbcr.c) 114 117 ioc_exists = 1; 118 + else 119 + ioc_enable = 0; 115 120 116 121 /* HS 2.0 didn't have AUX_VOL */ 117 122 if (cpuinfo_arc700[cpu].core.family > 0x51) { ··· 1001 1002 read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE); 1002 1003 } 1003 1004 1004 - if (is_isa_arcv2() && ioc_exists) { 1005 + if (is_isa_arcv2() && ioc_enable) { 1005 1006 /* IO coherency base - 0x8z */ 1006 1007 write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000); 1007 1008 /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
+2 -2
arch/arc/mm/dma.c
··· 45 45 * -For coherent data, Read/Write to buffers terminate early in cache 46 46 * (vs. always going to memory - thus are faster) 47 47 */ 48 - if ((is_isa_arcv2() && ioc_exists) || 48 + if ((is_isa_arcv2() && ioc_enable) || 49 49 (attrs & DMA_ATTR_NON_CONSISTENT)) 50 50 need_coh = 0; 51 51 ··· 97 97 int is_non_coh = 1; 98 98 99 99 is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || 100 - (is_isa_arcv2() && ioc_exists); 100 + (is_isa_arcv2() && ioc_enable); 101 101 102 102 if (PageHighMem(page) || !is_non_coh) 103 103 iounmap((void __force __iomem *)vaddr);
+3 -3
arch/arc/mm/tlb.c
··· 793 793 char super_pg[64] = ""; 794 794 795 795 if (p_mmu->s_pg_sz_m) 796 - scnprintf(super_pg, 64, "%dM Super Page%s, ", 796 + scnprintf(super_pg, 64, "%dM Super Page %s", 797 797 p_mmu->s_pg_sz_m, 798 798 IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE)); 799 799 800 800 n += scnprintf(buf + n, len - n, 801 - "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n", 801 + "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n", 802 802 p_mmu->ver, p_mmu->pg_sz_k, super_pg, 803 803 p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways, 804 804 p_mmu->u_dtlb, p_mmu->u_itlb, 805 - IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40)); 805 + IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40)); 806 806 807 807 return buf; 808 808 }
-21
arch/arc/mm/tlbex.S
··· 237 237 238 238 2: 239 239 240 - #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT 241 - and.f 0, r0, _PAGE_PRESENT 242 - bz 1f 243 - ld r3, [num_pte_not_present] 244 - add r3, r3, 1 245 - st r3, [num_pte_not_present] 246 - 1: 247 - #endif 248 - 249 240 .endm 250 241 251 242 ;----------------------------------------------------------------- ··· 300 309 301 310 TLBMISS_FREEUP_REGS 302 311 303 - #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT 304 - ld r0, [@numitlb] 305 - add r0, r0, 1 306 - st r0, [@numitlb] 307 - #endif 308 - 309 312 ;---------------------------------------------------------------- 310 313 ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA 311 314 LOAD_FAULT_PTE ··· 333 348 ENTRY(EV_TLBMissD) 334 349 335 350 TLBMISS_FREEUP_REGS 336 - 337 - #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT 338 - ld r0, [@numdtlb] 339 - add r0, r0, 1 340 - st r0, [@numdtlb] 341 - #endif 342 351 343 352 ;---------------------------------------------------------------- 344 353 ; Get the PTE corresponding to V-addr accessed
+13 -2
arch/arm/boot/dts/ste-snowball.dts
··· 239 239 arm,primecell-periphid = <0x10480180>; 240 240 max-frequency = <100000000>; 241 241 bus-width = <4>; 242 + cap-sd-highspeed; 242 243 cap-mmc-highspeed; 244 + sd-uhs-sdr12; 245 + sd-uhs-sdr25; 246 + /* All direction control is used */ 247 + st,sig-dir-cmd; 248 + st,sig-dir-dat0; 249 + st,sig-dir-dat2; 250 + st,sig-dir-dat31; 251 + st,sig-pin-fbclk; 252 + full-pwr-cycle; 243 253 vmmc-supply = <&ab8500_ldo_aux3_reg>; 244 254 vqmmc-supply = <&vmmci>; 245 255 pinctrl-names = "default", "sleep"; 246 256 pinctrl-0 = <&sdi0_default_mode>; 247 257 pinctrl-1 = <&sdi0_sleep_mode>; 248 258 249 - cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218 259 + /* GPIO218 MMC_CD */ 260 + cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; 250 261 251 262 status = "okay"; 252 263 }; ··· 560 549 /* VMMCI level-shifter enable */ 561 550 snowball_cfg3 { 562 551 pins = "GPIO217_AH12"; 563 - ste,config = <&gpio_out_lo>; 552 + ste,config = <&gpio_out_hi>; 564 553 }; 565 554 /* VMMCI level-shifter voltage select */ 566 555 snowball_cfg4 {
+2 -2
arch/arm/boot/dts/uniphier-pro5.dtsi
··· 184 184 }; 185 185 186 186 &mio_clk { 187 - compatible = "socionext,uniphier-pro5-mio-clock"; 187 + compatible = "socionext,uniphier-pro5-sd-clock"; 188 188 }; 189 189 190 190 &mio_rst { 191 - compatible = "socionext,uniphier-pro5-mio-reset"; 191 + compatible = "socionext,uniphier-pro5-sd-reset"; 192 192 }; 193 193 194 194 &peri_clk {
+2 -2
arch/arm/boot/dts/uniphier-pxs2.dtsi
··· 197 197 }; 198 198 199 199 &mio_clk { 200 - compatible = "socionext,uniphier-pxs2-mio-clock"; 200 + compatible = "socionext,uniphier-pxs2-sd-clock"; 201 201 }; 202 202 203 203 &mio_rst { 204 - compatible = "socionext,uniphier-pxs2-mio-reset"; 204 + compatible = "socionext,uniphier-pxs2-sd-reset"; 205 205 }; 206 206 207 207 &peri_clk {
+1 -1
arch/arm/boot/dts/vf500.dtsi
··· 70 70 global_timer: timer@40002200 { 71 71 compatible = "arm,cortex-a9-global-timer"; 72 72 reg = <0x40002200 0x20>; 73 - interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; 73 + interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>; 74 74 interrupt-parent = <&intc>; 75 75 clocks = <&clks VF610_CLK_PLATFORM_BUS>; 76 76 };
+1
arch/arm/configs/multi_v7_defconfig
··· 850 850 CONFIG_PWM_TEGRA=y 851 851 CONFIG_PWM_VT8500=y 852 852 CONFIG_PHY_HIX5HD2_SATA=y 853 + CONFIG_E1000E=y 853 854 CONFIG_PWM_STI=y 854 855 CONFIG_PWM_BCM2835=y 855 856 CONFIG_PWM_BRCMSTB=m
+13 -4
arch/arm/mach-imx/gpc.c
··· 408 408 static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg) 409 409 { 410 410 struct clk *clk; 411 - int i; 411 + int i, ret; 412 412 413 413 imx6q_pu_domain.reg = pu_reg; 414 414 ··· 430 430 if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) 431 431 return 0; 432 432 433 - pm_genpd_init(&imx6q_pu_domain.base, NULL, false); 434 - return of_genpd_add_provider_onecell(dev->of_node, 435 - &imx_gpc_onecell_data); 433 + for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++) 434 + pm_genpd_init(imx_gpc_domains[i], NULL, false); 436 435 436 + ret = of_genpd_add_provider_onecell(dev->of_node, 437 + &imx_gpc_onecell_data); 438 + if (ret) 439 + goto power_off; 440 + 441 + return 0; 442 + 443 + power_off: 444 + imx6q_pm_pu_power_off(&imx6q_pu_domain.base); 437 445 clk_err: 438 446 while (i--) 439 447 clk_put(imx6q_pu_domain.clk[i]); 448 + imx6q_pu_domain.reg = NULL; 440 449 return -EINVAL; 441 450 } 442 451
+1 -1
arch/arm/mach-imx/mach-imx6q.c
··· 173 173 ksz9021rn_phy_fixup); 174 174 phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK, 175 175 ksz9031rn_phy_fixup); 176 - phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff, 176 + phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef, 177 177 ar8031_phy_fixup); 178 178 phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef, 179 179 ar8035_phy_fixup);
+1 -3
arch/arm/mach-mvebu/Kconfig
··· 23 23 select CACHE_L2X0 24 24 select ARM_CPU_SUSPEND 25 25 select MACH_MVEBU_ANY 26 + select MVEBU_CLK_COREDIV 26 27 27 28 config MACH_ARMADA_370 28 29 bool "Marvell Armada 370 boards" ··· 33 32 select CPU_PJ4B 34 33 select MACH_MVEBU_V7 35 34 select PINCTRL_ARMADA_370 36 - select MVEBU_CLK_COREDIV 37 35 help 38 36 Say 'Y' here if you want your kernel to support boards based 39 37 on the Marvell Armada 370 SoC with device tree. ··· 50 50 select HAVE_SMP 51 51 select MACH_MVEBU_V7 52 52 select PINCTRL_ARMADA_375 53 - select MVEBU_CLK_COREDIV 54 53 help 55 54 Say 'Y' here if you want your kernel to support boards based 56 55 on the Marvell Armada 375 SoC with device tree. ··· 67 68 select HAVE_SMP 68 69 select MACH_MVEBU_V7 69 70 select PINCTRL_ARMADA_38X 70 - select MVEBU_CLK_COREDIV 71 71 help 72 72 Say 'Y' here if you want your kernel to support boards based 73 73 on the Marvell Armada 380/385 SoC with device tree.
+1
arch/arm/mach-uniphier/Kconfig
··· 1 1 config ARCH_UNIPHIER 2 2 bool "Socionext UniPhier SoCs" 3 3 depends on ARCH_MULTI_V7 4 + select ARCH_HAS_RESET_CONTROLLER 4 5 select ARM_AMBA 5 6 select ARM_GLOBAL_TIMER 6 7 select ARM_GIC
+1
arch/arm64/Kconfig.platforms
··· 190 190 191 191 config ARCH_UNIPHIER 192 192 bool "Socionext UniPhier SoC Family" 193 + select ARCH_HAS_RESET_CONTROLLER 193 194 select PINCTRL 194 195 help 195 196 This enables support for Socionext UniPhier SoC family.
+2
arch/arm64/boot/dts/broadcom/ns2-svk.dts
··· 164 164 nand-ecc-mode = "hw"; 165 165 nand-ecc-strength = <8>; 166 166 nand-ecc-step-size = <512>; 167 + nand-bus-width = <16>; 168 + brcm,nand-oob-sector-size = <16>; 167 169 #address-cells = <1>; 168 170 #size-cells = <1>; 169 171 };
+1
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
··· 123 123 <1 14 0xf08>, /* Physical Non-Secure PPI */ 124 124 <1 11 0xf08>, /* Virtual PPI */ 125 125 <1 10 0xf08>; /* Hypervisor PPI */ 126 + fsl,erratum-a008585; 126 127 }; 127 128 128 129 pmu {
+1
arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
··· 195 195 <1 14 4>, /* Physical Non-Secure PPI, active-low */ 196 196 <1 11 4>, /* Virtual PPI, active-low */ 197 197 <1 10 4>; /* Hypervisor PPI, active-low */ 198 + fsl,erratum-a008585; 198 199 }; 199 200 200 201 pmu {
+1 -1
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
··· 131 131 #address-cells = <0x1>; 132 132 #size-cells = <0x0>; 133 133 cell-index = <1>; 134 - clocks = <&cpm_syscon0 0 3>; 134 + clocks = <&cpm_syscon0 1 21>; 135 135 status = "disabled"; 136 136 }; 137 137
-3
arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
··· 116 116 cap-mmc-highspeed; 117 117 clock-frequency = <150000000>; 118 118 disable-wp; 119 - keep-power-in-suspend; 120 119 non-removable; 121 120 num-slots = <1>; 122 121 vmmc-supply = <&vcc_io>; ··· 257 258 }; 258 259 259 260 vcc_sd: SWITCH_REG1 { 260 - regulator-always-on; 261 - regulator-boot-on; 262 261 regulator-name = "vcc_sd"; 263 262 }; 264 263
-4
arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
··· 152 152 gpio = <&gpio3 11 GPIO_ACTIVE_LOW>; 153 153 regulator-min-microvolt = <1800000>; 154 154 regulator-max-microvolt = <3300000>; 155 - regulator-always-on; 156 - regulator-boot-on; 157 155 vin-supply = <&vcc_io>; 158 156 }; 159 157 ··· 199 201 bus-width = <8>; 200 202 cap-mmc-highspeed; 201 203 disable-wp; 202 - keep-power-in-suspend; 203 204 mmc-pwrseq = <&emmc_pwrseq>; 204 205 mmc-hs200-1_2v; 205 206 mmc-hs200-1_8v; ··· 347 350 clock-freq-min-max = <400000 50000000>; 348 351 cap-sd-highspeed; 349 352 card-detect-delay = <200>; 350 - keep-power-in-suspend; 351 353 num-slots = <1>; 352 354 pinctrl-names = "default"; 353 355 pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+6 -6
arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
··· 257 257 reg = <0x59801000 0x400>; 258 258 }; 259 259 260 - mioctrl@59810000 { 261 - compatible = "socionext,uniphier-mioctrl", 260 + sdctrl@59810000 { 261 + compatible = "socionext,uniphier-ld20-sdctrl", 262 262 "simple-mfd", "syscon"; 263 263 reg = <0x59810000 0x800>; 264 264 265 - mio_clk: clock { 266 - compatible = "socionext,uniphier-ld20-mio-clock"; 265 + sd_clk: clock { 266 + compatible = "socionext,uniphier-ld20-sd-clock"; 267 267 #clock-cells = <1>; 268 268 }; 269 269 270 - mio_rst: reset { 271 - compatible = "socionext,uniphier-ld20-mio-reset"; 270 + sd_rst: reset { 271 + compatible = "socionext,uniphier-ld20-sd-reset"; 272 272 #reset-cells = <1>; 273 273 }; 274 274 };
+1 -1
arch/arm64/include/asm/memory.h
··· 217 217 #define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 218 218 #else 219 219 #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) 220 - #define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) 220 + #define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) 221 221 222 222 #define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) 223 223 #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+6 -3
arch/arm64/mm/numa.c
··· 147 147 148 148 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) 149 149 { 150 - return node_distance(from, to); 150 + return node_distance(early_cpu_to_node(from), early_cpu_to_node(to)); 151 151 } 152 152 153 153 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, ··· 223 223 void *nd; 224 224 int tnid; 225 225 226 - pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", 227 - nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); 226 + if (start_pfn < end_pfn) 227 + pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid, 228 + start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); 229 + else 230 + pr_info("Initmem setup node %d [<memory-less node>]\n", nid); 228 231 229 232 nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); 230 233 nd = __va(nd_pa);
+1 -1
arch/cris/arch-v32/drivers/cryptocop.c
··· 3149 3149 printk("print_dma_descriptors start\n"); 3150 3150 3151 3151 printk("iop:\n"); 3152 - printk("\tsid: 0x%lld\n", iop->sid); 3152 + printk("\tsid: 0x%llx\n", iop->sid); 3153 3153 3154 3154 printk("\tcdesc_out: 0x%p\n", iop->cdesc_out); 3155 3155 printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
-4
arch/h8300/include/asm/thread_info.h
··· 31 31 int cpu; /* cpu we're on */ 32 32 int preempt_count; /* 0 => preemptable, <0 => BUG */ 33 33 mm_segment_t addr_limit; 34 - struct restart_block restart_block; 35 34 }; 36 35 37 36 /* ··· 43 44 .cpu = 0, \ 44 45 .preempt_count = INIT_PREEMPT_COUNT, \ 45 46 .addr_limit = KERNEL_DS, \ 46 - .restart_block = { \ 47 - .fn = do_no_restart_syscall, \ 48 - }, \ 49 47 } 50 48 51 49 #define init_thread_info (init_thread_union.thread_info)
+1 -1
arch/h8300/kernel/signal.c
··· 79 79 unsigned int er0; 80 80 81 81 /* Always make any pending restarted system calls return -EINTR */ 82 - current_thread_info()->restart_block.fn = do_no_restart_syscall; 82 + current->restart_block.fn = do_no_restart_syscall; 83 83 84 84 /* restore passed registers */ 85 85 #define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
+1 -1
arch/powerpc/include/asm/cpuidle.h
··· 26 26 std r0,0(r1); \ 27 27 ptesync; \ 28 28 ld r0,0(r1); \ 29 - 1: cmp cr0,r0,r0; \ 29 + 1: cmpd cr0,r0,r0; \ 30 30 bne 1b; \ 31 31 IDLE_INST; \ 32 32 b .
+16
arch/powerpc/include/asm/exception-64s.h
··· 93 93 ld reg,PACAKBASE(r13); /* get high part of &label */ \ 94 94 ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l; 95 95 96 + #define __LOAD_HANDLER(reg, label) \ 97 + ld reg,PACAKBASE(r13); \ 98 + ori reg,reg,(ABS_ADDR(label))@l; 99 + 96 100 /* Exception register prefixes */ 97 101 #define EXC_HV H 98 102 #define EXC_STD ··· 210 206 #define kvmppc_interrupt kvmppc_interrupt_hv 211 207 #else 212 208 #define kvmppc_interrupt kvmppc_interrupt_pr 209 + #endif 210 + 211 + #ifdef CONFIG_RELOCATABLE 212 + #define BRANCH_TO_COMMON(reg, label) \ 213 + __LOAD_HANDLER(reg, label); \ 214 + mtctr reg; \ 215 + bctr 216 + 217 + #else 218 + #define BRANCH_TO_COMMON(reg, label) \ 219 + b label 220 + 213 221 #endif 214 222 215 223 #define __KVM_HANDLER_PROLOG(area, n) \
+12
arch/powerpc/include/asm/tlb.h
··· 52 52 return cpumask_subset(mm_cpumask(mm), 53 53 topology_sibling_cpumask(smp_processor_id())); 54 54 } 55 + 56 + static inline int mm_is_thread_local(struct mm_struct *mm) 57 + { 58 + return cpumask_equal(mm_cpumask(mm), 59 + cpumask_of(smp_processor_id())); 60 + } 61 + 55 62 #else 56 63 static inline int mm_is_core_local(struct mm_struct *mm) 64 + { 65 + return 1; 66 + } 67 + 68 + static inline int mm_is_thread_local(struct mm_struct *mm) 57 69 { 58 70 return 1; 59 71 }
+29 -21
arch/powerpc/kernel/exceptions-64s.S
··· 95 95 /* No virt vectors corresponding with 0x0..0x100 */ 96 96 EXC_VIRT_NONE(0x4000, 0x4100) 97 97 98 + 99 + #ifdef CONFIG_PPC_P7_NAP 100 + /* 101 + * If running native on arch 2.06 or later, check if we are waking up 102 + * from nap/sleep/winkle, and branch to idle handler. 103 + */ 104 + #define IDLETEST(n) \ 105 + BEGIN_FTR_SECTION ; \ 106 + mfspr r10,SPRN_SRR1 ; \ 107 + rlwinm. r10,r10,47-31,30,31 ; \ 108 + beq- 1f ; \ 109 + cmpwi cr3,r10,2 ; \ 110 + BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \ 111 + 1: \ 112 + END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 113 + #else 114 + #define IDLETEST NOTEST 115 + #endif 116 + 98 117 EXC_REAL_BEGIN(system_reset, 0x100, 0x200) 99 118 SET_SCRATCH0(r13) 100 - #ifdef CONFIG_PPC_P7_NAP 101 - BEGIN_FTR_SECTION 102 - /* Running native on arch 2.06 or later, check if we are 103 - * waking up from nap/sleep/winkle. 104 - */ 105 - mfspr r13,SPRN_SRR1 106 - rlwinm. r13,r13,47-31,30,31 107 - beq 9f 119 + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, 120 + IDLETEST, 0x100) 108 121 109 - cmpwi cr3,r13,2 110 - GET_PACA(r13) 122 + EXC_REAL_END(system_reset, 0x100, 0x200) 123 + EXC_VIRT_NONE(0x4100, 0x4200) 124 + 125 + #ifdef CONFIG_PPC_P7_NAP 126 + EXC_COMMON_BEGIN(system_reset_idle_common) 111 127 bl pnv_restore_hyp_resource 112 128 113 129 li r0,PNV_THREAD_RUNNING ··· 146 130 blt cr3,2f 147 131 b pnv_wakeup_loss 148 132 2: b pnv_wakeup_noloss 133 + #endif 149 134 150 - 9: 151 - END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 152 - #endif /* CONFIG_PPC_P7_NAP */ 153 - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, 154 - NOTEST, 0x100) 155 - EXC_REAL_END(system_reset, 0x100, 0x200) 156 - EXC_VIRT_NONE(0x4100, 0x4200) 157 135 EXC_COMMON(system_reset_common, 0x100, system_reset_exception) 158 136 159 137 #ifdef CONFIG_PPC_PSERIES ··· 827 817 TRAMP_KVM(PACA_EXGEN, 0xb00) 828 818 EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) 829 819 830 - 831 - #define 
LOAD_SYSCALL_HANDLER(reg) \ 832 - ld reg,PACAKBASE(r13); \ 833 - ori reg,reg,(ABS_ADDR(system_call_common))@l; 820 + #define LOAD_SYSCALL_HANDLER(reg) \ 821 + __LOAD_HANDLER(reg, system_call_common) 834 822 835 823 /* Syscall routine is used twice, in reloc-off and reloc-on paths */ 836 824 #define SYSCALL_PSERIES_1 \
+1 -1
arch/powerpc/kernel/hw_breakpoint.c
··· 275 275 if (!stepped) { 276 276 WARN(1, "Unable to handle hardware breakpoint. Breakpoint at " 277 277 "0x%lx will be disabled.", info->address); 278 - perf_event_disable(bp); 278 + perf_event_disable_inatomic(bp); 279 279 goto out; 280 280 } 281 281 /*
+29 -6
arch/powerpc/kernel/idle_book3s.S
··· 90 90 * Threads will spin in HMT_LOW until the lock bit is cleared. 91 91 * r14 - pointer to core_idle_state 92 92 * r15 - used to load contents of core_idle_state 93 + * r9 - used as a temporary variable 93 94 */ 94 95 95 96 core_idle_lock_held: ··· 100 99 bne 3b 101 100 HMT_MEDIUM 102 101 lwarx r15,0,r14 102 + andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT 103 + bne core_idle_lock_held 103 104 blr 104 105 105 106 /* ··· 166 163 std r9,_MSR(r1) 167 164 std r1,PACAR1(r13) 168 165 169 - #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 170 - /* Tell KVM we're entering idle */ 171 - li r4,KVM_HWTHREAD_IN_IDLE 172 - stb r4,HSTATE_HWTHREAD_STATE(r13) 173 - #endif 174 - 175 166 /* 176 167 * Go to real mode to do the nap, as required by the architecture. 177 168 * Also, we need to be in real mode before setting hwthread_state, ··· 182 185 183 186 .globl pnv_enter_arch207_idle_mode 184 187 pnv_enter_arch207_idle_mode: 188 + #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 189 + /* Tell KVM we're entering idle */ 190 + li r4,KVM_HWTHREAD_IN_IDLE 191 + /******************************************************/ 192 + /* N O T E W E L L ! ! ! N O T E W E L L */ 193 + /* The following store to HSTATE_HWTHREAD_STATE(r13) */ 194 + /* MUST occur in real mode, i.e. with the MMU off, */ 195 + /* and the MMU must stay off until we clear this flag */ 196 + /* and test HSTATE_HWTHREAD_REQ(r13) in the system */ 197 + /* reset interrupt vector in exceptions-64s.S. */ 198 + /* The reason is that another thread can switch the */ 199 + /* MMU to a guest context whenever this flag is set */ 200 + /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */ 201 + /* that would potentially cause this thread to start */ 202 + /* executing instructions from guest memory in */ 203 + /* hypervisor mode, leading to a host crash or data */ 204 + /* corruption, or worse. 
*/ 205 + /******************************************************/ 206 + stb r4,HSTATE_HWTHREAD_STATE(r13) 207 + #endif 185 208 stb r3,PACA_THREAD_IDLE_STATE(r13) 186 209 cmpwi cr3,r3,PNV_THREAD_SLEEP 187 210 bge cr3,2f ··· 267 250 * r3 - requested stop state 268 251 */ 269 252 power_enter_stop: 253 + #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 254 + /* Tell KVM we're entering idle */ 255 + li r4,KVM_HWTHREAD_IN_IDLE 256 + /* DO THIS IN REAL MODE! See comment above. */ 257 + stb r4,HSTATE_HWTHREAD_STATE(r13) 258 + #endif 270 259 /* 271 260 * Check if the requested state is a deep idle state. 272 261 */
+1 -1
arch/powerpc/kernel/process.c
··· 1012 1012 /* Ensure that restore_math() will restore */ 1013 1013 if (msr_diff & MSR_FP) 1014 1014 current->thread.load_fp = 1; 1015 - #ifdef CONFIG_ALIVEC 1015 + #ifdef CONFIG_ALTIVEC 1016 1016 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC) 1017 1017 current->thread.load_vec = 1; 1018 1018 #endif
+1
arch/powerpc/kvm/book3s_hv_rm_xics.c
··· 23 23 #include <asm/ppc-opcode.h> 24 24 #include <asm/pnv-pci.h> 25 25 #include <asm/opal.h> 26 + #include <asm/smp.h> 26 27 27 28 #include "book3s_xics.h" 28 29
+4 -4
arch/powerpc/mm/tlb-radix.c
··· 175 175 if (unlikely(pid == MMU_NO_CONTEXT)) 176 176 goto no_context; 177 177 178 - if (!mm_is_core_local(mm)) { 178 + if (!mm_is_thread_local(mm)) { 179 179 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 180 180 181 181 if (lock_tlbie) ··· 201 201 if (unlikely(pid == MMU_NO_CONTEXT)) 202 202 goto no_context; 203 203 204 - if (!mm_is_core_local(mm)) { 204 + if (!mm_is_thread_local(mm)) { 205 205 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 206 206 207 207 if (lock_tlbie) ··· 226 226 pid = mm ? mm->context.id : 0; 227 227 if (unlikely(pid == MMU_NO_CONTEXT)) 228 228 goto bail; 229 - if (!mm_is_core_local(mm)) { 229 + if (!mm_is_thread_local(mm)) { 230 230 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 231 231 232 232 if (lock_tlbie) ··· 321 321 { 322 322 unsigned long pid; 323 323 unsigned long addr; 324 - int local = mm_is_core_local(mm); 324 + int local = mm_is_thread_local(mm); 325 325 unsigned long ap = mmu_get_ap(psize); 326 326 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 327 327 unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
+1 -3
arch/s390/include/asm/ftrace.h
··· 12 12 13 13 #ifndef __ASSEMBLY__ 14 14 15 - unsigned long return_address(int depth); 16 - 17 - #define ftrace_return_address(n) return_address(n) 15 + #define ftrace_return_address(n) __builtin_return_address(n) 18 16 19 17 void _mcount(void); 20 18 void ftrace_caller(void);
+1 -1
arch/s390/include/asm/processor.h
··· 192 192 struct mm_struct; 193 193 struct seq_file; 194 194 195 - typedef int (*dump_trace_func_t)(void *data, unsigned long address); 195 + typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable); 196 196 void dump_trace(dump_trace_func_t func, void *data, 197 197 struct task_struct *task, unsigned long sp); 198 198
+3
arch/s390/include/asm/unistd.h
··· 9 9 #include <uapi/asm/unistd.h> 10 10 11 11 #define __IGNORE_time 12 + #define __IGNORE_pkey_mprotect 13 + #define __IGNORE_pkey_alloc 14 + #define __IGNORE_pkey_free 12 15 13 16 #define __ARCH_WANT_OLD_READDIR 14 17 #define __ARCH_WANT_SYS_ALARM
+2 -2
arch/s390/kernel/dis.c
··· 2014 2014 *ptr++ = '\t'; 2015 2015 ptr += print_insn(ptr, code + start, addr); 2016 2016 start += opsize; 2017 - printk("%s", buffer); 2017 + pr_cont("%s", buffer); 2018 2018 ptr = buffer; 2019 2019 ptr += sprintf(ptr, "\n "); 2020 2020 hops++; 2021 2021 } 2022 - printk("\n"); 2022 + pr_cont("\n"); 2023 2023 } 2024 2024 2025 2025 void print_fn_code(unsigned char *code, unsigned long len)
+22 -41
arch/s390/kernel/dumpstack.c
··· 38 38 if (sp < low || sp > high - sizeof(*sf)) 39 39 return sp; 40 40 sf = (struct stack_frame *) sp; 41 + if (func(data, sf->gprs[8], 0)) 42 + return sp; 41 43 /* Follow the backchain. */ 42 44 while (1) { 43 - if (func(data, sf->gprs[8])) 44 - return sp; 45 45 low = sp; 46 46 sp = sf->back_chain; 47 47 if (!sp) ··· 49 49 if (sp <= low || sp > high - sizeof(*sf)) 50 50 return sp; 51 51 sf = (struct stack_frame *) sp; 52 + if (func(data, sf->gprs[8], 1)) 53 + return sp; 52 54 } 53 55 /* Zero backchain detected, check for interrupt frame. */ 54 56 sp = (unsigned long) (sf + 1); ··· 58 56 return sp; 59 57 regs = (struct pt_regs *) sp; 60 58 if (!user_mode(regs)) { 61 - if (func(data, regs->psw.addr)) 59 + if (func(data, regs->psw.addr, 1)) 62 60 return sp; 63 61 } 64 62 low = sp; ··· 87 85 } 88 86 EXPORT_SYMBOL_GPL(dump_trace); 89 87 90 - struct return_address_data { 91 - unsigned long address; 92 - int depth; 93 - }; 94 - 95 - static int __return_address(void *data, unsigned long address) 88 + static int show_address(void *data, unsigned long address, int reliable) 96 89 { 97 - struct return_address_data *rd = data; 98 - 99 - if (rd->depth--) 100 - return 0; 101 - rd->address = address; 102 - return 1; 103 - } 104 - 105 - unsigned long return_address(int depth) 106 - { 107 - struct return_address_data rd = { .depth = depth + 2 }; 108 - 109 - dump_trace(__return_address, &rd, NULL, current_stack_pointer()); 110 - return rd.address; 111 - } 112 - EXPORT_SYMBOL_GPL(return_address); 113 - 114 - static int show_address(void *data, unsigned long address) 115 - { 116 - printk("([<%016lx>] %pSR)\n", address, (void *)address); 90 + if (reliable) 91 + printk(" [<%016lx>] %pSR \n", address, (void *)address); 92 + else 93 + printk("([<%016lx>] %pSR)\n", address, (void *)address); 117 94 return 0; 118 95 } 119 96 ··· 119 138 else 120 139 stack = (unsigned long *)task->thread.ksp; 121 140 } 141 + printk(KERN_DEFAULT "Stack:\n"); 122 142 for (i = 0; i < 20; i++) { 123 143 if 
(((addr_t) stack & (THREAD_SIZE-1)) == 0) 124 144 break; 125 - if ((i * sizeof(long) % 32) == 0) 126 - printk("%s ", i == 0 ? "" : "\n"); 127 - printk("%016lx ", *stack++); 145 + if (i % 4 == 0) 146 + printk(KERN_DEFAULT " "); 147 + pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' '); 128 148 } 129 - printk("\n"); 130 149 show_trace(task, (unsigned long)sp); 131 150 } 132 151 ··· 144 163 mode = user_mode(regs) ? "User" : "Krnl"; 145 164 printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); 146 165 if (!user_mode(regs)) 147 - printk(" (%pSR)", (void *)regs->psw.addr); 148 - printk("\n"); 166 + pr_cont(" (%pSR)", (void *)regs->psw.addr); 167 + pr_cont("\n"); 149 168 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 150 169 "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e, 151 170 psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm); 152 - printk(" RI:%x EA:%x", psw->ri, psw->eaba); 153 - printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode, 171 + pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba); 172 + printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode, 154 173 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 155 174 printk(" %016lx %016lx %016lx %016lx\n", 156 175 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); ··· 186 205 printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff, 187 206 regs->int_code >> 17, ++die_counter); 188 207 #ifdef CONFIG_PREEMPT 189 - printk("PREEMPT "); 208 + pr_cont("PREEMPT "); 190 209 #endif 191 210 #ifdef CONFIG_SMP 192 - printk("SMP "); 211 + pr_cont("SMP "); 193 212 #endif 194 213 if (debug_pagealloc_enabled()) 195 - printk("DEBUG_PAGEALLOC"); 196 - printk("\n"); 214 + pr_cont("DEBUG_PAGEALLOC"); 215 + pr_cont("\n"); 197 216 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); 198 217 print_modules(); 199 218 show_regs(regs);
+1 -1
arch/s390/kernel/perf_event.c
··· 222 222 } 223 223 arch_initcall(service_level_perf_register); 224 224 225 - static int __perf_callchain_kernel(void *data, unsigned long address) 225 + static int __perf_callchain_kernel(void *data, unsigned long address, int reliable) 226 226 { 227 227 struct perf_callchain_entry_ctx *entry = data; 228 228
+2 -2
arch/s390/kernel/stacktrace.c
··· 27 27 return 1; 28 28 } 29 29 30 - static int save_address(void *data, unsigned long address) 30 + static int save_address(void *data, unsigned long address, int reliable) 31 31 { 32 32 return __save_address(data, address, 0); 33 33 } 34 34 35 - static int save_address_nosched(void *data, unsigned long address) 35 + static int save_address_nosched(void *data, unsigned long address, int reliable) 36 36 { 37 37 return __save_address(data, address, 1); 38 38 }
+1
arch/s390/mm/hugetlbpage.c
··· 217 217 } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) { 218 218 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); 219 219 } else { 220 + hugetlb_bad_size(); 220 221 pr_err("hugepagesz= specifies an unsupported page size %s\n", 221 222 string); 222 223 return 0;
+21 -17
arch/s390/mm/init.c
··· 151 151 #ifdef CONFIG_MEMORY_HOTPLUG 152 152 int arch_add_memory(int nid, u64 start, u64 size, bool for_device) 153 153 { 154 - unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM()); 155 - unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS); 154 + unsigned long zone_start_pfn, zone_end_pfn, nr_pages; 156 155 unsigned long start_pfn = PFN_DOWN(start); 157 156 unsigned long size_pages = PFN_DOWN(size); 158 - unsigned long nr_pages; 159 - int rc, zone_enum; 157 + pg_data_t *pgdat = NODE_DATA(nid); 158 + struct zone *zone; 159 + int rc, i; 160 160 161 161 rc = vmem_add_mapping(start, size); 162 162 if (rc) 163 163 return rc; 164 164 165 - while (size_pages > 0) { 166 - if (start_pfn < dma_end_pfn) { 167 - nr_pages = (start_pfn + size_pages > dma_end_pfn) ? 168 - dma_end_pfn - start_pfn : size_pages; 169 - zone_enum = ZONE_DMA; 170 - } else if (start_pfn < normal_end_pfn) { 171 - nr_pages = (start_pfn + size_pages > normal_end_pfn) ? 172 - normal_end_pfn - start_pfn : size_pages; 173 - zone_enum = ZONE_NORMAL; 165 + for (i = 0; i < MAX_NR_ZONES; i++) { 166 + zone = pgdat->node_zones + i; 167 + if (zone_idx(zone) != ZONE_MOVABLE) { 168 + /* Add range within existing zone limits, if possible */ 169 + zone_start_pfn = zone->zone_start_pfn; 170 + zone_end_pfn = zone->zone_start_pfn + 171 + zone->spanned_pages; 174 172 } else { 175 - nr_pages = size_pages; 176 - zone_enum = ZONE_MOVABLE; 173 + /* Add remaining range to ZONE_MOVABLE */ 174 + zone_start_pfn = start_pfn; 175 + zone_end_pfn = start_pfn + size_pages; 177 176 } 178 - rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum, 179 - start_pfn, size_pages); 177 + if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn) 178 + continue; 179 + nr_pages = (start_pfn + size_pages > zone_end_pfn) ? 
180 + zone_end_pfn - start_pfn : size_pages; 181 + rc = __add_pages(nid, zone, start_pfn, nr_pages); 180 182 if (rc) 181 183 break; 182 184 start_pfn += nr_pages; 183 185 size_pages -= nr_pages; 186 + if (!size_pages) 187 + break; 184 188 } 185 189 if (rc) 186 190 vmem_remove_mapping(start, size);
+1 -1
arch/s390/oprofile/init.c
··· 13 13 #include <linux/init.h> 14 14 #include <asm/processor.h> 15 15 16 - static int __s390_backtrace(void *data, unsigned long address) 16 + static int __s390_backtrace(void *data, unsigned long address, int reliable) 17 17 { 18 18 unsigned int *depth = data; 19 19
+2 -2
arch/x86/entry/Makefile
··· 5 5 OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y 6 6 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y 7 7 8 - CFLAGS_syscall_64.o += -Wno-override-init 9 - CFLAGS_syscall_32.o += -Wno-override-init 8 + CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,) 9 + CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,) 10 10 obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o 11 11 obj-y += common.o 12 12
+7 -3
arch/x86/events/intel/core.c
··· 3607 3607 3608 3608 /* 3609 3609 * Quirk: v2 perfmon does not report fixed-purpose events, so 3610 - * assume at least 3 events: 3610 + * assume at least 3 events, when not running in a hypervisor: 3611 3611 */ 3612 - if (version > 1) 3613 - x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); 3612 + if (version > 1) { 3613 + int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR); 3614 + 3615 + x86_pmu.num_counters_fixed = 3616 + max((int)edx.split.num_counters_fixed, assume); 3617 + } 3614 3618 3615 3619 if (boot_cpu_has(X86_FEATURE_PDCM)) { 3616 3620 u64 capabilities;
+26 -4
arch/x86/events/intel/cstate.c
··· 48 48 * Scope: Core 49 49 * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter 50 50 * perf code: 0x02 51 - * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL 51 + * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW 52 + * SKL,KNL 52 53 * Scope: Core 53 54 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter 54 55 * perf code: 0x03 ··· 57 56 * Scope: Core 58 57 * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter. 59 58 * perf code: 0x00 60 - * Available model: SNB,IVB,HSW,BDW,SKL 59 + * Available model: SNB,IVB,HSW,BDW,SKL,KNL 61 60 * Scope: Package (physical package) 62 61 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. 63 62 * perf code: 0x01 64 - * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL 63 + * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL 65 64 * Scope: Package (physical package) 66 65 * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter. 67 66 * perf code: 0x02 68 - * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL 67 + * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW 68 + * SKL,KNL 69 69 * Scope: Package (physical package) 70 70 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. 
71 71 * perf code: 0x03 ··· 120 118 121 119 /* Quirk flags */ 122 120 #define SLM_PKG_C6_USE_C7_MSR (1UL << 0) 121 + #define KNL_CORE_C6_MSR (1UL << 1) 123 122 124 123 struct perf_cstate_msr { 125 124 u64 msr; ··· 491 488 .quirks = SLM_PKG_C6_USE_C7_MSR, 492 489 }; 493 490 491 + 492 + static const struct cstate_model knl_cstates __initconst = { 493 + .core_events = BIT(PERF_CSTATE_CORE_C6_RES), 494 + 495 + .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 496 + BIT(PERF_CSTATE_PKG_C3_RES) | 497 + BIT(PERF_CSTATE_PKG_C6_RES), 498 + .quirks = KNL_CORE_C6_MSR, 499 + }; 500 + 501 + 502 + 494 503 #define X86_CSTATES_MODEL(model, states) \ 495 504 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) } 496 505 ··· 538 523 539 524 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), 540 525 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), 526 + 527 + X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates), 541 528 { }, 542 529 }; 543 530 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); ··· 574 557 /* SLM has different MSR for PKG C6 */ 575 558 if (cm->quirks & SLM_PKG_C6_USE_C7_MSR) 576 559 pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY; 560 + 561 + /* KNL has different MSR for CORE C6 */ 562 + if (cm->quirks & KNL_CORE_C6_MSR) 563 + pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY; 564 + 577 565 578 566 has_cstate_core = cstate_probe_msr(cm->core_events, 579 567 PERF_CSTATE_CORE_EVENT_MAX,
+6
arch/x86/include/asm/io.h
··· 351 351 #define arch_phys_wc_add arch_phys_wc_add 352 352 #endif 353 353 354 + #ifdef CONFIG_X86_PAT 355 + extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size); 356 + extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size); 357 + #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc 358 + #endif 359 + 354 360 #endif /* _ASM_X86_IO_H */
+1
arch/x86/kernel/acpi/boot.c
··· 454 454 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; 455 455 456 456 mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); 457 + acpi_penalize_sci_irq(bus_irq, trigger, polarity); 457 458 458 459 /* 459 460 * stash over-ride to indicate we've been here
+1 -1
arch/x86/kernel/cpu/microcode/amd.c
··· 429 429 * We need the physical address of the container for both bitness since 430 430 * boot_params.hdr.ramdisk_image is a physical address. 431 431 */ 432 - cont = __pa(container); 432 + cont = __pa_nodebug(container); 433 433 cont_va = container; 434 434 #endif 435 435
+2 -1
arch/x86/kernel/mcount_64.S
··· 18 18 19 19 #ifdef CC_USING_FENTRY 20 20 # define function_hook __fentry__ 21 + EXPORT_SYMBOL(__fentry__) 21 22 #else 22 23 # define function_hook mcount 24 + EXPORT_SYMBOL(mcount) 23 25 #endif 24 26 25 27 /* All cases save the original rbp (8 bytes) */ ··· 297 295 jmp fgraph_trace 298 296 END(function_hook) 299 297 #endif /* CONFIG_DYNAMIC_FTRACE */ 300 - EXPORT_SYMBOL(function_hook) 301 298 #endif /* CONFIG_FUNCTION_TRACER */ 302 299 303 300 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+1 -2
arch/x86/kernel/quirks.c
··· 625 625 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3, 626 626 amd_disable_seq_and_redirect_scrub); 627 627 628 - #endif 629 - 630 628 #if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) 631 629 #include <linux/jump_label.h> 632 630 #include <asm/string_64.h> ··· 654 656 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap); 655 657 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap); 656 658 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap); 659 + #endif 657 660 #endif
+6 -1
arch/x86/kernel/setup.c
··· 1221 1221 */ 1222 1222 get_smp_config(); 1223 1223 1224 + /* 1225 + * Systems w/o ACPI and mptables might not have it mapped the local 1226 + * APIC yet, but prefill_possible_map() might need to access it. 1227 + */ 1228 + init_apic_mappings(); 1229 + 1224 1230 prefill_possible_map(); 1225 1231 1226 1232 init_cpu_to_node(); 1227 1233 1228 - init_apic_mappings(); 1229 1234 io_apic_init_mappings(); 1230 1235 1231 1236 kvm_guest_init();
+8 -1
arch/x86/kernel/unwind_guess.c
··· 47 47 get_stack_info(first_frame, state->task, &state->stack_info, 48 48 &state->stack_mask); 49 49 50 - if (!__kernel_text_address(*first_frame)) 50 + /* 51 + * The caller can provide the address of the first frame directly 52 + * (first_frame) or indirectly (regs->sp) to indicate which stack frame 53 + * to start unwinding at. Skip ahead until we reach it. 54 + */ 55 + if (!unwind_done(state) && 56 + (!on_stack(&state->stack_info, first_frame, sizeof(long)) || 57 + !__kernel_text_address(*first_frame))) 51 58 unwind_next_frame(state); 52 59 } 53 60 EXPORT_SYMBOL_GPL(__unwind_start);
+3 -3
arch/x86/mm/kaslr.c
··· 104 104 * consistent with the vaddr_start/vaddr_end variables. 105 105 */ 106 106 BUILD_BUG_ON(vaddr_start >= vaddr_end); 107 - BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) && 107 + BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && 108 108 vaddr_end >= EFI_VA_START); 109 - BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) || 110 - config_enabled(CONFIG_EFI)) && 109 + BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || 110 + IS_ENABLED(CONFIG_EFI)) && 111 111 vaddr_end >= __START_KERNEL_map); 112 112 BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); 113 113
+14
arch/x86/mm/pat.c
··· 730 730 free_memtype(start, end); 731 731 } 732 732 733 + int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size) 734 + { 735 + enum page_cache_mode type = _PAGE_CACHE_MODE_WC; 736 + 737 + return io_reserve_memtype(start, start + size, &type); 738 + } 739 + EXPORT_SYMBOL(arch_io_reserve_memtype_wc); 740 + 741 + void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size) 742 + { 743 + io_free_memtype(start, start + size); 744 + } 745 + EXPORT_SYMBOL(arch_io_free_memtype_wc); 746 + 733 747 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 734 748 unsigned long size, pgprot_t vma_prot) 735 749 {
+2
arch/x86/xen/enlighten.c
··· 1837 1837 1838 1838 xen_domain_type = XEN_HVM_DOMAIN; 1839 1839 } 1840 + #endif 1840 1841 1841 1842 static int xen_cpu_up_prepare(unsigned int cpu) 1842 1843 { ··· 1888 1887 return 0; 1889 1888 } 1890 1889 1890 + #ifdef CONFIG_XEN_PVHVM 1891 1891 #ifdef CONFIG_KEXEC_CORE 1892 1892 static void xen_hvm_shutdown(void) 1893 1893 {
+23
block/badblocks.c
··· 133 133 } 134 134 EXPORT_SYMBOL_GPL(badblocks_check); 135 135 136 + static void badblocks_update_acked(struct badblocks *bb) 137 + { 138 + u64 *p = bb->page; 139 + int i; 140 + bool unacked = false; 141 + 142 + if (!bb->unacked_exist) 143 + return; 144 + 145 + for (i = 0; i < bb->count ; i++) { 146 + if (!BB_ACK(p[i])) { 147 + unacked = true; 148 + break; 149 + } 150 + } 151 + 152 + if (!unacked) 153 + bb->unacked_exist = 0; 154 + } 155 + 136 156 /** 137 157 * badblocks_set() - Add a range of bad blocks to the table. 138 158 * @bb: the badblocks structure that holds all badblock information ··· 314 294 bb->changed = 1; 315 295 if (!acknowledged) 316 296 bb->unacked_exist = 1; 297 + else 298 + badblocks_update_acked(bb); 317 299 write_sequnlock_irqrestore(&bb->lock, flags); 318 300 319 301 return rv; ··· 423 401 } 424 402 } 425 403 404 + badblocks_update_acked(bb); 426 405 bb->changed = 1; 427 406 out: 428 407 write_sequnlock_irq(&bb->lock);
+28
block/blk-flush.c
··· 343 343 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); 344 344 345 345 /* 346 + * Updating q->in_flight[] here for making this tag usable 347 + * early. Because in blk_queue_start_tag(), 348 + * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and 349 + * reserve tags for sync I/O. 350 + * 351 + * More importantly this way can avoid the following I/O 352 + * deadlock: 353 + * 354 + * - suppose there are 40 fua requests comming to flush queue 355 + * and queue depth is 31 356 + * - 30 rqs are scheduled then blk_queue_start_tag() can't alloc 357 + * tag for async I/O any more 358 + * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT 359 + * and flush_data_end_io() is called 360 + * - the other rqs still can't go ahead if not updating 361 + * q->in_flight[BLK_RW_ASYNC] here, meantime these rqs 362 + * are held in flush data queue and make no progress of 363 + * handling post flush rq 364 + * - only after the post flush rq is handled, all these rqs 365 + * can be completed 366 + */ 367 + 368 + elv_completed_request(q, rq); 369 + 370 + /* for avoiding double accounting */ 371 + rq->cmd_flags &= ~REQ_STARTED; 372 + 373 + /* 346 374 * After populating an empty queue, kick it to avoid stall. Read 347 375 * the comment in flush_end_io(). 348 376 */
+3 -3
block/blk-mq.c
··· 1217 1217 blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx); 1218 1218 rq = __blk_mq_alloc_request(&alloc_data, op, op_flags); 1219 1219 1220 - hctx->queued++; 1221 - data->hctx = hctx; 1222 - data->ctx = ctx; 1220 + data->hctx = alloc_data.hctx; 1221 + data->ctx = alloc_data.ctx; 1222 + data->hctx->queued++; 1223 1223 return rq; 1224 1224 } 1225 1225
+3 -8
drivers/acpi/acpica/dsinit.c
··· 46 46 #include "acdispat.h" 47 47 #include "acnamesp.h" 48 48 #include "actables.h" 49 + #include "acinterp.h" 49 50 50 51 #define _COMPONENT ACPI_DISPATCHER 51 52 ACPI_MODULE_NAME("dsinit") ··· 215 214 216 215 /* Walk entire namespace from the supplied root */ 217 216 218 - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 219 - if (ACPI_FAILURE(status)) { 220 - return_ACPI_STATUS(status); 221 - } 222 - 223 217 /* 224 218 * We don't use acpi_walk_namespace since we do not want to acquire 225 219 * the namespace reader lock. 226 220 */ 227 221 status = 228 222 acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX, 229 - ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object, 230 - NULL, &info, NULL); 223 + 0, acpi_ds_init_one_object, NULL, &info, 224 + NULL); 231 225 if (ACPI_FAILURE(status)) { 232 226 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); 233 227 } 234 - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 235 228 236 229 status = acpi_get_table_by_index(table_index, &table); 237 230 if (ACPI_FAILURE(status)) {
+22 -28
drivers/acpi/acpica/dsmethod.c
··· 99 99 "Method auto-serialization parse [%4.4s] %p\n", 100 100 acpi_ut_get_node_name(node), node)); 101 101 102 - acpi_ex_enter_interpreter(); 103 - 104 102 /* Create/Init a root op for the method parse tree */ 105 103 106 104 op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start); 107 105 if (!op) { 108 - status = AE_NO_MEMORY; 109 - goto unlock; 106 + return_ACPI_STATUS(AE_NO_MEMORY); 110 107 } 111 108 112 109 acpi_ps_set_name(op, node->name.integer); ··· 115 118 acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL); 116 119 if (!walk_state) { 117 120 acpi_ps_free_op(op); 118 - status = AE_NO_MEMORY; 119 - goto unlock; 121 + return_ACPI_STATUS(AE_NO_MEMORY); 120 122 } 121 123 122 124 status = acpi_ds_init_aml_walk(walk_state, op, node, ··· 134 138 status = acpi_ps_parse_aml(walk_state); 135 139 136 140 acpi_ps_delete_parse_tree(op); 137 - unlock: 138 - acpi_ex_exit_interpreter(); 139 141 return_ACPI_STATUS(status); 140 142 } 141 143 ··· 725 731 acpi_ds_method_data_delete_all(walk_state); 726 732 727 733 /* 728 - * If method is serialized, release the mutex and restore the 729 - * current sync level for this thread 730 - */ 731 - if (method_desc->method.mutex) { 732 - 733 - /* Acquisition Depth handles recursive calls */ 734 - 735 - method_desc->method.mutex->mutex.acquisition_depth--; 736 - if (!method_desc->method.mutex->mutex.acquisition_depth) { 737 - walk_state->thread->current_sync_level = 738 - method_desc->method.mutex->mutex. 739 - original_sync_level; 740 - 741 - acpi_os_release_mutex(method_desc->method. 742 - mutex->mutex.os_mutex); 743 - method_desc->method.mutex->mutex.thread_id = 0; 744 - } 745 - } 746 - 747 - /* 748 734 * Delete any namespace objects created anywhere within the 749 735 * namespace by the execution of this method. 
Unless: 750 736 * 1) This method is a module-level executable code method, in which ··· 758 784 (void)acpi_ex_enter_interpreter(); 759 785 method_desc->method.info_flags &= 760 786 ~ACPI_METHOD_MODIFIED_NAMESPACE; 787 + } 788 + } 789 + 790 + /* 791 + * If method is serialized, release the mutex and restore the 792 + * current sync level for this thread 793 + */ 794 + if (method_desc->method.mutex) { 795 + 796 + /* Acquisition Depth handles recursive calls */ 797 + 798 + method_desc->method.mutex->mutex.acquisition_depth--; 799 + if (!method_desc->method.mutex->mutex.acquisition_depth) { 800 + walk_state->thread->current_sync_level = 801 + method_desc->method.mutex->mutex. 802 + original_sync_level; 803 + 804 + acpi_os_release_mutex(method_desc->method. 805 + mutex->mutex.os_mutex); 806 + method_desc->method.mutex->mutex.thread_id = 0; 761 807 } 762 808 } 763 809 }
-2
drivers/acpi/acpica/dswload2.c
··· 607 607 } 608 608 } 609 609 610 - acpi_ex_exit_interpreter(); 611 610 status = 612 611 acpi_ev_initialize_region 613 612 (acpi_ns_get_attached_object(node), FALSE); 614 - acpi_ex_enter_interpreter(); 615 613 616 614 if (ACPI_FAILURE(status)) { 617 615 /*
+3
drivers/acpi/acpica/evrgnini.c
··· 45 45 #include "accommon.h" 46 46 #include "acevents.h" 47 47 #include "acnamesp.h" 48 + #include "acinterp.h" 48 49 49 50 #define _COMPONENT ACPI_EVENTS 50 51 ACPI_MODULE_NAME("evrgnini") ··· 598 597 } 599 598 } 600 599 600 + acpi_ex_exit_interpreter(); 601 601 status = 602 602 acpi_ev_execute_reg_method(region_obj, 603 603 ACPI_REG_CONNECT); 604 + acpi_ex_enter_interpreter(); 604 605 605 606 if (acpi_ns_locked) { 606 607 status =
+2
drivers/acpi/acpica/nsload.c
··· 137 137 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 138 138 "**** Begin Table Object Initialization\n")); 139 139 140 + acpi_ex_enter_interpreter(); 140 141 status = acpi_ds_initialize_objects(table_index, node); 142 + acpi_ex_exit_interpreter(); 141 143 142 144 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 143 145 "**** Completed Table Object Initialization\n"));
+1 -1
drivers/acpi/apei/ghes.c
··· 662 662 ghes_do_proc(ghes, ghes->estatus); 663 663 out: 664 664 ghes_clear_estatus(ghes); 665 - return 0; 665 + return rc; 666 666 } 667 667 668 668 static void ghes_add_timer(struct ghes *ghes)
+21 -17
drivers/acpi/pci_link.c
··· 87 87 88 88 static LIST_HEAD(acpi_link_list); 89 89 static DEFINE_MUTEX(acpi_link_lock); 90 + static int sci_irq = -1, sci_penalty; 90 91 91 92 /* -------------------------------------------------------------------------- 92 93 PCI Link Device Management ··· 497 496 { 498 497 int penalty = 0; 499 498 500 - /* 501 - * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict 502 - * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be 503 - * use for PCI IRQs. 504 - */ 505 - if (irq == acpi_gbl_FADT.sci_interrupt) { 506 - u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK; 507 - 508 - if (type != IRQ_TYPE_LEVEL_LOW) 509 - penalty += PIRQ_PENALTY_ISA_ALWAYS; 510 - else 511 - penalty += PIRQ_PENALTY_PCI_USING; 512 - } 499 + if (irq == sci_irq) 500 + penalty += sci_penalty; 513 501 514 502 if (irq < ACPI_MAX_ISA_IRQS) 515 503 return penalty + acpi_isa_irq_penalty[irq]; 516 504 517 - penalty += acpi_irq_pci_sharing_penalty(irq); 518 - return penalty; 505 + return penalty + acpi_irq_pci_sharing_penalty(irq); 519 506 } 520 507 521 508 int __init acpi_irq_penalty_init(void) ··· 608 619 acpi_device_bid(link->device)); 609 620 return -ENODEV; 610 621 } else { 622 + if (link->irq.active < ACPI_MAX_ISA_IRQS) 623 + acpi_isa_irq_penalty[link->irq.active] += 624 + PIRQ_PENALTY_PCI_USING; 625 + 611 626 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", 612 627 acpi_device_name(link->device), 613 628 acpi_device_bid(link->device), link->irq.active); ··· 842 849 continue; 843 850 844 851 if (used) 845 - new_penalty = acpi_irq_get_penalty(irq) + 852 + new_penalty = acpi_isa_irq_penalty[irq] + 846 853 PIRQ_PENALTY_ISA_USED; 847 854 else 848 855 new_penalty = 0; ··· 864 871 void acpi_penalize_isa_irq(int irq, int active) 865 872 { 866 873 if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty))) 867 - acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) + 874 + acpi_isa_irq_penalty[irq] += 868 875 (active ? 
PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); 869 876 } 870 877 ··· 872 879 { 873 880 return irq >= 0 && (irq >= ARRAY_SIZE(acpi_isa_irq_penalty) || 874 881 acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); 882 + } 883 + 884 + void acpi_penalize_sci_irq(int irq, int trigger, int polarity) 885 + { 886 + sci_irq = irq; 887 + 888 + if (trigger == ACPI_MADT_TRIGGER_LEVEL && 889 + polarity == ACPI_MADT_POLARITY_ACTIVE_LOW) 890 + sci_penalty = PIRQ_PENALTY_PCI_USING; 891 + else 892 + sci_penalty = PIRQ_PENALTY_ISA_ALWAYS; 875 893 } 876 894 877 895 /*
+26 -9
drivers/android/binder.c
··· 1002 1002 1003 1003 1004 1004 static struct binder_ref *binder_get_ref(struct binder_proc *proc, 1005 - uint32_t desc) 1005 + u32 desc, bool need_strong_ref) 1006 1006 { 1007 1007 struct rb_node *n = proc->refs_by_desc.rb_node; 1008 1008 struct binder_ref *ref; ··· 1010 1010 while (n) { 1011 1011 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1012 1012 1013 - if (desc < ref->desc) 1013 + if (desc < ref->desc) { 1014 1014 n = n->rb_left; 1015 - else if (desc > ref->desc) 1015 + } else if (desc > ref->desc) { 1016 1016 n = n->rb_right; 1017 - else 1017 + } else if (need_strong_ref && !ref->strong) { 1018 + binder_user_error("tried to use weak ref as strong ref\n"); 1019 + return NULL; 1020 + } else { 1018 1021 return ref; 1022 + } 1019 1023 } 1020 1024 return NULL; 1021 1025 } ··· 1289 1285 } break; 1290 1286 case BINDER_TYPE_HANDLE: 1291 1287 case BINDER_TYPE_WEAK_HANDLE: { 1292 - struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1288 + struct binder_ref *ref; 1289 + 1290 + ref = binder_get_ref(proc, fp->handle, 1291 + fp->type == BINDER_TYPE_HANDLE); 1293 1292 1294 1293 if (ref == NULL) { 1295 1294 pr_err("transaction release %d bad handle %d\n", ··· 1387 1380 if (tr->target.handle) { 1388 1381 struct binder_ref *ref; 1389 1382 1390 - ref = binder_get_ref(proc, tr->target.handle); 1383 + ref = binder_get_ref(proc, tr->target.handle, true); 1391 1384 if (ref == NULL) { 1392 1385 binder_user_error("%d:%d got transaction to invalid handle\n", 1393 1386 proc->pid, thread->pid); ··· 1584 1577 fp->type = BINDER_TYPE_HANDLE; 1585 1578 else 1586 1579 fp->type = BINDER_TYPE_WEAK_HANDLE; 1580 + fp->binder = 0; 1587 1581 fp->handle = ref->desc; 1582 + fp->cookie = 0; 1588 1583 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, 1589 1584 &thread->todo); 1590 1585 ··· 1598 1589 } break; 1599 1590 case BINDER_TYPE_HANDLE: 1600 1591 case BINDER_TYPE_WEAK_HANDLE: { 1601 - struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1592 + struct binder_ref *ref; 
1593 + 1594 + ref = binder_get_ref(proc, fp->handle, 1595 + fp->type == BINDER_TYPE_HANDLE); 1602 1596 1603 1597 if (ref == NULL) { 1604 1598 binder_user_error("%d:%d got transaction with invalid handle, %d\n", ··· 1636 1624 return_error = BR_FAILED_REPLY; 1637 1625 goto err_binder_get_ref_for_node_failed; 1638 1626 } 1627 + fp->binder = 0; 1639 1628 fp->handle = new_ref->desc; 1629 + fp->cookie = 0; 1640 1630 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); 1641 1631 trace_binder_transaction_ref_to_ref(t, ref, 1642 1632 new_ref); ··· 1692 1678 binder_debug(BINDER_DEBUG_TRANSACTION, 1693 1679 " fd %d -> %d\n", fp->handle, target_fd); 1694 1680 /* TODO: fput? */ 1681 + fp->binder = 0; 1695 1682 fp->handle = target_fd; 1696 1683 } break; 1697 1684 ··· 1815 1800 ref->desc); 1816 1801 } 1817 1802 } else 1818 - ref = binder_get_ref(proc, target); 1803 + ref = binder_get_ref(proc, target, 1804 + cmd == BC_ACQUIRE || 1805 + cmd == BC_RELEASE); 1819 1806 if (ref == NULL) { 1820 1807 binder_user_error("%d:%d refcount change on invalid ref %d\n", 1821 1808 proc->pid, thread->pid, target); ··· 2013 1996 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2014 1997 return -EFAULT; 2015 1998 ptr += sizeof(binder_uintptr_t); 2016 - ref = binder_get_ref(proc, target); 1999 + ref = binder_get_ref(proc, target, false); 2017 2000 if (ref == NULL) { 2018 2001 binder_user_error("%d:%d %s invalid ref %d\n", 2019 2002 proc->pid, thread->pid,
+22 -19
drivers/ata/ahci.c
··· 1418 1418 * Message mode could be enforced. In this case assume that advantage 1419 1419 * of multipe MSIs is negated and use single MSI mode instead. 1420 1420 */ 1421 - nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX, 1422 - PCI_IRQ_MSIX | PCI_IRQ_MSI); 1423 - if (nvec > 0) { 1424 - if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) { 1425 - hpriv->get_irq_vector = ahci_get_irq_vector; 1426 - hpriv->flags |= AHCI_HFLAG_MULTI_MSI; 1427 - return nvec; 1421 + if (n_ports > 1) { 1422 + nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX, 1423 + PCI_IRQ_MSIX | PCI_IRQ_MSI); 1424 + if (nvec > 0) { 1425 + if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) { 1426 + hpriv->get_irq_vector = ahci_get_irq_vector; 1427 + hpriv->flags |= AHCI_HFLAG_MULTI_MSI; 1428 + return nvec; 1429 + } 1430 + 1431 + /* 1432 + * Fallback to single MSI mode if the controller 1433 + * enforced MRSM mode. 1434 + */ 1435 + printk(KERN_INFO 1436 + "ahci: MRSM is on, fallback to single MSI\n"); 1437 + pci_free_irq_vectors(pdev); 1428 1438 } 1429 1439 1430 1440 /* 1431 - * Fallback to single MSI mode if the controller enforced MRSM 1432 - * mode. 1441 + * -ENOSPC indicated we don't have enough vectors. Don't bother 1442 + * trying a single vectors for any other error: 1433 1443 */ 1434 - printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n"); 1435 - pci_free_irq_vectors(pdev); 1444 + if (nvec < 0 && nvec != -ENOSPC) 1445 + return nvec; 1436 1446 } 1437 - 1438 - /* 1439 - * -ENOSPC indicated we don't have enough vectors. 
Don't bother trying 1440 - * a single vectors for any other error: 1441 - */ 1442 - if (nvec < 0 && nvec != -ENOSPC) 1443 - return nvec; 1444 1447 1445 1448 /* 1446 1449 * If the host is not capable of supporting per-port vectors, fall ··· 1620 1617 /* legacy intx interrupts */ 1621 1618 pci_intx(pdev, 1); 1622 1619 } 1623 - hpriv->irq = pdev->irq; 1620 + hpriv->irq = pci_irq_vector(pdev, 0); 1624 1621 1625 1622 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 1626 1623 host->flags |= ATA_HOST_PARALLEL_SCAN;
+4 -2
drivers/base/Kconfig
··· 213 213 If you are unsure about this, Say N here. 214 214 215 215 config DEBUG_TEST_DRIVER_REMOVE 216 - bool "Test driver remove calls during probe" 216 + bool "Test driver remove calls during probe (UNSTABLE)" 217 217 depends on DEBUG_KERNEL 218 218 help 219 219 Say Y here if you want the Driver core to test driver remove functions 220 220 by calling probe, remove, probe. This tests the remove path without 221 221 having to unbind the driver or unload the driver module. 222 222 223 - If you are unsure about this, say N here. 223 + This option is expected to find errors and may render your system 224 + unusable. You should say N here unless you are explicitly looking to 225 + test this functionality. 224 226 225 227 config SYS_HYPERVISOR 226 228 bool
+2 -2
drivers/block/DAC960.c
··· 2954 2954 case DAC960_PD_Controller: 2955 2955 if (!request_region(Controller->IO_Address, 0x80, 2956 2956 Controller->FullModelName)) { 2957 - DAC960_Error("IO port 0x%d busy for Controller at\n", 2957 + DAC960_Error("IO port 0x%lx busy for Controller at\n", 2958 2958 Controller, Controller->IO_Address); 2959 2959 goto Failure; 2960 2960 } ··· 2990 2990 case DAC960_P_Controller: 2991 2991 if (!request_region(Controller->IO_Address, 0x80, 2992 2992 Controller->FullModelName)){ 2993 - DAC960_Error("IO port 0x%d busy for Controller at\n", 2993 + DAC960_Error("IO port 0x%lx busy for Controller at\n", 2994 2994 Controller, Controller->IO_Address); 2995 2995 goto Failure; 2996 2996 }
+1 -1
drivers/block/nbd.c
··· 164 164 spin_lock(&nbd->sock_lock); 165 165 166 166 if (!nbd->sock) { 167 - spin_unlock_irq(&nbd->sock_lock); 167 + spin_unlock(&nbd->sock_lock); 168 168 return; 169 169 } 170 170
+1
drivers/bus/Kconfig
··· 111 111 config QCOM_EBI2 112 112 bool "Qualcomm External Bus Interface 2 (EBI2)" 113 113 depends on HAS_IOMEM 114 + depends on ARCH_QCOM || COMPILE_TEST 114 115 help 115 116 Say y here to enable support for the Qualcomm External Bus 116 117 Interface 2, which can be used to connect things like NAND Flash,
+3 -3
drivers/char/hw_random/core.c
··· 84 84 85 85 static void add_early_randomness(struct hwrng *rng) 86 86 { 87 - unsigned char bytes[16]; 88 87 int bytes_read; 88 + size_t size = min_t(size_t, 16, rng_buffer_size()); 89 89 90 90 mutex_lock(&reading_mutex); 91 - bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); 91 + bytes_read = rng_get_data(rng, rng_buffer, size, 1); 92 92 mutex_unlock(&reading_mutex); 93 93 if (bytes_read > 0) 94 - add_device_randomness(bytes, bytes_read); 94 + add_device_randomness(rng_buffer, bytes_read); 95 95 } 96 96 97 97 static inline void cleanup_rng(struct kref *kref)
+1 -1
drivers/clk/at91/clk-programmable.c
··· 203 203 ret = clk_hw_register(NULL, &prog->hw); 204 204 if (ret) { 205 205 kfree(prog); 206 - hw = &prog->hw; 206 + hw = ERR_PTR(ret); 207 207 } 208 208 209 209 return hw;
+4 -7
drivers/clk/bcm/clk-bcm2835.c
··· 502 502 static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate, 503 503 unsigned long *parent_rate) 504 504 { 505 + struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw); 506 + const struct bcm2835_pll_data *data = pll->data; 505 507 u32 ndiv, fdiv; 508 + 509 + rate = clamp(rate, data->min_rate, data->max_rate); 506 510 507 511 bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv); 508 512 ··· 611 607 u32 ndiv, fdiv, a2w_ctl; 612 608 u32 ana[4]; 613 609 int i; 614 - 615 - if (rate < data->min_rate || rate > data->max_rate) { 616 - dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n", 617 - clk_hw_get_name(hw), rate, 618 - data->min_rate, data->max_rate); 619 - return -EINVAL; 620 - } 621 610 622 611 if (rate > data->max_fb_rate) { 623 612 use_fb_prediv = true;
+1
drivers/clk/clk-max77686.c
··· 216 216 return -EINVAL; 217 217 } 218 218 219 + drv_data->num_clks = num_clks; 219 220 drv_data->max_clk_data = devm_kcalloc(dev, num_clks, 220 221 sizeof(*drv_data->max_clk_data), 221 222 GFP_KERNEL);
+2 -2
drivers/clk/hisilicon/clk-hi6220.c
··· 195 195 hi6220_clk_register_divider(hi6220_div_clks_sys, 196 196 ARRAY_SIZE(hi6220_div_clks_sys), clk_data); 197 197 } 198 - CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init); 198 + CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init); 199 199 200 200 201 201 /* clocks in media controller */ ··· 252 252 hi6220_clk_register_divider(hi6220_div_clks_media, 253 253 ARRAY_SIZE(hi6220_div_clks_media), clk_data); 254 254 } 255 - CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init); 255 + CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init); 256 256 257 257 258 258 /* clocks in pmctrl */
+2
drivers/clk/mediatek/Kconfig
··· 8 8 9 9 config COMMON_CLK_MT8135 10 10 bool "Clock driver for Mediatek MT8135" 11 + depends on ARCH_MEDIATEK || COMPILE_TEST 11 12 select COMMON_CLK_MEDIATEK 12 13 default ARCH_MEDIATEK 13 14 ---help--- ··· 16 15 17 16 config COMMON_CLK_MT8173 18 17 bool "Clock driver for Mediatek MT8173" 18 + depends on ARCH_MEDIATEK || COMPILE_TEST 19 19 select COMMON_CLK_MEDIATEK 20 20 default ARCH_MEDIATEK 21 21 ---help---
+6 -5
drivers/clk/mvebu/armada-37xx-periph.c
··· 305 305 }; 306 306 static int armada_3700_add_composite_clk(const struct clk_periph_data *data, 307 307 void __iomem *reg, spinlock_t *lock, 308 - struct device *dev, struct clk_hw *hw) 308 + struct device *dev, struct clk_hw **hw) 309 309 { 310 310 const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, 311 311 *rate_ops = NULL; ··· 329 329 gate->lock = lock; 330 330 gate_ops = gate_hw->init->ops; 331 331 gate->reg = reg + (u64)gate->reg; 332 + gate->flags = CLK_GATE_SET_TO_DISABLE; 332 333 } 333 334 334 335 if (data->rate_hw) { ··· 354 353 } 355 354 } 356 355 357 - hw = clk_hw_register_composite(dev, data->name, data->parent_names, 356 + *hw = clk_hw_register_composite(dev, data->name, data->parent_names, 358 357 data->num_parents, mux_hw, 359 358 mux_ops, rate_hw, rate_ops, 360 359 gate_hw, gate_ops, CLK_IGNORE_UNUSED); 361 360 362 - if (IS_ERR(hw)) 363 - return PTR_ERR(hw); 361 + if (IS_ERR(*hw)) 362 + return PTR_ERR(*hw); 364 363 365 364 return 0; 366 365 } ··· 401 400 spin_lock_init(&driver_data->lock); 402 401 403 402 for (i = 0; i < num_periph; i++) { 404 - struct clk_hw *hw = driver_data->hw_data->hws[i]; 403 + struct clk_hw **hw = &driver_data->hw_data->hws[i]; 405 404 406 405 if (armada_3700_add_composite_clk(&data[i], reg, 407 406 &driver_data->lock, dev, hw))
+1
drivers/clk/samsung/clk-exynos-audss.c
··· 106 106 }, 107 107 { }, 108 108 }; 109 + MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match); 109 110 110 111 static void exynos_audss_clk_teardown(void) 111 112 {
+12 -8
drivers/clk/uniphier/clk-uniphier-core.c
··· 79 79 hw_data->num = clk_num; 80 80 81 81 /* avoid returning NULL for unused idx */ 82 - for (; clk_num >= 0; clk_num--) 82 + while (--clk_num >= 0) 83 83 hw_data->hws[clk_num] = ERR_PTR(-EINVAL); 84 84 85 85 for (p = data; p->name; p++) { ··· 111 111 static const struct of_device_id uniphier_clk_match[] = { 112 112 /* System clock */ 113 113 { 114 + .compatible = "socionext,uniphier-sld3-clock", 115 + .data = uniphier_sld3_sys_clk_data, 116 + }, 117 + { 114 118 .compatible = "socionext,uniphier-ld4-clock", 115 119 .data = uniphier_ld4_sys_clk_data, 116 120 }, ··· 142 138 .compatible = "socionext,uniphier-ld20-clock", 143 139 .data = uniphier_ld20_sys_clk_data, 144 140 }, 145 - /* Media I/O clock */ 141 + /* Media I/O clock, SD clock */ 146 142 { 147 143 .compatible = "socionext,uniphier-sld3-mio-clock", 148 144 .data = uniphier_sld3_mio_clk_data, ··· 160 156 .data = uniphier_sld3_mio_clk_data, 161 157 }, 162 158 { 163 - .compatible = "socionext,uniphier-pro5-mio-clock", 164 - .data = uniphier_pro5_mio_clk_data, 159 + .compatible = "socionext,uniphier-pro5-sd-clock", 160 + .data = uniphier_pro5_sd_clk_data, 165 161 }, 166 162 { 167 - .compatible = "socionext,uniphier-pxs2-mio-clock", 168 - .data = uniphier_pro5_mio_clk_data, 163 + .compatible = "socionext,uniphier-pxs2-sd-clock", 164 + .data = uniphier_pro5_sd_clk_data, 169 165 }, 170 166 { 171 167 .compatible = "socionext,uniphier-ld11-mio-clock", 172 168 .data = uniphier_sld3_mio_clk_data, 173 169 }, 174 170 { 175 - .compatible = "socionext,uniphier-ld20-mio-clock", 176 - .data = uniphier_pro5_mio_clk_data, 171 + .compatible = "socionext,uniphier-ld20-sd-clock", 172 + .data = uniphier_pro5_sd_clk_data, 177 173 }, 178 174 /* Peripheral clock */ 179 175 {
+1 -1
drivers/clk/uniphier/clk-uniphier-mio.c
··· 93 93 { /* sentinel */ } 94 94 }; 95 95 96 - const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = { 96 + const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = { 97 97 UNIPHIER_MIO_CLK_SD_FIXED, 98 98 UNIPHIER_MIO_CLK_SD(0, 0), 99 99 UNIPHIER_MIO_CLK_SD(1, 1),
+1 -1
drivers/clk/uniphier/clk-uniphier-mux.c
··· 42 42 struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw); 43 43 int num_parents = clk_hw_get_num_parents(hw); 44 44 int ret; 45 - u32 val; 45 + unsigned int val; 46 46 u8 i; 47 47 48 48 ret = regmap_read(mux->regmap, mux->reg, &val);
+1 -1
drivers/clk/uniphier/clk-uniphier.h
··· 115 115 extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[]; 116 116 extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[]; 117 117 extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[]; 118 - extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[]; 118 + extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[]; 119 119 extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[]; 120 120 extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[]; 121 121
+32 -6
drivers/cpufreq/intel_pstate.c
··· 179 179 /** 180 180 * struct cpudata - Per CPU instance data storage 181 181 * @cpu: CPU number for this instance data 182 + * @policy: CPUFreq policy value 182 183 * @update_util: CPUFreq utility callback information 183 184 * @update_util_set: CPUFreq utility callback is set 184 185 * @iowait_boost: iowait-related boost fraction ··· 202 201 struct cpudata { 203 202 int cpu; 204 203 204 + unsigned int policy; 205 205 struct update_util_data update_util; 206 206 bool update_util_set; 207 207 ··· 1144 1142 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); 1145 1143 } 1146 1144 1147 - static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1145 + static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1148 1146 { 1149 - int pstate = cpu->pstate.min_pstate; 1150 - 1151 1147 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1152 1148 cpu->pstate.current_pstate = pstate; 1153 1149 /* ··· 1155 1155 */ 1156 1156 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 1157 1157 pstate_funcs.get_val(cpu, pstate)); 1158 + } 1159 + 1160 + static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1161 + { 1162 + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 1163 + } 1164 + 1165 + static void intel_pstate_max_within_limits(struct cpudata *cpu) 1166 + { 1167 + int min_pstate, max_pstate; 1168 + 1169 + update_turbo_state(); 1170 + intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); 1171 + intel_pstate_set_pstate(cpu, max_pstate); 1158 1172 } 1159 1173 1160 1174 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) ··· 1339 1325 1340 1326 from = cpu->pstate.current_pstate; 1341 1327 1342 - target_pstate = pstate_funcs.get_target_pstate(cpu); 1328 + target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ? 
1329 + cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu); 1343 1330 1344 1331 intel_pstate_update_pstate(cpu, target_pstate); 1345 1332 ··· 1506 1491 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 1507 1492 policy->cpuinfo.max_freq, policy->max); 1508 1493 1509 - cpu = all_cpu_data[0]; 1494 + cpu = all_cpu_data[policy->cpu]; 1495 + cpu->policy = policy->policy; 1496 + 1510 1497 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 1511 1498 policy->max < policy->cpuinfo.max_freq && 1512 1499 policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) { ··· 1516 1499 policy->max = policy->cpuinfo.max_freq; 1517 1500 } 1518 1501 1519 - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 1502 + if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 1520 1503 limits = &performance_limits; 1521 1504 if (policy->max >= policy->cpuinfo.max_freq) { 1522 1505 pr_debug("set performance\n"); ··· 1552 1535 limits->max_perf = round_up(limits->max_perf, FRAC_BITS); 1553 1536 1554 1537 out: 1538 + if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 1539 + /* 1540 + * NOHZ_FULL CPUs need this as the governor callback may not 1541 + * be invoked on them. 1542 + */ 1543 + intel_pstate_clear_update_util_hook(policy->cpu); 1544 + intel_pstate_max_within_limits(cpu); 1545 + } 1546 + 1555 1547 intel_pstate_set_update_util_hook(policy->cpu); 1556 1548 1557 1549 intel_pstate_hwp_set_policy(policy);
+1 -1
drivers/dax/Kconfig
··· 14 14 15 15 config DEV_DAX_PMEM 16 16 tristate "PMEM DAX: direct access to persistent memory" 17 - depends on NVDIMM_DAX 17 + depends on LIBNVDIMM && NVDIMM_DAX 18 18 default DEV_DAX 19 19 help 20 20 Support raw access to persistent memory. Note that this
+1 -1
drivers/dax/pmem.c
··· 44 44 45 45 dev_dbg(dax_pmem->dev, "%s\n", __func__); 46 46 percpu_ref_exit(ref); 47 - wait_for_completion(&dax_pmem->cmp); 48 47 } 49 48 50 49 static void dax_pmem_percpu_kill(void *data) ··· 53 54 54 55 dev_dbg(dax_pmem->dev, "%s\n", __func__); 55 56 percpu_ref_kill(ref); 57 + wait_for_completion(&dax_pmem->cmp); 56 58 } 57 59 58 60 static int dax_pmem_probe(struct device *dev)
+1 -1
drivers/extcon/extcon-qcom-spmi-misc.c
··· 51 51 if (ret) 52 52 return; 53 53 54 - extcon_set_state(info->edev, EXTCON_USB_HOST, !id); 54 + extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id); 55 55 } 56 56 57 57 static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
+1 -1
drivers/gpio/Kconfig
··· 284 284 285 285 config GPIO_MOCKUP 286 286 tristate "GPIO Testing Driver" 287 - depends on GPIOLIB 287 + depends on GPIOLIB && SYSFS 288 288 select GPIO_SYSFS 289 289 help 290 290 This enables GPIO Testing driver, which provides a way to test GPIO
+1
drivers/gpio/gpio-ath79.c
··· 219 219 { .compatible = "qca,ar9340-gpio" }, 220 220 {}, 221 221 }; 222 + MODULE_DEVICE_TABLE(of, ath79_gpio_of_match); 222 223 223 224 static int ath79_gpio_probe(struct platform_device *pdev) 224 225 {
+1 -1
drivers/gpio/gpio-mpc8xxx.c
··· 239 239 irq_hw_number_t hwirq) 240 240 { 241 241 irq_set_chip_data(irq, h->host_data); 242 - irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq); 242 + irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq); 243 243 244 244 return 0; 245 245 }
+6 -2
drivers/gpio/gpio-mxs.c
··· 308 308 writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR); 309 309 310 310 irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id()); 311 - if (irq_base < 0) 312 - return irq_base; 311 + if (irq_base < 0) { 312 + err = irq_base; 313 + goto out_iounmap; 314 + } 313 315 314 316 port->domain = irq_domain_add_legacy(np, 32, irq_base, 0, 315 317 &irq_domain_simple_ops, NULL); ··· 351 349 irq_domain_remove(port->domain); 352 350 out_irqdesc_free: 353 351 irq_free_descs(irq_base, 32); 352 + out_iounmap: 353 + iounmap(port->base); 354 354 return err; 355 355 } 356 356
+1 -1
drivers/gpio/gpio-stmpe.c
··· 409 409 * 801/1801/1600, bits are cleared when read. 410 410 * Edge detect register is not present on 801/1600/1801 411 411 */ 412 - if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 || 412 + if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 && 413 413 stmpe->partnum != STMPE1801) { 414 414 stmpe_reg_write(stmpe, statmsbreg + i, status[i]); 415 415 stmpe_reg_write(stmpe,
+1
drivers/gpio/gpio-ts4800.c
··· 66 66 { .compatible = "technologic,ts4800-gpio", }, 67 67 {}, 68 68 }; 69 + MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match); 69 70 70 71 static struct platform_driver ts4800_gpio_driver = { 71 72 .driver = {
+5 -2
drivers/gpio/gpiolib-acpi.c
··· 653 653 { 654 654 int idx, i; 655 655 unsigned int irq_flags; 656 + int ret = -ENOENT; 656 657 657 658 for (i = 0, idx = 0; idx <= index; i++) { 658 659 struct acpi_gpio_info info; 659 660 struct gpio_desc *desc; 660 661 661 662 desc = acpi_get_gpiod_by_index(adev, NULL, i, &info); 662 - if (IS_ERR(desc)) 663 + if (IS_ERR(desc)) { 664 + ret = PTR_ERR(desc); 663 665 break; 666 + } 664 667 if (info.gpioint && idx++ == index) { 665 668 int irq = gpiod_to_irq(desc); 666 669 ··· 682 679 } 683 680 684 681 } 685 - return -ENOENT; 682 + return ret; 686 683 } 687 684 EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get); 688 685
+41 -1
drivers/gpio/gpiolib.c
··· 333 333 u32 numdescs; 334 334 }; 335 335 336 + #define GPIOHANDLE_REQUEST_VALID_FLAGS \ 337 + (GPIOHANDLE_REQUEST_INPUT | \ 338 + GPIOHANDLE_REQUEST_OUTPUT | \ 339 + GPIOHANDLE_REQUEST_ACTIVE_LOW | \ 340 + GPIOHANDLE_REQUEST_OPEN_DRAIN | \ 341 + GPIOHANDLE_REQUEST_OPEN_SOURCE) 342 + 336 343 static long linehandle_ioctl(struct file *filep, unsigned int cmd, 337 344 unsigned long arg) 338 345 { ··· 350 343 351 344 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { 352 345 int val; 346 + 347 + memset(&ghd, 0, sizeof(ghd)); 353 348 354 349 /* TODO: check if descriptors are really input */ 355 350 for (i = 0; i < lh->numdescs; i++) { ··· 453 444 u32 lflags = handlereq.flags; 454 445 struct gpio_desc *desc; 455 446 447 + if (offset >= gdev->ngpio) { 448 + ret = -EINVAL; 449 + goto out_free_descs; 450 + } 451 + 452 + /* Return an error if a unknown flag is set */ 453 + if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) { 454 + ret = -EINVAL; 455 + goto out_free_descs; 456 + } 457 + 456 458 desc = &gdev->descs[offset]; 457 459 ret = gpiod_request(desc, lh->label); 458 460 if (ret) ··· 556 536 struct mutex read_lock; 557 537 }; 558 538 539 + #define GPIOEVENT_REQUEST_VALID_FLAGS \ 540 + (GPIOEVENT_REQUEST_RISING_EDGE | \ 541 + GPIOEVENT_REQUEST_FALLING_EDGE) 542 + 559 543 static unsigned int lineevent_poll(struct file *filep, 560 544 struct poll_table_struct *wait) 561 545 { ··· 646 622 */ 647 623 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { 648 624 int val; 625 + 626 + memset(&ghd, 0, sizeof(ghd)); 649 627 650 628 val = gpiod_get_value_cansleep(le->desc); 651 629 if (val < 0) ··· 752 726 lflags = eventreq.handleflags; 753 727 eflags = eventreq.eventflags; 754 728 729 + if (offset >= gdev->ngpio) { 730 + ret = -EINVAL; 731 + goto out_free_label; 732 + } 733 + 734 + /* Return an error if a unknown flag is set */ 735 + if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) || 736 + (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) { 737 + ret = -EINVAL; 738 + goto out_free_label; 739 + } 740 + 
755 741 /* This is just wrong: we don't look for events on output lines */ 756 742 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { 757 743 ret = -EINVAL; ··· 861 823 if (cmd == GPIO_GET_CHIPINFO_IOCTL) { 862 824 struct gpiochip_info chipinfo; 863 825 826 + memset(&chipinfo, 0, sizeof(chipinfo)); 827 + 864 828 strncpy(chipinfo.name, dev_name(&gdev->dev), 865 829 sizeof(chipinfo.name)); 866 830 chipinfo.name[sizeof(chipinfo.name)-1] = '\0'; ··· 879 839 880 840 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo))) 881 841 return -EFAULT; 882 - if (lineinfo.line_offset > gdev->ngpio) 842 + if (lineinfo.line_offset >= gdev->ngpio) 883 843 return -EINVAL; 884 844 885 845 desc = &gdev->descs[lineinfo.line_offset];
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 754 754 755 755 int amdgpu_bo_init(struct amdgpu_device *adev) 756 756 { 757 + /* reserve PAT memory space to WC for VRAM */ 758 + arch_io_reserve_memtype_wc(adev->mc.aper_base, 759 + adev->mc.aper_size); 760 + 757 761 /* Add an MTRR for the VRAM */ 758 762 adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, 759 763 adev->mc.aper_size); ··· 773 769 { 774 770 amdgpu_ttm_fini(adev); 775 771 arch_phys_wc_del(adev->mc.vram_mtrr); 772 + arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size); 776 773 } 777 774 778 775 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
+6
drivers/gpu/drm/ast/ast_ttm.c
··· 267 267 return ret; 268 268 } 269 269 270 + arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), 271 + pci_resource_len(dev->pdev, 0)); 270 272 ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), 271 273 pci_resource_len(dev->pdev, 0)); 272 274 ··· 277 275 278 276 void ast_mm_fini(struct ast_private *ast) 279 277 { 278 + struct drm_device *dev = ast->dev; 279 + 280 280 ttm_bo_device_release(&ast->ttm.bdev); 281 281 282 282 ast_ttm_global_release(ast); 283 283 284 284 arch_phys_wc_del(ast->fb_mtrr); 285 + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), 286 + pci_resource_len(dev->pdev, 0)); 285 287 } 286 288 287 289 void ast_ttm_placement(struct ast_bo *bo, int domain)
+7
drivers/gpu/drm/cirrus/cirrus_ttm.c
··· 267 267 return ret; 268 268 } 269 269 270 + arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), 271 + pci_resource_len(dev->pdev, 0)); 272 + 270 273 cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), 271 274 pci_resource_len(dev->pdev, 0)); 272 275 ··· 279 276 280 277 void cirrus_mm_fini(struct cirrus_device *cirrus) 281 278 { 279 + struct drm_device *dev = cirrus->dev; 280 + 282 281 if (!cirrus->mm_inited) 283 282 return; 284 283 ··· 290 285 291 286 arch_phys_wc_del(cirrus->fb_mtrr); 292 287 cirrus->fb_mtrr = 0; 288 + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), 289 + pci_resource_len(dev->pdev, 0)); 293 290 } 294 291 295 292 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
+7
drivers/gpu/drm/mgag200/mgag200_ttm.c
··· 266 266 return ret; 267 267 } 268 268 269 + arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), 270 + pci_resource_len(dev->pdev, 0)); 271 + 269 272 mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), 270 273 pci_resource_len(dev->pdev, 0)); 271 274 ··· 277 274 278 275 void mgag200_mm_fini(struct mga_device *mdev) 279 276 { 277 + struct drm_device *dev = mdev->dev; 278 + 280 279 ttm_bo_device_release(&mdev->ttm.bdev); 281 280 282 281 mgag200_ttm_global_release(mdev); 283 282 283 + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), 284 + pci_resource_len(dev->pdev, 0)); 284 285 arch_phys_wc_del(mdev->fb_mtrr); 285 286 mdev->fb_mtrr = 0; 286 287 }
+8
drivers/gpu/drm/nouveau/nouveau_ttm.c
··· 398 398 /* VRAM init */ 399 399 drm->gem.vram_available = drm->device.info.ram_user; 400 400 401 + arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1), 402 + device->func->resource_size(device, 1)); 403 + 401 404 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, 402 405 drm->gem.vram_available >> PAGE_SHIFT); 403 406 if (ret) { ··· 433 430 void 434 431 nouveau_ttm_fini(struct nouveau_drm *drm) 435 432 { 433 + struct nvkm_device *device = nvxx_device(&drm->device); 434 + 436 435 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); 437 436 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); 438 437 ··· 444 439 445 440 arch_phys_wc_del(drm->ttm.mtrr); 446 441 drm->ttm.mtrr = 0; 442 + arch_io_free_memtype_wc(device->func->resource_addr(device, 1), 443 + device->func->resource_size(device, 1)); 444 + 447 445 }
+5
drivers/gpu/drm/radeon/radeon_object.c
··· 446 446 447 447 int radeon_bo_init(struct radeon_device *rdev) 448 448 { 449 + /* reserve PAT memory space to WC for VRAM */ 450 + arch_io_reserve_memtype_wc(rdev->mc.aper_base, 451 + rdev->mc.aper_size); 452 + 449 453 /* Add an MTRR for the VRAM */ 450 454 if (!rdev->fastfb_working) { 451 455 rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base, ··· 467 463 { 468 464 radeon_ttm_fini(rdev); 469 465 arch_phys_wc_del(rdev->mc.vram_mtrr); 466 + arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size); 470 467 } 471 468 472 469 /* Returns how many bytes TTM can move per IB.
+7 -3
drivers/hv/hv_util.c
··· 314 314 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; 315 315 struct icmsg_negotiate *negop = NULL; 316 316 317 - vmbus_recvpacket(channel, hbeat_txf_buf, 318 - PAGE_SIZE, &recvlen, &requestid); 317 + while (1) { 319 318 320 - if (recvlen > 0) { 319 + vmbus_recvpacket(channel, hbeat_txf_buf, 320 + PAGE_SIZE, &recvlen, &requestid); 321 + 322 + if (!recvlen) 323 + break; 324 + 321 325 icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[ 322 326 sizeof(struct vmbuspipe_hdr)]; 323 327
+6 -6
drivers/i2c/busses/Kconfig
··· 79 79 80 80 config I2C_HIX5HD2 81 81 tristate "Hix5hd2 high-speed I2C driver" 82 - depends on ARCH_HIX5HD2 || COMPILE_TEST 82 + depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST 83 83 help 84 - Say Y here to include support for high-speed I2C controller in the 85 - Hisilicon based hix5hd2 SoCs. 84 + Say Y here to include support for the high-speed I2C controller 85 + used in HiSilicon hix5hd2 SoCs. 86 86 87 - This driver can also be built as a module. If so, the module 87 + This driver can also be built as a module. If so, the module 88 88 will be called i2c-hix5hd2. 89 89 90 90 config I2C_I801 ··· 589 589 590 590 config I2C_IMX 591 591 tristate "IMX I2C interface" 592 - depends on ARCH_MXC || ARCH_LAYERSCAPE 592 + depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE 593 593 help 594 594 Say Y here if you want to use the IIC bus controller on 595 - the Freescale i.MX/MXC or Layerscape processors. 595 + the Freescale i.MX/MXC, Layerscape or ColdFire processors. 596 596 597 597 This driver can also be built as a module. If so, the module 598 598 will be called i2c-imx.
+14 -3
drivers/i2c/busses/i2c-designware-core.c
··· 95 95 #define DW_IC_STATUS_TFE BIT(2) 96 96 #define DW_IC_STATUS_MST_ACTIVITY BIT(5) 97 97 98 + #define DW_IC_SDA_HOLD_RX_SHIFT 16 99 + #define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) 100 + 98 101 #define DW_IC_ERR_TX_ABRT 0x1 99 102 100 103 #define DW_IC_TAR_10BITADDR_MASTER BIT(12) ··· 423 420 /* Configure SDA Hold Time if required */ 424 421 reg = dw_readl(dev, DW_IC_COMP_VERSION); 425 422 if (reg >= DW_IC_SDA_HOLD_MIN_VERS) { 426 - if (dev->sda_hold_time) { 427 - dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); 428 - } else { 423 + if (!dev->sda_hold_time) { 429 424 /* Keep previous hold time setting if no one set it */ 430 425 dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD); 431 426 } 427 + /* 428 + * Workaround for avoiding TX arbitration lost in case I2C 429 + * slave pulls SDA down "too quickly" after falling egde of 430 + * SCL by enabling non-zero SDA RX hold. Specification says it 431 + * extends incoming SDA low to high transition while SCL is 432 + * high but it apprears to help also above issue. 433 + */ 434 + if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK)) 435 + dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT; 436 + dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); 432 437 } else { 433 438 dev_warn(dev->dev, 434 439 "Hardware too old to adjust SDA hold time.\n");
+1
drivers/i2c/busses/i2c-digicolor.c
··· 368 368 { .compatible = "cnxt,cx92755-i2c" }, 369 369 { }, 370 370 }; 371 + MODULE_DEVICE_TABLE(of, dc_i2c_match); 371 372 372 373 static struct platform_driver dc_i2c_driver = { 373 374 .probe = dc_i2c_probe,
+13 -3
drivers/i2c/busses/i2c-i801.c
··· 146 146 #define SMBHSTCFG_HST_EN 1 147 147 #define SMBHSTCFG_SMB_SMI_EN 2 148 148 #define SMBHSTCFG_I2C_EN 4 149 + #define SMBHSTCFG_SPD_WD 0x10 149 150 150 151 /* TCO configuration bits for TCOCTL */ 151 152 #define TCOCTL_EN 0x0100 ··· 866 865 block = 1; 867 866 break; 868 867 case I2C_SMBUS_I2C_BLOCK_DATA: 869 - /* NB: page 240 of ICH5 datasheet shows that the R/#W 870 - * bit should be cleared here, even when reading */ 871 - outb_p((addr & 0x7f) << 1, SMBHSTADD(priv)); 868 + /* 869 + * NB: page 240 of ICH5 datasheet shows that the R/#W 870 + * bit should be cleared here, even when reading. 871 + * However if SPD Write Disable is set (Lynx Point and later), 872 + * the read will fail if we don't set the R/#W bit. 873 + */ 874 + outb_p(((addr & 0x7f) << 1) | 875 + ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ? 876 + (read_write & 0x01) : 0), 877 + SMBHSTADD(priv)); 872 878 if (read_write == I2C_SMBUS_READ) { 873 879 /* NB: page 240 of ICH5 datasheet also shows 874 880 * that DATA1 is the cmd field when reading */ ··· 1581 1573 /* Disable SMBus interrupt feature if SMBus using SMI# */ 1582 1574 priv->features &= ~FEATURE_IRQ; 1583 1575 } 1576 + if (temp & SMBHSTCFG_SPD_WD) 1577 + dev_info(&dev->dev, "SPD Write Disable is set\n"); 1584 1578 1585 1579 /* Clear special mode bits */ 1586 1580 if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
+7 -4
drivers/i2c/busses/i2c-imx.c
··· 1009 1009 rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0); 1010 1010 rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0); 1011 1011 1012 - if (!gpio_is_valid(rinfo->sda_gpio) || 1013 - !gpio_is_valid(rinfo->scl_gpio) || 1014 - IS_ERR(i2c_imx->pinctrl_pins_default) || 1015 - IS_ERR(i2c_imx->pinctrl_pins_gpio)) { 1012 + if (rinfo->sda_gpio == -EPROBE_DEFER || 1013 + rinfo->scl_gpio == -EPROBE_DEFER) { 1014 + return -EPROBE_DEFER; 1015 + } else if (!gpio_is_valid(rinfo->sda_gpio) || 1016 + !gpio_is_valid(rinfo->scl_gpio) || 1017 + IS_ERR(i2c_imx->pinctrl_pins_default) || 1018 + IS_ERR(i2c_imx->pinctrl_pins_gpio)) { 1016 1019 dev_dbg(&pdev->dev, "recovery information incomplete\n"); 1017 1020 return 0; 1018 1021 }
+1
drivers/i2c/busses/i2c-jz4780.c
··· 729 729 { .compatible = "ingenic,jz4780-i2c", }, 730 730 { /* sentinel */ } 731 731 }; 732 + MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches); 732 733 733 734 static int jz4780_i2c_probe(struct platform_device *pdev) 734 735 {
+2
drivers/i2c/busses/i2c-rk3x.c
··· 694 694 t_calc->div_low--; 695 695 t_calc->div_high--; 696 696 697 + /* Give the tuning value 0, that would not update con register */ 698 + t_calc->tuning = 0; 697 699 /* Maximum divider supported by hw is 0xffff */ 698 700 if (t_calc->div_low > 0xffff) { 699 701 t_calc->div_low = 0xffff;
+1 -1
drivers/i2c/busses/i2c-xgene-slimpro.c
··· 105 105 struct mbox_chan *mbox_chan; 106 106 struct mbox_client mbox_client; 107 107 struct completion rd_complete; 108 - u8 dma_buffer[I2C_SMBUS_BLOCK_MAX]; 108 + u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */ 109 109 u32 *resp_msg; 110 110 }; 111 111
+1
drivers/i2c/busses/i2c-xlp9xx.c
··· 426 426 { .compatible = "netlogic,xlp980-i2c", }, 427 427 { /* sentinel */ }, 428 428 }; 429 + MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match); 429 430 430 431 #ifdef CONFIG_ACPI 431 432 static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
+1
drivers/i2c/busses/i2c-xlr.c
··· 358 358 }, 359 359 { } 360 360 }; 361 + MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids); 361 362 362 363 static int xlr_i2c_probe(struct platform_device *pdev) 363 364 {
+10 -1
drivers/i2c/i2c-core.c
··· 1681 1681 static void of_i2c_register_devices(struct i2c_adapter *adap) 1682 1682 { 1683 1683 struct device_node *bus, *node; 1684 + struct i2c_client *client; 1684 1685 1685 1686 /* Only register child devices if the adapter has a node pointer set */ 1686 1687 if (!adap->dev.of_node) ··· 1696 1695 for_each_available_child_of_node(bus, node) { 1697 1696 if (of_node_test_and_set_flag(node, OF_POPULATED)) 1698 1697 continue; 1699 - of_i2c_register_device(adap, node); 1698 + 1699 + client = of_i2c_register_device(adap, node); 1700 + if (IS_ERR(client)) { 1701 + dev_warn(&adap->dev, 1702 + "Failed to create I2C device for %s\n", 1703 + node->full_name); 1704 + of_node_clear_flag(node, OF_POPULATED); 1705 + } 1700 1706 } 1701 1707 1702 1708 of_node_put(bus); ··· 2307 2299 if (IS_ERR(client)) { 2308 2300 dev_err(&adap->dev, "failed to create client for '%s'\n", 2309 2301 rd->dn->full_name); 2302 + of_node_clear_flag(rd->dn, OF_POPULATED); 2310 2303 return notifier_from_errno(PTR_ERR(client)); 2311 2304 } 2312 2305 break;
+2
drivers/iio/adc/Kconfig
··· 437 437 config TI_ADC081C 438 438 tristate "Texas Instruments ADC081C/ADC101C/ADC121C family" 439 439 depends on I2C 440 + select IIO_BUFFER 441 + select IIO_TRIGGERED_BUFFER 440 442 help 441 443 If you say yes here you get support for Texas Instruments ADC081C, 442 444 ADC101C and ADC121C ADC chips.
+4 -3
drivers/iio/chemical/atlas-ph-sensor.c
··· 213 213 struct device *dev = &data->client->dev; 214 214 int ret; 215 215 unsigned int val; 216 + __be16 rval; 216 217 217 - ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2); 218 + ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2); 218 219 if (ret) 219 220 return ret; 220 221 221 - dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100, 222 - be16_to_cpu(val) % 100); 222 + val = be16_to_cpu(rval); 223 + dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100); 223 224 224 225 ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val); 225 226 if (ret)
+9 -7
drivers/iio/temperature/maxim_thermocouple.c
··· 123 123 { 124 124 unsigned int storage_bytes = data->chip->read_size; 125 125 unsigned int shift = chan->scan_type.shift + (chan->address * 8); 126 - unsigned int buf; 126 + __be16 buf16; 127 + __be32 buf32; 127 128 int ret; 128 - 129 - ret = spi_read(data->spi, (void *) &buf, storage_bytes); 130 - if (ret) 131 - return ret; 132 129 133 130 switch (storage_bytes) { 134 131 case 2: 135 - *val = be16_to_cpu(buf); 132 + ret = spi_read(data->spi, (void *)&buf16, storage_bytes); 133 + *val = be16_to_cpu(buf16); 136 134 break; 137 135 case 4: 138 - *val = be32_to_cpu(buf); 136 + ret = spi_read(data->spi, (void *)&buf32, storage_bytes); 137 + *val = be32_to_cpu(buf32); 139 138 break; 140 139 } 140 + 141 + if (ret) 142 + return ret; 141 143 142 144 /* check to be sure this is a valid reading */ 143 145 if (*val & data->chip->status_bit)
+1 -1
drivers/ipack/ipack.c
··· 178 178 idev->id_vendor, idev->id_device); 179 179 } 180 180 181 - ipack_device_attr(id_format, "0x%hhu\n"); 181 + ipack_device_attr(id_format, "0x%hhx\n"); 182 182 183 183 static DEVICE_ATTR_RO(id); 184 184 static DEVICE_ATTR_RO(id_device);
+9 -6
drivers/md/dm-raid.c
··· 266 266 {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET}, 267 267 {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR}, 268 268 {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT}, 269 - {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */ 269 + {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */ 270 270 {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N}, 271 271 {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, 272 272 {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, ··· 2087 2087 /* 2088 2088 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata 2089 2089 */ 2090 - if (le32_to_cpu(sb->level) != mddev->level) { 2090 + if (le32_to_cpu(sb->level) != mddev->new_level) { 2091 2091 DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)"); 2092 2092 return -EINVAL; 2093 2093 } 2094 - if (le32_to_cpu(sb->layout) != mddev->layout) { 2094 + if (le32_to_cpu(sb->layout) != mddev->new_layout) { 2095 2095 DMERR("Reshaping raid sets not yet supported. (raid layout change)"); 2096 2096 DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout); 2097 2097 DMERR(" Old layout: %s w/ %d copies", ··· 2102 2102 raid10_md_layout_to_copies(mddev->layout)); 2103 2103 return -EINVAL; 2104 2104 } 2105 - if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) { 2105 + if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) { 2106 2106 DMERR("Reshaping raid sets not yet supported. 
(stripe sectors change)"); 2107 2107 return -EINVAL; 2108 2108 } ··· 2114 2114 sb->num_devices, mddev->raid_disks); 2115 2115 return -EINVAL; 2116 2116 } 2117 + 2118 + DMINFO("Discovered old metadata format; upgrading to extended metadata format"); 2117 2119 2118 2120 /* Table line is checked vs. authoritative superblock */ 2119 2121 rs_set_new(rs); ··· 2260 2258 if (!mddev->events && super_init_validation(rs, rdev)) 2261 2259 return -EINVAL; 2262 2260 2263 - if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) { 2261 + if (le32_to_cpu(sb->compat_features) && 2262 + le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) { 2264 2263 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; 2265 2264 return -EINVAL; 2266 2265 } ··· 3649 3646 3650 3647 static struct target_type raid_target = { 3651 3648 .name = "raid", 3652 - .version = {1, 9, 0}, 3649 + .version = {1, 9, 1}, 3653 3650 .module = THIS_MODULE, 3654 3651 .ctr = raid_ctr, 3655 3652 .dtr = raid_dtr,
+3 -19
drivers/md/dm-raid1.c
··· 145 145 146 146 struct dm_raid1_bio_record { 147 147 struct mirror *m; 148 - /* if details->bi_bdev == NULL, details were not saved */ 149 148 struct dm_bio_details details; 150 149 region_t write_region; 151 150 }; ··· 1199 1200 struct dm_raid1_bio_record *bio_record = 1200 1201 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); 1201 1202 1202 - bio_record->details.bi_bdev = NULL; 1203 - 1204 1203 if (rw == WRITE) { 1205 1204 /* Save region for mirror_end_io() handler */ 1206 1205 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); ··· 1257 1260 } 1258 1261 1259 1262 if (error == -EOPNOTSUPP) 1260 - goto out; 1263 + return error; 1261 1264 1262 1265 if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) 1263 - goto out; 1266 + return error; 1264 1267 1265 1268 if (unlikely(error)) { 1266 - if (!bio_record->details.bi_bdev) { 1267 - /* 1268 - * There wasn't enough memory to record necessary 1269 - * information for a retry or there was no other 1270 - * mirror in-sync. 1271 - */ 1272 - DMERR_LIMIT("Mirror read failed."); 1273 - return -EIO; 1274 - } 1275 - 1276 1269 m = bio_record->m; 1277 1270 1278 1271 DMERR("Mirror read failed from %s. Trying alternative device.", ··· 1278 1291 bd = &bio_record->details; 1279 1292 1280 1293 dm_bio_restore(bd, bio); 1281 - bio_record->details.bi_bdev = NULL; 1294 + bio->bi_error = 0; 1282 1295 1283 1296 queue_bio(ms, bio, rw); 1284 1297 return DM_ENDIO_INCOMPLETE; 1285 1298 } 1286 1299 DMERR("All replicated volumes dead, failing I/O"); 1287 1300 } 1288 - 1289 - out: 1290 - bio_record->details.bi_bdev = NULL; 1291 1301 1292 1302 return error; 1293 1303 }
+5 -2
drivers/md/dm-rq.c
··· 856 856 kthread_init_worker(&md->kworker); 857 857 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 858 858 "kdmwork-%s", dm_device_name(md)); 859 - if (IS_ERR(md->kworker_task)) 860 - return PTR_ERR(md->kworker_task); 859 + if (IS_ERR(md->kworker_task)) { 860 + int error = PTR_ERR(md->kworker_task); 861 + md->kworker_task = NULL; 862 + return error; 863 + } 861 864 862 865 elv_register_queue(md->queue); 863 866
+9 -15
drivers/md/dm-table.c
··· 695 695 696 696 tgt->type = dm_get_target_type(type); 697 697 if (!tgt->type) { 698 - DMERR("%s: %s: unknown target type", dm_device_name(t->md), 699 - type); 698 + DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); 700 699 return -EINVAL; 701 700 } 702 701 703 702 if (dm_target_needs_singleton(tgt->type)) { 704 703 if (t->num_targets) { 705 - DMERR("%s: target type %s must appear alone in table", 706 - dm_device_name(t->md), type); 707 - return -EINVAL; 704 + tgt->error = "singleton target type must appear alone in table"; 705 + goto bad; 708 706 } 709 707 t->singleton = true; 710 708 } 711 709 712 710 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { 713 - DMERR("%s: target type %s may not be included in read-only tables", 714 - dm_device_name(t->md), type); 715 - return -EINVAL; 711 + tgt->error = "target type may not be included in a read-only table"; 712 + goto bad; 716 713 } 717 714 718 715 if (t->immutable_target_type) { 719 716 if (t->immutable_target_type != tgt->type) { 720 - DMERR("%s: immutable target type %s cannot be mixed with other target types", 721 - dm_device_name(t->md), t->immutable_target_type->name); 722 - return -EINVAL; 717 + tgt->error = "immutable target type cannot be mixed with other target types"; 718 + goto bad; 723 719 } 724 720 } else if (dm_target_is_immutable(tgt->type)) { 725 721 if (t->num_targets) { 726 - DMERR("%s: immutable target type %s cannot be mixed with other target types", 727 - dm_device_name(t->md), tgt->type->name); 728 - return -EINVAL; 722 + tgt->error = "immutable target type cannot be mixed with other target types"; 723 + goto bad; 729 724 } 730 725 t->immutable_target_type = tgt->type; 731 726 } ··· 735 740 */ 736 741 if (!adjoin(t, tgt)) { 737 742 tgt->error = "Gap in table"; 738 - r = -EINVAL; 739 743 goto bad; 740 744 } 741 745
+2 -2
drivers/md/dm.c
··· 1423 1423 if (md->bs) 1424 1424 bioset_free(md->bs); 1425 1425 1426 - cleanup_srcu_struct(&md->io_barrier); 1427 - 1428 1426 if (md->disk) { 1429 1427 spin_lock(&_minor_lock); 1430 1428 md->disk->private_data = NULL; ··· 1433 1435 1434 1436 if (md->queue) 1435 1437 blk_cleanup_queue(md->queue); 1438 + 1439 + cleanup_srcu_struct(&md->io_barrier); 1436 1440 1437 1441 if (md->bdev) { 1438 1442 bdput(md->bdev);
+2
drivers/misc/cxl/api.c
··· 247 247 cxl_ctx_get(); 248 248 249 249 if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { 250 + put_pid(ctx->glpid); 250 251 put_pid(ctx->pid); 252 + ctx->glpid = ctx->pid = NULL; 251 253 cxl_adapter_context_put(ctx->afu->adapter); 252 254 cxl_ctx_put(); 253 255 goto out;
+13 -9
drivers/misc/cxl/file.c
··· 194 194 ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF); 195 195 196 196 /* 197 + * Increment the mapped context count for adapter. This also checks 198 + * if adapter_context_lock is taken. 199 + */ 200 + rc = cxl_adapter_context_get(ctx->afu->adapter); 201 + if (rc) { 202 + afu_release_irqs(ctx, ctx); 203 + goto out; 204 + } 205 + 206 + /* 197 207 * We grab the PID here and not in the file open to allow for the case 198 208 * where a process (master, some daemon, etc) has opened the chardev on 199 209 * behalf of another process, so the AFU's mm gets bound to the process ··· 215 205 ctx->pid = get_task_pid(current, PIDTYPE_PID); 216 206 ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID); 217 207 218 - /* 219 - * Increment the mapped context count for adapter. This also checks 220 - * if adapter_context_lock is taken. 221 - */ 222 - rc = cxl_adapter_context_get(ctx->afu->adapter); 223 - if (rc) { 224 - afu_release_irqs(ctx, ctx); 225 - goto out; 226 - } 227 208 228 209 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); 229 210 ··· 222 221 amr))) { 223 222 afu_release_irqs(ctx, ctx); 224 223 cxl_adapter_context_put(ctx->afu->adapter); 224 + put_pid(ctx->glpid); 225 + put_pid(ctx->pid); 226 + ctx->glpid = ctx->pid = NULL; 225 227 goto out; 226 228 } 227 229
+11 -1
drivers/misc/genwqe/card_utils.c
··· 352 352 if (copy_from_user(sgl->lpage, user_addr + user_size - 353 353 sgl->lpage_size, sgl->lpage_size)) { 354 354 rc = -EFAULT; 355 - goto err_out1; 355 + goto err_out2; 356 356 } 357 357 } 358 358 return 0; 359 359 360 + err_out2: 361 + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage, 362 + sgl->lpage_dma_addr); 363 + sgl->lpage = NULL; 364 + sgl->lpage_dma_addr = 0; 360 365 err_out1: 361 366 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, 362 367 sgl->fpage_dma_addr); 368 + sgl->fpage = NULL; 369 + sgl->fpage_dma_addr = 0; 363 370 err_out: 364 371 __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, 365 372 sgl->sgl_dma_addr); 373 + sgl->sgl = NULL; 374 + sgl->sgl_dma_addr = 0; 375 + sgl->sgl_size = 0; 366 376 return -ENOMEM; 367 377 } 368 378
+4 -2
drivers/misc/mei/hw-txe.c
··· 981 981 hisr = mei_txe_br_reg_read(hw, HISR_REG); 982 982 983 983 aliveness = mei_txe_aliveness_get(dev); 984 - if (hhisr & IPC_HHIER_SEC && aliveness) 984 + if (hhisr & IPC_HHIER_SEC && aliveness) { 985 985 ipc_isr = mei_txe_sec_reg_read_silent(hw, 986 986 SEC_IPC_HOST_INT_STATUS_REG); 987 - else 987 + } else { 988 988 ipc_isr = 0; 989 + hhisr &= ~IPC_HHIER_SEC; 990 + } 989 991 990 992 generated = generated || 991 993 (hisr & HISR_INT_STS_MSK) ||
+1 -1
drivers/misc/sgi-gru/grumain.c
··· 283 283 spin_lock(&gru->gs_asid_lock); 284 284 BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); 285 285 asids->mt_ctxbitmap ^= ctxbitmap; 286 - gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n", 286 + gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n", 287 287 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]); 288 288 spin_unlock(&gru->gs_asid_lock); 289 289 spin_unlock(&gms->ms_asid_lock);
+7 -1
drivers/misc/vmw_vmci/vmci_doorbell.c
··· 431 431 if (vmci_handle_is_invalid(*handle)) { 432 432 u32 context_id = vmci_get_context_id(); 433 433 434 + if (context_id == VMCI_INVALID_ID) { 435 + pr_warn("Failed to get context ID\n"); 436 + result = VMCI_ERROR_NO_RESOURCES; 437 + goto free_mem; 438 + } 439 + 434 440 /* Let resource code allocate a free ID for us */ 435 441 new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID); 436 442 } else { ··· 531 525 532 526 entry = container_of(resource, struct dbell_entry, resource); 533 527 534 - if (vmci_guest_code_active()) { 528 + if (!hlist_unhashed(&entry->node)) { 535 529 int result; 536 530 537 531 dbell_index_table_remove(entry);
+1 -1
drivers/misc/vmw_vmci/vmci_driver.c
··· 113 113 114 114 MODULE_AUTHOR("VMware, Inc."); 115 115 MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); 116 - MODULE_VERSION("1.1.4.0-k"); 116 + MODULE_VERSION("1.1.5.0-k"); 117 117 MODULE_LICENSE("GPL v2");
+5 -5
drivers/mtd/ubi/fastmap.c
··· 707 707 fmvhdr->vol_type, 708 708 be32_to_cpu(fmvhdr->last_eb_bytes)); 709 709 710 - if (!av) 711 - goto fail_bad; 712 - if (PTR_ERR(av) == -EINVAL) { 713 - ubi_err(ubi, "volume (ID %i) already exists", 714 - fmvhdr->vol_id); 710 + if (IS_ERR(av)) { 711 + if (PTR_ERR(av) == -EEXIST) 712 + ubi_err(ubi, "volume (ID %i) already exists", 713 + fmvhdr->vol_id); 714 + 715 715 goto fail_bad; 716 716 } 717 717
+1 -1
drivers/nvdimm/Kconfig
··· 89 89 Select Y if unsure 90 90 91 91 config NVDIMM_DAX 92 - tristate "NVDIMM DAX: Raw access to persistent memory" 92 + bool "NVDIMM DAX: Raw access to persistent memory" 93 93 default LIBNVDIMM 94 94 depends on NVDIMM_PFN 95 95 help
+8 -6
drivers/nvdimm/namespace_devs.c
··· 2176 2176 return devs; 2177 2177 2178 2178 err: 2179 - for (i = 0; devs[i]; i++) 2180 - if (is_nd_blk(&nd_region->dev)) 2181 - namespace_blk_release(devs[i]); 2182 - else 2183 - namespace_pmem_release(devs[i]); 2184 - kfree(devs); 2179 + if (devs) { 2180 + for (i = 0; devs[i]; i++) 2181 + if (is_nd_blk(&nd_region->dev)) 2182 + namespace_blk_release(devs[i]); 2183 + else 2184 + namespace_pmem_release(devs[i]); 2185 + kfree(devs); 2186 + } 2185 2187 return NULL; 2186 2188 } 2187 2189
+6 -2
drivers/nvdimm/pmem.c
··· 47 47 return to_nd_region(to_dev(pmem)->parent); 48 48 } 49 49 50 - static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, 50 + static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, 51 51 unsigned int len) 52 52 { 53 53 struct device *dev = to_dev(pmem); ··· 62 62 __func__, (unsigned long long) sector, 63 63 cleared / 512, cleared / 512 > 1 ? "s" : ""); 64 64 badblocks_clear(&pmem->bb, sector, cleared / 512); 65 + } else { 66 + return -EIO; 65 67 } 68 + 66 69 invalidate_pmem(pmem->virt_addr + offset, len); 70 + return 0; 67 71 } 68 72 69 73 static void write_pmem(void *pmem_addr, struct page *page, ··· 127 123 flush_dcache_page(page); 128 124 write_pmem(pmem_addr, page, off, len); 129 125 if (unlikely(bad_pmem)) { 130 - pmem_clear_poison(pmem, pmem_off, len); 126 + rc = pmem_clear_poison(pmem, pmem_off, len); 131 127 write_pmem(pmem_addr, page, off, len); 132 128 } 133 129 }
+2
drivers/pci/msi.c
··· 610 610 * msi_capability_init - configure device's MSI capability structure 611 611 * @dev: pointer to the pci_dev data structure of MSI device function 612 612 * @nvec: number of interrupts to allocate 613 + * @affinity: flag to indicate cpu irq affinity mask should be set 613 614 * 614 615 * Setup the MSI capability structure of the device with the requested 615 616 * number of interrupts. A return value of zero indicates the successful ··· 753 752 * @dev: pointer to the pci_dev data structure of MSI-X device function 754 753 * @entries: pointer to an array of struct msix_entry entries 755 754 * @nvec: number of @entries 755 + * @affinity: flag to indicate cpu irq affinity mask should be set 756 756 * 757 757 * Setup the MSI-X capability structure of device function with a 758 758 * single MSI-X irq. A return of zero indicates the successful setup of
+8 -8
drivers/reset/reset-uniphier.c
··· 154 154 UNIPHIER_RESET_END, 155 155 }; 156 156 157 - const struct uniphier_reset_data uniphier_pro5_mio_reset_data[] = { 157 + const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = { 158 158 UNIPHIER_MIO_RESET_SD(0, 0), 159 159 UNIPHIER_MIO_RESET_SD(1, 1), 160 160 UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1), ··· 360 360 .compatible = "socionext,uniphier-ld20-reset", 361 361 .data = uniphier_ld20_sys_reset_data, 362 362 }, 363 - /* Media I/O reset */ 363 + /* Media I/O reset, SD reset */ 364 364 { 365 365 .compatible = "socionext,uniphier-sld3-mio-reset", 366 366 .data = uniphier_sld3_mio_reset_data, ··· 378 378 .data = uniphier_sld3_mio_reset_data, 379 379 }, 380 380 { 381 - .compatible = "socionext,uniphier-pro5-mio-reset", 382 - .data = uniphier_pro5_mio_reset_data, 381 + .compatible = "socionext,uniphier-pro5-sd-reset", 382 + .data = uniphier_pro5_sd_reset_data, 383 383 }, 384 384 { 385 - .compatible = "socionext,uniphier-pxs2-mio-reset", 386 - .data = uniphier_pro5_mio_reset_data, 385 + .compatible = "socionext,uniphier-pxs2-sd-reset", 386 + .data = uniphier_pro5_sd_reset_data, 387 387 }, 388 388 { 389 389 .compatible = "socionext,uniphier-ld11-mio-reset", 390 390 .data = uniphier_sld3_mio_reset_data, 391 391 }, 392 392 { 393 - .compatible = "socionext,uniphier-ld20-mio-reset", 394 - .data = uniphier_pro5_mio_reset_data, 393 + .compatible = "socionext,uniphier-ld20-sd-reset", 394 + .data = uniphier_pro5_sd_reset_data, 395 395 }, 396 396 /* Peripheral reset */ 397 397 {
+2 -2
drivers/s390/block/dasd_eckd.c
··· 1205 1205 mdc, lpm); 1206 1206 return mdc; 1207 1207 } 1208 - fcx_max_data = mdc * FCX_MAX_DATA_FACTOR; 1208 + fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR; 1209 1209 if (fcx_max_data < private->fcx_max_data) { 1210 1210 dev_warn(&device->cdev->dev, 1211 1211 "The maximum data size for zHPF requests %u " ··· 1675 1675 " data size for zHPF requests failed\n"); 1676 1676 return 0; 1677 1677 } else 1678 - return mdc * FCX_MAX_DATA_FACTOR; 1678 + return (u32)mdc * FCX_MAX_DATA_FACTOR; 1679 1679 } 1680 1680 1681 1681 /*
+4 -2
drivers/s390/cio/chp.c
··· 780 780 static int __init chp_init(void) 781 781 { 782 782 struct chp_id chpid; 783 - int ret; 783 + int state, ret; 784 784 785 785 ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw); 786 786 if (ret) ··· 791 791 return 0; 792 792 /* Register available channel-paths. */ 793 793 chp_id_for_each(&chpid) { 794 - if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) 794 + state = chp_info_get_status(chpid); 795 + if (state == CHP_STATUS_CONFIGURED || 796 + state == CHP_STATUS_STANDBY) 795 797 chp_new(chpid); 796 798 } 797 799
+3 -3
drivers/scsi/NCR5380.c
··· 353 353 #endif 354 354 355 355 356 - static int probe_irq __initdata; 356 + static int probe_irq; 357 357 358 358 /** 359 359 * probe_intr - helper for IRQ autoprobe ··· 365 365 * used by the IRQ probe code. 366 366 */ 367 367 368 - static irqreturn_t __init probe_intr(int irq, void *dev_id) 368 + static irqreturn_t probe_intr(int irq, void *dev_id) 369 369 { 370 370 probe_irq = irq; 371 371 return IRQ_HANDLED; ··· 380 380 * and then looking to see what interrupt actually turned up. 381 381 */ 382 382 383 - static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, 383 + static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, 384 384 int possible) 385 385 { 386 386 struct NCR5380_hostdata *hostdata = shost_priv(instance);
+23 -14
drivers/scsi/be2iscsi/be_main.c
··· 900 900 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 901 901 { 902 902 struct sgl_handle *psgl_handle; 903 + unsigned long flags; 903 904 904 - spin_lock_bh(&phba->io_sgl_lock); 905 + spin_lock_irqsave(&phba->io_sgl_lock, flags); 905 906 if (phba->io_sgl_hndl_avbl) { 906 907 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 907 908 "BM_%d : In alloc_io_sgl_handle," ··· 920 919 phba->io_sgl_alloc_index++; 921 920 } else 922 921 psgl_handle = NULL; 923 - spin_unlock_bh(&phba->io_sgl_lock); 922 + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); 924 923 return psgl_handle; 925 924 } 926 925 927 926 static void 928 927 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 929 928 { 930 - spin_lock_bh(&phba->io_sgl_lock); 929 + unsigned long flags; 930 + 931 + spin_lock_irqsave(&phba->io_sgl_lock, flags); 931 932 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 932 933 "BM_%d : In free_,io_sgl_free_index=%d\n", 933 934 phba->io_sgl_free_index); ··· 944 941 "value there=%p\n", phba->io_sgl_free_index, 945 942 phba->io_sgl_hndl_base 946 943 [phba->io_sgl_free_index]); 947 - spin_unlock_bh(&phba->io_sgl_lock); 944 + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); 948 945 return; 949 946 } 950 947 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; ··· 953 950 phba->io_sgl_free_index = 0; 954 951 else 955 952 phba->io_sgl_free_index++; 956 - spin_unlock_bh(&phba->io_sgl_lock); 953 + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); 957 954 } 958 955 959 956 static inline struct wrb_handle * ··· 961 958 unsigned int wrbs_per_cxn) 962 959 { 963 960 struct wrb_handle *pwrb_handle; 961 + unsigned long flags; 964 962 965 - spin_lock_bh(&pwrb_context->wrb_lock); 963 + spin_lock_irqsave(&pwrb_context->wrb_lock, flags); 966 964 pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; 967 965 pwrb_context->wrb_handles_available--; 968 966 if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) 969 967 
pwrb_context->alloc_index = 0; 970 968 else 971 969 pwrb_context->alloc_index++; 972 - spin_unlock_bh(&pwrb_context->wrb_lock); 970 + spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); 973 971 974 972 if (pwrb_handle) 975 973 memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb)); ··· 1005 1001 struct wrb_handle *pwrb_handle, 1006 1002 unsigned int wrbs_per_cxn) 1007 1003 { 1008 - spin_lock_bh(&pwrb_context->wrb_lock); 1004 + unsigned long flags; 1005 + 1006 + spin_lock_irqsave(&pwrb_context->wrb_lock, flags); 1009 1007 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; 1010 1008 pwrb_context->wrb_handles_available++; 1011 1009 if (pwrb_context->free_index == (wrbs_per_cxn - 1)) 1012 1010 pwrb_context->free_index = 0; 1013 1011 else 1014 1012 pwrb_context->free_index++; 1015 - spin_unlock_bh(&pwrb_context->wrb_lock); 1013 + spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); 1016 1014 } 1017 1015 1018 1016 /** ··· 1043 1037 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1044 1038 { 1045 1039 struct sgl_handle *psgl_handle; 1040 + unsigned long flags; 1046 1041 1047 - spin_lock_bh(&phba->mgmt_sgl_lock); 1042 + spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); 1048 1043 if (phba->eh_sgl_hndl_avbl) { 1049 1044 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1050 1045 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; ··· 1063 1056 phba->eh_sgl_alloc_index++; 1064 1057 } else 1065 1058 psgl_handle = NULL; 1066 - spin_unlock_bh(&phba->mgmt_sgl_lock); 1059 + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); 1067 1060 return psgl_handle; 1068 1061 } 1069 1062 1070 1063 void 1071 1064 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1072 1065 { 1073 - spin_lock_bh(&phba->mgmt_sgl_lock); 1066 + unsigned long flags; 1067 + 1068 + spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); 1074 1069 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1075 1070 "BM_%d : In 
free_mgmt_sgl_handle," 1076 1071 "eh_sgl_free_index=%d\n", ··· 1087 1078 "BM_%d : Double Free in eh SGL ," 1088 1079 "eh_sgl_free_index=%d\n", 1089 1080 phba->eh_sgl_free_index); 1090 - spin_unlock_bh(&phba->mgmt_sgl_lock); 1081 + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); 1091 1082 return; 1092 1083 } 1093 1084 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; ··· 1097 1088 phba->eh_sgl_free_index = 0; 1098 1089 else 1099 1090 phba->eh_sgl_free_index++; 1100 - spin_unlock_bh(&phba->mgmt_sgl_lock); 1091 + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); 1101 1092 } 1102 1093 1103 1094 static void
+2 -2
drivers/scsi/libiscsi.c
··· 791 791 792 792 free_task: 793 793 /* regular RX path uses back_lock */ 794 - spin_lock_bh(&session->back_lock); 794 + spin_lock(&session->back_lock); 795 795 __iscsi_put_task(task); 796 - spin_unlock_bh(&session->back_lock); 796 + spin_unlock(&session->back_lock); 797 797 return NULL; 798 798 } 799 799
+4 -2
drivers/staging/android/ion/ion.c
··· 1187 1187 hdata.type = heap->type; 1188 1188 hdata.heap_id = heap->id; 1189 1189 1190 - ret = copy_to_user(&buffer[cnt], 1191 - &hdata, sizeof(hdata)); 1190 + if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) { 1191 + ret = -EFAULT; 1192 + goto out; 1193 + } 1192 1194 1193 1195 cnt++; 1194 1196 if (cnt >= max_cnt)
+1 -1
drivers/staging/android/ion/ion_of.c
··· 107 107 108 108 heap_pdev = of_platform_device_create(node, heaps[i].name, 109 109 &pdev->dev); 110 - if (!pdev) 110 + if (!heap_pdev) 111 111 return ERR_PTR(-ENOMEM); 112 112 heap_pdev->dev.platform_data = &heaps[i]; 113 113
+1
drivers/staging/greybus/arche-platform.c
··· 128 128 pdev = of_find_device_by_node(np); 129 129 if (!pdev) { 130 130 pr_err("arche-platform device not found\n"); 131 + of_node_put(np); 131 132 return -ENODEV; 132 133 } 133 134
+2 -1
drivers/staging/greybus/es2.c
··· 1548 1548 INIT_LIST_HEAD(&es2->arpcs); 1549 1549 spin_lock_init(&es2->arpc_lock); 1550 1550 1551 - if (es2_arpc_in_enable(es2)) 1551 + retval = es2_arpc_in_enable(es2); 1552 + if (retval) 1552 1553 goto error; 1553 1554 1554 1555 retval = gb_hd_add(hd);
+2 -4
drivers/staging/greybus/gpio.c
··· 702 702 ret = gb_gpio_irqchip_add(gpio, irqc, 0, 703 703 handle_level_irq, IRQ_TYPE_NONE); 704 704 if (ret) { 705 - dev_err(&connection->bundle->dev, 706 - "failed to add irq chip: %d\n", ret); 705 + dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret); 707 706 goto exit_line_free; 708 707 } 709 708 710 709 ret = gpiochip_add(gpio); 711 710 if (ret) { 712 - dev_err(&connection->bundle->dev, 713 - "failed to add gpio chip: %d\n", ret); 711 + dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret); 714 712 goto exit_gpio_irqchip_remove; 715 713 } 716 714
+1 -1
drivers/staging/greybus/module.c
··· 127 127 return module; 128 128 129 129 err_put_interfaces: 130 - for (--i; i > 0; --i) 130 + for (--i; i >= 0; --i) 131 131 gb_interface_put(module->interfaces[i]); 132 132 133 133 put_device(&module->dev);
+1 -1
drivers/staging/greybus/uart.c
··· 888 888 minor = alloc_minor(gb_tty); 889 889 if (minor < 0) { 890 890 if (minor == -ENOSPC) { 891 - dev_err(&connection->bundle->dev, 891 + dev_err(&gbphy_dev->dev, 892 892 "no more free minor numbers\n"); 893 893 retval = -ENODEV; 894 894 } else {
+2
drivers/staging/iio/accel/sca3000_core.c
··· 468 468 case SCA3000_MEAS_MODE_OP_2: 469 469 *base_freq = info->option_mode_2_freq; 470 470 break; 471 + default: 472 + ret = -EINVAL; 471 473 } 472 474 error_ret: 473 475 return ret;
+17 -17
drivers/staging/lustre/lustre/llite/lproc_llite.c
··· 871 871 } 872 872 LUSTRE_RW_ATTR(xattr_cache); 873 873 874 - static ssize_t unstable_stats_show(struct kobject *kobj, 875 - struct attribute *attr, 876 - char *buf) 874 + static int ll_unstable_stats_seq_show(struct seq_file *m, void *v) 877 875 { 878 - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, 879 - ll_kobj); 876 + struct super_block *sb = m->private; 877 + struct ll_sb_info *sbi = ll_s2sbi(sb); 880 878 struct cl_client_cache *cache = sbi->ll_cache; 881 879 long pages; 882 880 int mb; ··· 882 884 pages = atomic_long_read(&cache->ccc_unstable_nr); 883 885 mb = (pages * PAGE_SIZE) >> 20; 884 886 885 - return sprintf(buf, "unstable_check: %8d\n" 886 - "unstable_pages: %12ld\n" 887 - "unstable_mb: %8d\n", 888 - cache->ccc_unstable_check, pages, mb); 887 + seq_printf(m, 888 + "unstable_check: %8d\n" 889 + "unstable_pages: %12ld\n" 890 + "unstable_mb: %8d\n", 891 + cache->ccc_unstable_check, pages, mb); 892 + 893 + return 0; 889 894 } 890 895 891 - static ssize_t unstable_stats_store(struct kobject *kobj, 892 - struct attribute *attr, 893 - const char *buffer, 894 - size_t count) 896 + static ssize_t ll_unstable_stats_seq_write(struct file *file, 897 + const char __user *buffer, 898 + size_t count, loff_t *off) 895 899 { 896 - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, 897 - ll_kobj); 900 + struct super_block *sb = ((struct seq_file *)file->private_data)->private; 901 + struct ll_sb_info *sbi = ll_s2sbi(sb); 898 902 char kernbuf[128]; 899 903 int val, rc; 900 904 ··· 922 922 923 923 return count; 924 924 } 925 - LUSTRE_RW_ATTR(unstable_stats); 925 + LPROC_SEQ_FOPS(ll_unstable_stats); 926 926 927 927 static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr, 928 928 char *buf) ··· 995 995 /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */ 996 996 { "max_cached_mb", &ll_max_cached_mb_fops, NULL }, 997 997 { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 }, 998 + { "unstable_stats", 
&ll_unstable_stats_fops, NULL }, 998 999 { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 }, 999 1000 { .name = "nosquash_nids", 1000 1001 .fops = &ll_nosquash_nids_fops }, ··· 1027 1026 &lustre_attr_max_easize.attr, 1028 1027 &lustre_attr_default_easize.attr, 1029 1028 &lustre_attr_xattr_cache.attr, 1030 - &lustre_attr_unstable_stats.attr, 1031 1029 &lustre_attr_root_squash.attr, 1032 1030 NULL, 1033 1031 };
-1
drivers/staging/wilc1000/host_interface.c
··· 3388 3388 3389 3389 clients_count++; 3390 3390 3391 - destroy_workqueue(hif_workqueue); 3392 3391 _fail_: 3393 3392 return result; 3394 3393 }
+60
drivers/thermal/intel_pch_thermal.c
··· 20 20 #include <linux/types.h> 21 21 #include <linux/init.h> 22 22 #include <linux/pci.h> 23 + #include <linux/acpi.h> 23 24 #include <linux/thermal.h> 24 25 #include <linux/pm.h> 25 26 26 27 /* Intel PCH thermal Device IDs */ 28 + #define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */ 29 + #define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */ 27 30 #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ 28 31 #define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */ 29 32 ··· 69 66 unsigned long crt_temp; 70 67 int hot_trip_id; 71 68 unsigned long hot_temp; 69 + int psv_trip_id; 70 + unsigned long psv_temp; 72 71 bool bios_enabled; 73 72 }; 73 + 74 + #ifdef CONFIG_ACPI 75 + 76 + /* 77 + * On some platforms, there is a companion ACPI device, which adds 78 + * passive trip temperature using _PSV method. There is no specific 79 + * passive temperature setting in MMIO interface of this PCI device. 80 + */ 81 + static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, 82 + int *nr_trips) 83 + { 84 + struct acpi_device *adev; 85 + 86 + ptd->psv_trip_id = -1; 87 + 88 + adev = ACPI_COMPANION(&ptd->pdev->dev); 89 + if (adev) { 90 + unsigned long long r; 91 + acpi_status status; 92 + 93 + status = acpi_evaluate_integer(adev->handle, "_PSV", NULL, 94 + &r); 95 + if (ACPI_SUCCESS(status)) { 96 + unsigned long trip_temp; 97 + 98 + trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r); 99 + if (trip_temp) { 100 + ptd->psv_temp = trip_temp; 101 + ptd->psv_trip_id = *nr_trips; 102 + ++(*nr_trips); 103 + } 104 + } 105 + } 106 + } 107 + #else 108 + static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, 109 + int *nr_trips) 110 + { 111 + ptd->psv_trip_id = -1; 112 + 113 + } 114 + #endif 74 115 75 116 static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) 76 117 { ··· 165 118 ptd->hot_trip_id = *nr_trips; 166 119 ++(*nr_trips); 167 120 } 121 + 122 + pch_wpt_add_acpi_psv_trip(ptd, nr_trips); 168 123 169 124 return 0; 170 125 } ··· 243 194 *type = 
THERMAL_TRIP_CRITICAL; 244 195 else if (ptd->hot_trip_id == trip) 245 196 *type = THERMAL_TRIP_HOT; 197 + else if (ptd->psv_trip_id == trip) 198 + *type = THERMAL_TRIP_PASSIVE; 246 199 else 247 200 return -EINVAL; 248 201 ··· 259 208 *temp = ptd->crt_temp; 260 209 else if (ptd->hot_trip_id == trip) 261 210 *temp = ptd->hot_temp; 211 + else if (ptd->psv_trip_id == trip) 212 + *temp = ptd->psv_temp; 262 213 else 263 214 return -EINVAL; 264 215 ··· 294 241 case PCH_THERMAL_DID_SKL: 295 242 ptd->ops = &pch_dev_ops_wpt; 296 243 dev_name = "pch_skylake"; 244 + break; 245 + case PCH_THERMAL_DID_HSW_1: 246 + case PCH_THERMAL_DID_HSW_2: 247 + ptd->ops = &pch_dev_ops_wpt; 248 + dev_name = "pch_haswell"; 297 249 break; 298 250 default: 299 251 dev_err(&pdev->dev, "unknown pch thermal device\n"); ··· 382 324 static struct pci_device_id intel_pch_thermal_id[] = { 383 325 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) }, 384 326 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) }, 327 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) }, 328 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) }, 385 329 { 0, }, 386 330 }; 387 331 MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
+2 -12
drivers/thermal/intel_powerclamp.c
··· 669 669 .set_cur_state = powerclamp_set_cur_state, 670 670 }; 671 671 672 - static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = { 673 - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, 674 - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT }, 675 - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC }, 676 - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC}, 677 - {} 678 - }; 679 - MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); 680 - 681 672 static int __init powerclamp_probe(void) 682 673 { 683 - if (!x86_match_cpu(intel_powerclamp_ids)) { 684 - pr_err("Intel powerclamp does not run on family %d model %d\n", 685 - boot_cpu_data.x86, boot_cpu_data.x86_model); 674 + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { 675 + pr_err("CPU does not support MWAIT"); 686 676 return -ENODEV; 687 677 } 688 678
+1 -1
drivers/tty/serial/8250/8250_lpss.c
··· 216 216 struct pci_dev *pdev = to_pci_dev(port->dev); 217 217 int ret; 218 218 219 - ret = pci_alloc_irq_vectors(pdev, 1, 1, 0); 219 + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 220 220 if (ret < 0) 221 221 return ret; 222 222
+2 -1
drivers/tty/serial/8250/8250_port.c
··· 83 83 .name = "16550A", 84 84 .fifo_size = 16, 85 85 .tx_loadsz = 16, 86 - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, 86 + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | 87 + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, 87 88 .rxtrig_bytes = {1, 4, 8, 14}, 88 89 .flags = UART_CAP_FIFO, 89 90 },
+2 -2
drivers/tty/serial/8250/8250_uniphier.c
··· 99 99 case UART_LCR: 100 100 valshift = UNIPHIER_UART_LCR_SHIFT; 101 101 /* Divisor latch access bit does not exist. */ 102 - value &= ~(UART_LCR_DLAB << valshift); 102 + value &= ~UART_LCR_DLAB; 103 103 /* fall through */ 104 104 case UART_MCR: 105 105 offset = UNIPHIER_UART_LCR_MCR; ··· 199 199 200 200 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 201 201 if (!regs) { 202 - dev_err(dev, "failed to get memory resource"); 202 + dev_err(dev, "failed to get memory resource\n"); 203 203 return -EINVAL; 204 204 } 205 205
+1
drivers/tty/serial/Kconfig
··· 1638 1638 config SERIAL_STM32 1639 1639 tristate "STMicroelectronics STM32 serial port support" 1640 1640 select SERIAL_CORE 1641 + depends on HAS_DMA 1641 1642 depends on ARM || COMPILE_TEST 1642 1643 help 1643 1644 This driver is for the on-chip Serial Controller on
+22 -4
drivers/tty/serial/atmel_serial.c
··· 2132 2132 mode |= ATMEL_US_USMODE_RS485; 2133 2133 } else if (termios->c_cflag & CRTSCTS) { 2134 2134 /* RS232 with hardware handshake (RTS/CTS) */ 2135 - if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) { 2136 - dev_info(port->dev, "not enabling hardware flow control because DMA is used"); 2137 - termios->c_cflag &= ~CRTSCTS; 2138 - } else { 2135 + if (atmel_use_fifo(port) && 2136 + !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { 2137 + /* 2138 + * with ATMEL_US_USMODE_HWHS set, the controller will 2139 + * be able to drive the RTS pin high/low when the RX 2140 + * FIFO is above RXFTHRES/below RXFTHRES2. 2141 + * It will also disable the transmitter when the CTS 2142 + * pin is high. 2143 + * This mode is not activated if CTS pin is a GPIO 2144 + * because in this case, the transmitter is always 2145 + * disabled (there must be an internal pull-up 2146 + * responsible for this behaviour). 2147 + * If the RTS pin is a GPIO, the controller won't be 2148 + * able to drive it according to the FIFO thresholds, 2149 + * but it will be handled by the driver. 2150 + */ 2139 2151 mode |= ATMEL_US_USMODE_HWHS; 2152 + } else { 2153 + /* 2154 + * For platforms without FIFO, the flow control is 2155 + * handled by the driver. 2156 + */ 2157 + mode |= ATMEL_US_USMODE_NORMAL; 2140 2158 } 2141 2159 } else { 2142 2160 /* RS232 without hadware handshake */
+1 -2
drivers/tty/serial/fsl_lpuart.c
··· 328 328 329 329 sport->dma_tx_bytes = uart_circ_chars_pending(xmit); 330 330 331 - if (xmit->tail < xmit->head) { 331 + if (xmit->tail < xmit->head || xmit->head == 0) { 332 332 sport->dma_tx_nents = 1; 333 333 sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes); 334 334 } else { ··· 359 359 sport->dma_tx_in_progress = true; 360 360 sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc); 361 361 dma_async_issue_pending(sport->dma_tx_chan); 362 - 363 362 } 364 363 365 364 static void lpuart_dma_tx_complete(void *arg)
+1
drivers/tty/serial/pch_uart.c
··· 419 419 }, 420 420 (void *)MINNOW_UARTCLK, 421 421 }, 422 + { } 422 423 }; 423 424 424 425 /* Return UART clock, checking for board specific clocks. */
+6 -2
drivers/tty/serial/sc16is7xx.c
··· 1130 1130 { 1131 1131 struct sc16is7xx_port *s = gpiochip_get_data(chip); 1132 1132 struct uart_port *port = &s->p[0].port; 1133 + u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG); 1133 1134 1134 - sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset), 1135 - val ? BIT(offset) : 0); 1135 + if (val) 1136 + state |= BIT(offset); 1137 + else 1138 + state &= ~BIT(offset); 1139 + sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state); 1136 1140 sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset), 1137 1141 BIT(offset)); 1138 1142
+4 -4
drivers/tty/serial/serial_core.c
··· 111 111 * closed. No cookie for you. 112 112 */ 113 113 BUG_ON(!state); 114 - tty_wakeup(state->port.tty); 114 + tty_port_tty_wakeup(&state->port); 115 115 } 116 116 117 117 static void uart_stop(struct tty_struct *tty) ··· 632 632 if (port->ops->flush_buffer) 633 633 port->ops->flush_buffer(port); 634 634 uart_port_unlock(port, flags); 635 - tty_wakeup(tty); 635 + tty_port_tty_wakeup(&state->port); 636 636 } 637 637 638 638 /* ··· 2746 2746 uport->cons = drv->cons; 2747 2747 uport->minor = drv->tty_driver->minor_start + uport->line; 2748 2748 2749 - port->console = uart_console(uport); 2750 - 2751 2749 /* 2752 2750 * If this port is a console, then the spinlock is already 2753 2751 * initialised. ··· 2758 2760 of_console_check(uport->dev->of_node, uport->cons->name, uport->line); 2759 2761 2760 2762 uart_configure_port(drv, state, uport); 2763 + 2764 + port->console = uart_console(uport); 2761 2765 2762 2766 num_groups = 2; 2763 2767 if (uport->attr_group)
+1 -1
drivers/tty/serial/stm32-usart.h
··· 31 31 struct stm32_usart_config cfg; 32 32 }; 33 33 34 - #define UNDEF_REG ~0 34 + #define UNDEF_REG 0xff 35 35 36 36 /* Register offsets */ 37 37 struct stm32_usart_info stm32f4_info = {
+2
drivers/tty/serial/xilinx_uartps.c
··· 1200 1200 OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup); 1201 1201 OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup); 1202 1202 OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup); 1203 + OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup); 1203 1204 1204 1205 /** 1205 1206 * cdns_uart_console_write - perform write operation ··· 1439 1438 { .compatible = "xlnx,xuartps", }, 1440 1439 { .compatible = "cdns,uart-r1p8", }, 1441 1440 { .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def }, 1441 + { .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def }, 1442 1442 {} 1443 1443 }; 1444 1444 MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
+6 -1
drivers/tty/vt/vt.c
··· 859 859 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) 860 860 return 0; 861 861 862 + if (new_screen_size > (4 << 20)) 863 + return -EINVAL; 862 864 newscreen = kmalloc(new_screen_size, GFP_USER); 863 865 if (!newscreen) 864 866 return -ENOMEM; 867 + 868 + if (vc == sel_cons) 869 + clear_selection(); 865 870 866 871 old_rows = vc->vc_rows; 867 872 old_row_size = vc->vc_size_row; ··· 1170 1165 break; 1171 1166 case 3: /* erase scroll-back buffer (and whole display) */ 1172 1167 scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, 1173 - vc->vc_screenbuf_size >> 1); 1168 + vc->vc_screenbuf_size); 1174 1169 set_origin(vc); 1175 1170 if (con_is_visible(vc)) 1176 1171 update_screen(vc);
+2
drivers/usb/chipidea/host.c
··· 188 188 189 189 if (hcd) { 190 190 usb_remove_hcd(hcd); 191 + ci->role = CI_ROLE_END; 192 + synchronize_irq(ci->irq); 191 193 usb_put_hcd(hcd); 192 194 if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) && 193 195 (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
+10 -1
drivers/usb/dwc2/core.c
··· 463 463 */ 464 464 void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg) 465 465 { 466 + bool ret; 467 + 466 468 switch (hsotg->dr_mode) { 467 469 case USB_DR_MODE_HOST: 468 - dwc2_force_mode(hsotg, true); 470 + ret = dwc2_force_mode(hsotg, true); 471 + /* 472 + * NOTE: This is required for some rockchip soc based 473 + * platforms on their host-only dwc2. 474 + */ 475 + if (!ret) 476 + msleep(50); 477 + 469 478 break; 470 479 case USB_DR_MODE_PERIPHERAL: 471 480 dwc2_force_mode(hsotg, false);
+7
drivers/usb/dwc2/core.h
··· 259 259 DWC2_L3, /* Off state */ 260 260 }; 261 261 262 + /* 263 + * Gadget periodic tx fifo sizes as used by legacy driver 264 + * EP0 is not included 265 + */ 266 + #define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \ 267 + 768, 0, 0, 0, 0, 0, 0, 0} 268 + 262 269 /* Gadget ep0 states */ 263 270 enum dwc2_ep0_state { 264 271 DWC2_EP0_SETUP,
+42 -11
drivers/usb/dwc2/gadget.c
··· 186 186 */ 187 187 static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) 188 188 { 189 - unsigned int fifo; 189 + unsigned int ep; 190 190 unsigned int addr; 191 191 int timeout; 192 - u32 dptxfsizn; 193 192 u32 val; 194 193 195 194 /* Reset fifo map if not correctly cleared during previous session */ ··· 216 217 * them to endpoints dynamically according to maxpacket size value of 217 218 * given endpoint. 218 219 */ 219 - for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) { 220 - dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)); 220 + for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) { 221 + if (!hsotg->g_tx_fifo_sz[ep]) 222 + continue; 223 + val = addr; 224 + val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT; 225 + WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem, 226 + "insufficient fifo memory"); 227 + addr += hsotg->g_tx_fifo_sz[ep]; 221 228 222 - val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr; 223 - addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT; 224 - 225 - if (addr > hsotg->fifo_mem) 226 - break; 227 - 228 - dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo)); 229 + dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep)); 229 230 } 230 231 231 232 /* ··· 3806 3807 static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) 3807 3808 { 3808 3809 struct device_node *np = hsotg->dev->of_node; 3810 + u32 len = 0; 3811 + u32 i = 0; 3809 3812 3810 3813 /* Enable dma if requested in device tree */ 3811 3814 hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma"); 3812 3815 3816 + /* 3817 + * Register TX periodic fifo size per endpoint. 3818 + * EP0 is excluded since it has no fifo configuration. 
3819 + */ 3820 + if (!of_find_property(np, "g-tx-fifo-size", &len)) 3821 + goto rx_fifo; 3822 + 3823 + len /= sizeof(u32); 3824 + 3825 + /* Read tx fifo sizes other than ep0 */ 3826 + if (of_property_read_u32_array(np, "g-tx-fifo-size", 3827 + &hsotg->g_tx_fifo_sz[1], len)) 3828 + goto rx_fifo; 3829 + 3830 + /* Add ep0 */ 3831 + len++; 3832 + 3833 + /* Make remaining TX fifos unavailable */ 3834 + if (len < MAX_EPS_CHANNELS) { 3835 + for (i = len; i < MAX_EPS_CHANNELS; i++) 3836 + hsotg->g_tx_fifo_sz[i] = 0; 3837 + } 3838 + 3839 + rx_fifo: 3813 3840 /* Register RX fifo size */ 3814 3841 of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz); 3815 3842 ··· 3857 3832 struct device *dev = hsotg->dev; 3858 3833 int epnum; 3859 3834 int ret; 3835 + int i; 3836 + u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE; 3860 3837 3861 3838 /* Initialize to legacy fifo configuration values */ 3862 3839 hsotg->g_rx_fifo_sz = 2048; 3863 3840 hsotg->g_np_g_tx_fifo_sz = 1024; 3841 + memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo)); 3864 3842 /* Device tree specific probe */ 3865 3843 dwc2_hsotg_of_probe(hsotg); 3866 3844 ··· 3881 3853 dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", 3882 3854 hsotg->g_np_g_tx_fifo_sz); 3883 3855 dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz); 3856 + for (i = 0; i < MAX_EPS_CHANNELS; i++) 3857 + dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i, 3858 + hsotg->g_tx_fifo_sz[i]); 3884 3859 3885 3860 hsotg->gadget.max_speed = USB_SPEED_HIGH; 3886 3861 hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
+18 -8
drivers/usb/dwc3/gadget.c
··· 783 783 req->trb = trb; 784 784 req->trb_dma = dwc3_trb_dma_offset(dep, trb); 785 785 req->first_trb_index = dep->trb_enqueue; 786 + dep->queued_requests++; 786 787 } 787 788 788 789 dwc3_ep_inc_enq(dep); ··· 833 832 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 834 833 835 834 trb->ctrl |= DWC3_TRB_CTRL_HWO; 836 - 837 - dep->queued_requests++; 838 835 839 836 trace_dwc3_prepare_trb(dep, trb); 840 837 } ··· 1073 1074 1074 1075 list_add_tail(&req->list, &dep->pending_list); 1075 1076 1076 - if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1077 - dep->flags & DWC3_EP_PENDING_REQUEST) { 1078 - if (list_empty(&dep->started_list)) { 1077 + /* 1078 + * NOTICE: Isochronous endpoints should NEVER be prestarted. We must 1079 + * wait for a XferNotReady event so we will know what's the current 1080 + * (micro-)frame number. 1081 + * 1082 + * Without this trick, we are very, very likely gonna get Bus Expiry 1083 + * errors which will force us issue EndTransfer command. 1084 + */ 1085 + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { 1086 + if ((dep->flags & DWC3_EP_PENDING_REQUEST) && 1087 + list_empty(&dep->started_list)) { 1079 1088 dwc3_stop_active_transfer(dwc, dep->number, true); 1080 1089 dep->flags = DWC3_EP_ENABLED; 1081 1090 } ··· 1868 1861 unsigned int s_pkt = 0; 1869 1862 unsigned int trb_status; 1870 1863 1871 - dep->queued_requests--; 1872 1864 dwc3_ep_inc_deq(dep); 1865 + 1866 + if (req->trb == trb) 1867 + dep->queued_requests--; 1868 + 1873 1869 trace_dwc3_complete_trb(dep, trb); 1874 1870 1875 1871 /* ··· 2990 2980 kfree(dwc->setup_buf); 2991 2981 2992 2982 err2: 2993 - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 2983 + dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, 2994 2984 dwc->ep0_trb, dwc->ep0_trb_addr); 2995 2985 2996 2986 err1: ··· 3015 3005 kfree(dwc->setup_buf); 3016 3006 kfree(dwc->zlp_buf); 3017 3007 3018 - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), 3008 + dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) 
* 2, 3019 3009 dwc->ep0_trb, dwc->ep0_trb_addr); 3020 3010 3021 3011 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+92 -15
drivers/usb/gadget/function/f_fs.c
··· 136 136 /* 137 137 * Buffer for holding data from partial reads which may happen since 138 138 * we’re rounding user read requests to a multiple of a max packet size. 139 + * 140 + * The pointer is initialised with NULL value and may be set by 141 + * __ffs_epfile_read_data function to point to a temporary buffer. 142 + * 143 + * In normal operation, calls to __ffs_epfile_read_buffered will consume 144 + * data from said buffer and eventually free it. Importantly, while the 145 + * function is using the buffer, it sets the pointer to NULL. This is 146 + * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered 147 + * can never run concurrently (they are synchronised by epfile->mutex) 148 + * so the latter will not assign a new value to the pointer. 149 + * 150 + * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is 151 + * valid) and sets the pointer to READ_BUFFER_DROP value. This special 152 + * value is crux of the synchronisation between ffs_func_eps_disable and 153 + * __ffs_epfile_read_data. 154 + * 155 + * Once __ffs_epfile_read_data is about to finish it will try to set the 156 + * pointer back to its old value (as described above), but seeing as the 157 + * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free 158 + * the buffer. 
159 + * 160 + * == State transitions == 161 + * 162 + * • ptr == NULL: (initial state) 163 + * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP 164 + * ◦ __ffs_epfile_read_buffered: nop 165 + * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf 166 + * ◦ reading finishes: n/a, not in ‘and reading’ state 167 + * • ptr == DROP: 168 + * ◦ __ffs_epfile_read_buffer_free: nop 169 + * ◦ __ffs_epfile_read_buffered: go to ptr == NULL 170 + * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop 171 + * ◦ reading finishes: n/a, not in ‘and reading’ state 172 + * • ptr == buf: 173 + * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP 174 + * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading 175 + * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered 176 + * is always called first 177 + * ◦ reading finishes: n/a, not in ‘and reading’ state 178 + * • ptr == NULL and reading: 179 + * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading 180 + * ◦ __ffs_epfile_read_buffered: n/a, mutex is held 181 + * ◦ __ffs_epfile_read_data: n/a, mutex is held 182 + * ◦ reading finishes and … 183 + * … all data read: free buf, go to ptr == NULL 184 + * … otherwise: go to ptr == buf and reading 185 + * • ptr == DROP and reading: 186 + * ◦ __ffs_epfile_read_buffer_free: nop 187 + * ◦ __ffs_epfile_read_buffered: n/a, mutex is held 188 + * ◦ __ffs_epfile_read_data: n/a, mutex is held 189 + * ◦ reading finishes: free buf, go to ptr == DROP 139 190 */ 140 - struct ffs_buffer *read_buffer; /* P: epfile->mutex */ 191 + struct ffs_buffer *read_buffer; 192 + #define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN)) 141 193 142 194 char name[5]; 143 195 ··· 788 736 schedule_work(&io_data->work); 789 737 } 790 738 739 + static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile) 740 + { 741 + /* 742 + * See comment in struct ffs_epfile for full read_buffer pointer 743 + * synchronisation story. 
744 + */ 745 + struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP); 746 + if (buf && buf != READ_BUFFER_DROP) 747 + kfree(buf); 748 + } 749 + 791 750 /* Assumes epfile->mutex is held. */ 792 751 static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile, 793 752 struct iov_iter *iter) 794 753 { 795 - struct ffs_buffer *buf = epfile->read_buffer; 754 + /* 755 + * Null out epfile->read_buffer so ffs_func_eps_disable does not free 756 + * the buffer while we are using it. See comment in struct ffs_epfile 757 + * for full read_buffer pointer synchronisation story. 758 + */ 759 + struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL); 796 760 ssize_t ret; 797 - if (!buf) 761 + if (!buf || buf == READ_BUFFER_DROP) 798 762 return 0; 799 763 800 764 ret = copy_to_iter(buf->data, buf->length, iter); 801 765 if (buf->length == ret) { 802 766 kfree(buf); 803 - epfile->read_buffer = NULL; 804 - } else if (unlikely(iov_iter_count(iter))) { 767 + return ret; 768 + } 769 + 770 + if (unlikely(iov_iter_count(iter))) { 805 771 ret = -EFAULT; 806 772 } else { 807 773 buf->length -= ret; 808 774 buf->data += ret; 809 775 } 776 + 777 + if (cmpxchg(&epfile->read_buffer, NULL, buf)) 778 + kfree(buf); 779 + 810 780 return ret; 811 781 } 812 782 ··· 857 783 buf->length = data_len; 858 784 buf->data = buf->storage; 859 785 memcpy(buf->storage, data + ret, data_len); 860 - epfile->read_buffer = buf; 786 + 787 + /* 788 + * At this point read_buffer is NULL or READ_BUFFER_DROP (if 789 + * ffs_func_eps_disable has been called in the meanwhile). See comment 790 + * in struct ffs_epfile for full read_buffer pointer synchronisation 791 + * story. 
792 + */ 793 + if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf))) 794 + kfree(buf); 861 795 862 796 return ret; 863 797 } ··· 1179 1097 1180 1098 ENTER(); 1181 1099 1182 - kfree(epfile->read_buffer); 1183 - epfile->read_buffer = NULL; 1100 + __ffs_epfile_read_buffer_free(epfile); 1184 1101 ffs_data_closed(epfile->ffs); 1185 1102 1186 1103 return 0; ··· 1805 1724 unsigned count = func->ffs->eps_count; 1806 1725 unsigned long flags; 1807 1726 1727 + spin_lock_irqsave(&func->ffs->eps_lock, flags); 1808 1728 do { 1809 - if (epfile) 1810 - mutex_lock(&epfile->mutex); 1811 - spin_lock_irqsave(&func->ffs->eps_lock, flags); 1812 1729 /* pending requests get nuked */ 1813 1730 if (likely(ep->ep)) 1814 1731 usb_ep_disable(ep->ep); 1815 1732 ++ep; 1816 - spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1817 1733 1818 1734 if (epfile) { 1819 1735 epfile->ep = NULL; 1820 - kfree(epfile->read_buffer); 1821 - epfile->read_buffer = NULL; 1822 - mutex_unlock(&epfile->mutex); 1736 + __ffs_epfile_read_buffer_free(epfile); 1823 1737 ++epfile; 1824 1738 } 1825 1739 } while (--count); 1740 + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1826 1741 } 1827 1742 1828 1743 static int ffs_func_eps_enable(struct ffs_function *func)
+3 -2
drivers/usb/gadget/function/u_ether.c
··· 590 590 591 591 /* throttle high/super speed IRQ rate back slightly */ 592 592 if (gadget_is_dualspeed(dev->gadget)) 593 - req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH || 594 - dev->gadget->speed == USB_SPEED_SUPER) 593 + req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || 594 + dev->gadget->speed == USB_SPEED_SUPER)) && 595 + !list_empty(&dev->tx_reqs)) 595 596 ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) 596 597 : 0; 597 598
+1 -1
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 1978 1978 dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); 1979 1979 goto err; 1980 1980 } 1981 - ep->ep.name = name; 1981 + ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); 1982 1982 1983 1983 ep->ep_regs = udc->regs + USBA_EPT_BASE(i); 1984 1984 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
+1 -1
drivers/usb/host/ehci-platform.c
··· 39 39 40 40 #define DRIVER_DESC "EHCI generic platform driver" 41 41 #define EHCI_MAX_CLKS 4 42 - #define EHCI_MAX_RSTS 3 42 + #define EHCI_MAX_RSTS 4 43 43 #define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv) 44 44 45 45 struct ehci_platform_priv {
+6 -3
drivers/usb/host/ohci-at91.c
··· 221 221 ohci->num_ports = board->ports; 222 222 at91_start_hc(pdev); 223 223 224 + /* 225 + * The RemoteWakeupConnected bit has to be set explicitly 226 + * before calling ohci_run. The reset value of this bit is 0. 227 + */ 228 + ohci->hc_control = OHCI_CTRL_RWC; 229 + 224 230 retval = usb_add_hcd(hcd, irq, IRQF_SHARED); 225 231 if (retval == 0) { 226 232 device_wakeup_enable(hcd->self.controller); ··· 683 677 * REVISIT: some boards will be able to turn VBUS off... 684 678 */ 685 679 if (!ohci_at91->wakeup) { 686 - ohci->hc_control = ohci_readl(ohci, &ohci->regs->control); 687 - ohci->hc_control &= OHCI_CTRL_RWC; 688 - ohci_writel(ohci, ohci->hc_control, &ohci->regs->control); 689 680 ohci->rh_state = OHCI_RH_HALTED; 690 681 691 682 /* flush the writes */
+1 -1
drivers/usb/host/ohci-hcd.c
··· 72 72 static const char hcd_name [] = "ohci_hcd"; 73 73 74 74 #define STATECHANGE_DELAY msecs_to_jiffies(300) 75 - #define IO_WATCHDOG_DELAY msecs_to_jiffies(250) 75 + #define IO_WATCHDOG_DELAY msecs_to_jiffies(275) 76 76 77 77 #include "ohci.h" 78 78 #include "pci-quirks.h"
+39 -2
drivers/usb/host/xhci-hub.c
··· 1166 1166 xhci_set_link_state(xhci, port_array, wIndex, 1167 1167 XDEV_RESUME); 1168 1168 spin_unlock_irqrestore(&xhci->lock, flags); 1169 - msleep(20); 1169 + msleep(USB_RESUME_TIMEOUT); 1170 1170 spin_lock_irqsave(&xhci->lock, flags); 1171 1171 xhci_set_link_state(xhci, port_array, wIndex, 1172 1172 XDEV_U0); ··· 1355 1355 return 0; 1356 1356 } 1357 1357 1358 + /* 1359 + * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3. 1360 + * warm reset a USB3 device stuck in polling or compliance mode after resume. 1361 + * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8 1362 + */ 1363 + static bool xhci_port_missing_cas_quirk(int port_index, 1364 + __le32 __iomem **port_array) 1365 + { 1366 + u32 portsc; 1367 + 1368 + portsc = readl(port_array[port_index]); 1369 + 1370 + /* if any of these are set we are not stuck */ 1371 + if (portsc & (PORT_CONNECT | PORT_CAS)) 1372 + return false; 1373 + 1374 + if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) && 1375 + ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE)) 1376 + return false; 1377 + 1378 + /* clear wakeup/change bits, and do a warm port reset */ 1379 + portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS); 1380 + portsc |= PORT_WR; 1381 + writel(portsc, port_array[port_index]); 1382 + /* flush write */ 1383 + readl(port_array[port_index]); 1384 + return true; 1385 + } 1386 + 1358 1387 int xhci_bus_resume(struct usb_hcd *hcd) 1359 1388 { 1360 1389 struct xhci_hcd *xhci = hcd_to_xhci(hcd); ··· 1421 1392 u32 temp; 1422 1393 1423 1394 temp = readl(port_array[port_index]); 1395 + 1396 + /* warm reset CAS limited ports stuck in polling/compliance */ 1397 + if ((xhci->quirks & XHCI_MISSING_CAS) && 1398 + (hcd->speed >= HCD_USB3) && 1399 + xhci_port_missing_cas_quirk(port_index, port_array)) { 1400 + xhci_dbg(xhci, "reset stuck port %d\n", port_index); 1401 + continue; 1402 + } 1424 1403 if (DEV_SUPERSPEED_ANY(temp)) 1425 1404 temp &= ~(PORT_RWC_BITS | PORT_CEC | 
PORT_WAKE_BITS); 1426 1405 else ··· 1447 1410 1448 1411 if (need_usb2_u3_exit) { 1449 1412 spin_unlock_irqrestore(&xhci->lock, flags); 1450 - msleep(20); 1413 + msleep(USB_RESUME_TIMEOUT); 1451 1414 spin_lock_irqsave(&xhci->lock, flags); 1452 1415 } 1453 1416
+9 -1
drivers/usb/host/xhci-pci.c
··· 45 45 46 46 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 47 47 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 48 + #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1 48 49 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 49 50 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f 50 51 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f 51 52 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 52 53 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 54 + #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 53 55 54 56 static const char hcd_name[] = "xhci_hcd"; 55 57 ··· 155 153 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 156 154 } 157 155 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 158 - pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { 156 + (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI || 157 + pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) { 159 158 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 160 159 xhci->quirks |= XHCI_SPURIOUS_WAKEUP; 161 160 } ··· 172 169 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { 173 170 xhci->quirks |= XHCI_SSIC_PORT_UNUSED; 174 171 } 172 + if (pdev->vendor == PCI_VENDOR_ID_INTEL && 173 + (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 174 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) 175 + xhci->quirks |= XHCI_MISSING_CAS; 176 + 175 177 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 176 178 pdev->device == PCI_DEVICE_ID_EJ168) { 177 179 xhci->quirks |= XHCI_RESET_ON_RESUME;
+3
drivers/usb/host/xhci.h
··· 314 314 #define XDEV_U2 (0x2 << 5) 315 315 #define XDEV_U3 (0x3 << 5) 316 316 #define XDEV_INACTIVE (0x6 << 5) 317 + #define XDEV_POLLING (0x7 << 5) 318 + #define XDEV_COMP_MODE (0xa << 5) 317 319 #define XDEV_RESUME (0xf << 5) 318 320 /* true: port has power (see HCC_PPC) */ 319 321 #define PORT_POWER (1 << 9) ··· 1655 1653 #define XHCI_MTK_HOST (1 << 21) 1656 1654 #define XHCI_SSIC_PORT_UNUSED (1 << 22) 1657 1655 #define XHCI_NO_64BIT_SUPPORT (1 << 23) 1656 + #define XHCI_MISSING_CAS (1 << 24) 1658 1657 unsigned int num_active_eps; 1659 1658 unsigned int limit_active_eps; 1660 1659 /* There are two roothubs to keep track of bus suspend info for */
+4
drivers/usb/musb/musb_gadget.c
··· 1255 1255 1256 1256 map_dma_buffer(request, musb, musb_ep); 1257 1257 1258 + pm_runtime_get_sync(musb->controller); 1258 1259 spin_lock_irqsave(&musb->lock, lockflags); 1259 1260 1260 1261 /* don't queue if the ep is down */ ··· 1276 1275 1277 1276 unlock: 1278 1277 spin_unlock_irqrestore(&musb->lock, lockflags); 1278 + pm_runtime_mark_last_busy(musb->controller); 1279 + pm_runtime_put_autosuspend(musb->controller); 1280 + 1279 1281 return status; 1280 1282 } 1281 1283
+2 -5
drivers/usb/musb/omap2430.c
··· 287 287 } 288 288 musb->isr = omap2430_musb_interrupt; 289 289 phy_init(musb->phy); 290 + phy_power_on(musb->phy); 290 291 291 292 l = musb_readl(musb->mregs, OTG_INTERFSEL); 292 293 ··· 324 323 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); 325 324 struct omap_musb_board_data *data = pdata->board_data; 326 325 327 - if (!WARN_ON(!musb->phy)) 328 - phy_power_on(musb->phy); 329 326 330 327 switch (glue->status) { 331 328 ··· 360 361 struct device *dev = musb->controller; 361 362 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); 362 363 363 - if (!WARN_ON(!musb->phy)) 364 - phy_power_off(musb->phy); 365 - 366 364 if (glue->status != MUSB_UNKNOWN) 367 365 omap_control_usb_set_mode(glue->control_otghs, 368 366 USB_MODE_DISCONNECT); ··· 371 375 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); 372 376 373 377 omap2430_low_level_exit(musb); 378 + phy_power_off(musb->phy); 374 379 phy_exit(musb->phy); 375 380 musb->phy = NULL; 376 381 cancel_work_sync(&glue->omap_musb_mailbox_work);
+6 -2
drivers/usb/renesas_usbhs/rcar3.c
··· 9 9 * 10 10 */ 11 11 12 + #include <linux/delay.h> 12 13 #include <linux/io.h> 13 14 #include "common.h" 14 15 #include "rcar3.h" ··· 36 35 37 36 usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG); 38 37 39 - if (enable) 38 + if (enable) { 40 39 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); 41 - else 40 + /* The controller on R-Car Gen3 needs to wait up to 45 usec */ 41 + udelay(45); 42 + } else { 42 43 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0); 44 + } 43 45 44 46 return 0; 45 47 }
+3 -1
drivers/usb/serial/cp210x.c
··· 1077 1077 u8 control; 1078 1078 int result; 1079 1079 1080 - cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control); 1080 + result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control); 1081 + if (result) 1082 + return result; 1081 1083 1082 1084 result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) 1083 1085 |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
+2 -1
drivers/usb/serial/ftdi_sio.c
··· 986 986 /* ekey Devices */ 987 987 { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, 988 988 /* Infineon Devices */ 989 - { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, 989 + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) }, 990 + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) }, 990 991 /* GE Healthcare devices */ 991 992 { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, 992 993 /* Active Research (Actisense) devices */
+3 -2
drivers/usb/serial/ftdi_sio_ids.h
··· 626 626 /* 627 627 * Infineon Technologies 628 628 */ 629 - #define INFINEON_VID 0x058b 630 - #define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ 629 + #define INFINEON_VID 0x058b 630 + #define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ 631 + #define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */ 631 632 632 633 /* 633 634 * Acton Research Corp.
+2 -1
drivers/usb/serial/usb-serial.c
··· 1078 1078 1079 1079 serial->disconnected = 0; 1080 1080 1081 - usb_serial_console_init(serial->port[0]->minor); 1081 + if (num_ports > 0) 1082 + usb_serial_console_init(serial->port[0]->minor); 1082 1083 exit: 1083 1084 module_put(type->driver.owner); 1084 1085 return 0;
+39 -22
drivers/usb/wusbcore/crypto.c
··· 133 133 bo[itr] = bi1[itr] ^ bi2[itr]; 134 134 } 135 135 136 + /* Scratch space for MAC calculations. */ 137 + struct wusb_mac_scratch { 138 + struct aes_ccm_b0 b0; 139 + struct aes_ccm_b1 b1; 140 + struct aes_ccm_a ax; 141 + }; 142 + 136 143 /* 137 144 * CC-MAC function WUSB1.0[6.5] 138 145 * ··· 204 197 * what sg[4] is for. Maybe there is a smarter way to do this. 205 198 */ 206 199 static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, 207 - struct crypto_cipher *tfm_aes, void *mic, 200 + struct crypto_cipher *tfm_aes, 201 + struct wusb_mac_scratch *scratch, 202 + void *mic, 208 203 const struct aes_ccm_nonce *n, 209 204 const struct aes_ccm_label *a, const void *b, 210 205 size_t blen) 211 206 { 212 207 int result = 0; 213 208 SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc); 214 - struct aes_ccm_b0 b0; 215 - struct aes_ccm_b1 b1; 216 - struct aes_ccm_a ax; 217 209 struct scatterlist sg[4], sg_dst; 218 210 void *dst_buf; 219 211 size_t dst_size; ··· 224 218 * These checks should be compile time optimized out 225 219 * ensure @a fills b1's mac_header and following fields 226 220 */ 227 - WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la)); 228 - WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block)); 229 - WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block)); 230 - WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block)); 221 + WARN_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la)); 222 + WARN_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block)); 223 + WARN_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block)); 224 + WARN_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block)); 231 225 232 226 result = -ENOMEM; 233 227 zero_padding = blen % sizeof(struct aes_ccm_block); 234 228 if (zero_padding) 235 229 zero_padding = sizeof(struct aes_ccm_block) - zero_padding; 236 - dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding; 230 + dst_size = blen + sizeof(scratch->b0) + sizeof(scratch->b1) + 231 + zero_padding; 237 232 dst_buf = kzalloc(dst_size, 
GFP_KERNEL); 238 233 if (!dst_buf) 239 234 goto error_dst_buf; ··· 242 235 memset(iv, 0, sizeof(iv)); 243 236 244 237 /* Setup B0 */ 245 - b0.flags = 0x59; /* Format B0 */ 246 - b0.ccm_nonce = *n; 247 - b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */ 238 + scratch->b0.flags = 0x59; /* Format B0 */ 239 + scratch->b0.ccm_nonce = *n; 240 + scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */ 248 241 249 242 /* Setup B1 250 243 * ··· 253 246 * 14'--after clarification, it means to use A's contents 254 247 * for MAC Header, EO, sec reserved and padding. 255 248 */ 256 - b1.la = cpu_to_be16(blen + 14); 257 - memcpy(&b1.mac_header, a, sizeof(*a)); 249 + scratch->b1.la = cpu_to_be16(blen + 14); 250 + memcpy(&scratch->b1.mac_header, a, sizeof(*a)); 258 251 259 252 sg_init_table(sg, ARRAY_SIZE(sg)); 260 - sg_set_buf(&sg[0], &b0, sizeof(b0)); 261 - sg_set_buf(&sg[1], &b1, sizeof(b1)); 253 + sg_set_buf(&sg[0], &scratch->b0, sizeof(scratch->b0)); 254 + sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1)); 262 255 sg_set_buf(&sg[2], b, blen); 263 256 /* 0 if well behaved :) */ 264 257 sg_set_buf(&sg[3], bzero, zero_padding); ··· 283 276 * POS Crypto API: size is assumed to be AES's block size. 
284 277 * Thanks for documenting it -- tip taken from airo.c 285 278 */ 286 - ax.flags = 0x01; /* as per WUSB 1.0 spec */ 287 - ax.ccm_nonce = *n; 288 - ax.counter = 0; 289 - crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax); 290 - bytewise_xor(mic, &ax, iv, 8); 279 + scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */ 280 + scratch->ax.ccm_nonce = *n; 281 + scratch->ax.counter = 0; 282 + crypto_cipher_encrypt_one(tfm_aes, (void *)&scratch->ax, 283 + (void *)&scratch->ax); 284 + bytewise_xor(mic, &scratch->ax, iv, 8); 291 285 result = 8; 292 286 error_cbc_crypt: 293 287 kfree(dst_buf); ··· 311 303 struct aes_ccm_nonce n = *_n; 312 304 struct crypto_skcipher *tfm_cbc; 313 305 struct crypto_cipher *tfm_aes; 306 + struct wusb_mac_scratch *scratch; 314 307 u64 sfn = 0; 315 308 __le64 sfn_le; 316 309 ··· 338 329 printk(KERN_ERR "E: can't set AES key: %d\n", (int)result); 339 330 goto error_setkey_aes; 340 331 } 332 + scratch = kmalloc(sizeof(*scratch), GFP_KERNEL); 333 + if (!scratch) { 334 + result = -ENOMEM; 335 + goto error_alloc_scratch; 336 + } 341 337 342 338 for (bitr = 0; bitr < (len + 63) / 64; bitr++) { 343 339 sfn_le = cpu_to_le64(sfn++); 344 340 memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */ 345 - result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes, 341 + result = wusb_ccm_mac(tfm_cbc, tfm_aes, scratch, out + bytes, 346 342 &n, a, b, blen); 347 343 if (result < 0) 348 344 goto error_ccm_mac; 349 345 bytes += result; 350 346 } 351 347 result = bytes; 348 + 349 + kfree(scratch); 350 + error_alloc_scratch: 352 351 error_ccm_mac: 353 352 error_setkey_aes: 354 353 crypto_free_cipher(tfm_aes);
+4
drivers/vme/vme.c
··· 156 156 case VME_MASTER: 157 157 retval = vme_master_get(resource, &enabled, &base, &size, 158 158 &aspace, &cycle, &dwidth); 159 + if (retval) 160 + return 0; 159 161 160 162 return size; 161 163 break; 162 164 case VME_SLAVE: 163 165 retval = vme_slave_get(resource, &enabled, &base, &size, 164 166 &buf_base, &aspace, &cycle); 167 + if (retval) 168 + return 0; 165 169 166 170 return size; 167 171 break;
+29 -16
drivers/xen/manage.c
··· 168 168 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 169 169 170 170 struct shutdown_handler { 171 - const char *command; 171 + #define SHUTDOWN_CMD_SIZE 11 172 + const char command[SHUTDOWN_CMD_SIZE]; 173 + bool flag; 172 174 void (*cb)(void); 173 175 }; 174 176 ··· 208 206 ctrl_alt_del(); 209 207 } 210 208 209 + static struct shutdown_handler shutdown_handlers[] = { 210 + { "poweroff", true, do_poweroff }, 211 + { "halt", false, do_poweroff }, 212 + { "reboot", true, do_reboot }, 213 + #ifdef CONFIG_HIBERNATE_CALLBACKS 214 + { "suspend", true, do_suspend }, 215 + #endif 216 + }; 217 + 211 218 static void shutdown_handler(struct xenbus_watch *watch, 212 219 const char **vec, unsigned int len) 213 220 { 214 221 char *str; 215 222 struct xenbus_transaction xbt; 216 223 int err; 217 - static struct shutdown_handler handlers[] = { 218 - { "poweroff", do_poweroff }, 219 - { "halt", do_poweroff }, 220 - { "reboot", do_reboot }, 221 - #ifdef CONFIG_HIBERNATE_CALLBACKS 222 - { "suspend", do_suspend }, 223 - #endif 224 - {NULL, NULL}, 225 - }; 226 - static struct shutdown_handler *handler; 224 + int idx; 227 225 228 226 if (shutting_down != SHUTDOWN_INVALID) 229 227 return; ··· 240 238 return; 241 239 } 242 240 243 - for (handler = &handlers[0]; handler->command; handler++) { 244 - if (strcmp(str, handler->command) == 0) 241 + for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) { 242 + if (strcmp(str, shutdown_handlers[idx].command) == 0) 245 243 break; 246 244 } 247 245 248 246 /* Only acknowledge commands which we are prepared to handle. 
*/ 249 - if (handler->cb) 247 + if (idx < ARRAY_SIZE(shutdown_handlers)) 250 248 xenbus_write(xbt, "control", "shutdown", ""); 251 249 252 250 err = xenbus_transaction_end(xbt, 0); ··· 255 253 goto again; 256 254 } 257 255 258 - if (handler->cb) { 259 - handler->cb(); 256 + if (idx < ARRAY_SIZE(shutdown_handlers)) { 257 + shutdown_handlers[idx].cb(); 260 258 } else { 261 259 pr_info("Ignoring shutdown request: %s\n", str); 262 260 shutting_down = SHUTDOWN_INVALID; ··· 312 310 static int setup_shutdown_watcher(void) 313 311 { 314 312 int err; 313 + int idx; 314 + #define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-")) 315 + char node[FEATURE_PATH_SIZE]; 315 316 316 317 err = register_xenbus_watch(&shutdown_watch); 317 318 if (err) { ··· 330 325 return err; 331 326 } 332 327 #endif 328 + 329 + for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) { 330 + if (!shutdown_handlers[idx].flag) 331 + continue; 332 + snprintf(node, FEATURE_PATH_SIZE, "feature-%s", 333 + shutdown_handlers[idx].command); 334 + xenbus_printf(XBT_NIL, "control", node, "%u", 1); 335 + } 333 336 334 337 return 0; 335 338 }
+2 -2
drivers/xen/xenbus/xenbus_dev_frontend.c
··· 364 364 365 365 static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) 366 366 { 367 - struct watch_adapter *watch, *tmp_watch; 367 + struct watch_adapter *watch; 368 368 char *path, *token; 369 369 int err, rc; 370 370 LIST_HEAD(staging_q); ··· 399 399 } 400 400 list_add(&watch->list, &u->watches); 401 401 } else { 402 - list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { 402 + list_for_each_entry(watch, &u->watches, list) { 403 403 if (!strcmp(watch->token, token) && 404 404 !strcmp(watch->watch.node, path)) { 405 405 unregister_xenbus_watch(&watch->watch);
+3 -1
drivers/xen/xenbus/xenbus_probe_frontend.c
··· 335 335 static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, 336 336 const char **v, unsigned int l) 337 337 { 338 - xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state); 338 + if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", 339 + &backend_state) != 1) 340 + backend_state = XenbusStateUnknown; 339 341 printk(KERN_DEBUG "XENBUS: backend %s %s\n", 340 342 v[XS_WATCH_PATH], xenbus_strstate(backend_state)); 341 343 wake_up(&backend_state_wq);
+58
fs/btrfs/send.c
··· 5805 5805 int ret = 0; 5806 5806 5807 5807 if (sctx->cur_ino != sctx->cmp_key->objectid) { 5808 + 5809 + if (result == BTRFS_COMPARE_TREE_CHANGED) { 5810 + struct extent_buffer *leaf_l; 5811 + struct extent_buffer *leaf_r; 5812 + struct btrfs_file_extent_item *ei_l; 5813 + struct btrfs_file_extent_item *ei_r; 5814 + 5815 + leaf_l = sctx->left_path->nodes[0]; 5816 + leaf_r = sctx->right_path->nodes[0]; 5817 + ei_l = btrfs_item_ptr(leaf_l, 5818 + sctx->left_path->slots[0], 5819 + struct btrfs_file_extent_item); 5820 + ei_r = btrfs_item_ptr(leaf_r, 5821 + sctx->right_path->slots[0], 5822 + struct btrfs_file_extent_item); 5823 + 5824 + /* 5825 + * We may have found an extent item that has changed 5826 + * only its disk_bytenr field and the corresponding 5827 + * inode item was not updated. This case happens due to 5828 + * very specific timings during relocation when a leaf 5829 + * that contains file extent items is COWed while 5830 + * relocation is ongoing and its in the stage where it 5831 + * updates data pointers. So when this happens we can 5832 + * safely ignore it since we know it's the same extent, 5833 + * but just at different logical and physical locations 5834 + * (when an extent is fully replaced with a new one, we 5835 + * know the generation number must have changed too, 5836 + * since snapshot creation implies committing the current 5837 + * transaction, and the inode item must have been updated 5838 + * as well). 5839 + * This replacement of the disk_bytenr happens at 5840 + * relocation.c:replace_file_extents() through 5841 + * relocation.c:btrfs_reloc_cow_block(). 
5842 + */ 5843 + if (btrfs_file_extent_generation(leaf_l, ei_l) == 5844 + btrfs_file_extent_generation(leaf_r, ei_r) && 5845 + btrfs_file_extent_ram_bytes(leaf_l, ei_l) == 5846 + btrfs_file_extent_ram_bytes(leaf_r, ei_r) && 5847 + btrfs_file_extent_compression(leaf_l, ei_l) == 5848 + btrfs_file_extent_compression(leaf_r, ei_r) && 5849 + btrfs_file_extent_encryption(leaf_l, ei_l) == 5850 + btrfs_file_extent_encryption(leaf_r, ei_r) && 5851 + btrfs_file_extent_other_encoding(leaf_l, ei_l) == 5852 + btrfs_file_extent_other_encoding(leaf_r, ei_r) && 5853 + btrfs_file_extent_type(leaf_l, ei_l) == 5854 + btrfs_file_extent_type(leaf_r, ei_r) && 5855 + btrfs_file_extent_disk_bytenr(leaf_l, ei_l) != 5856 + btrfs_file_extent_disk_bytenr(leaf_r, ei_r) && 5857 + btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) == 5858 + btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) && 5859 + btrfs_file_extent_offset(leaf_l, ei_l) == 5860 + btrfs_file_extent_offset(leaf_r, ei_r) && 5861 + btrfs_file_extent_num_bytes(leaf_l, ei_l) == 5862 + btrfs_file_extent_num_bytes(leaf_r, ei_r)) 5863 + return 0; 5864 + } 5865 + 5808 5866 inconsistent_snapshot_error(sctx, result, "extent"); 5809 5867 return -EIO; 5810 5868 }
+6 -14
fs/btrfs/tree-log.c
··· 2713 2713 int index, int error) 2714 2714 { 2715 2715 struct btrfs_log_ctx *ctx; 2716 + struct btrfs_log_ctx *safe; 2716 2717 2717 - if (!error) { 2718 - INIT_LIST_HEAD(&root->log_ctxs[index]); 2719 - return; 2720 - } 2721 - 2722 - list_for_each_entry(ctx, &root->log_ctxs[index], list) 2718 + list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { 2719 + list_del_init(&ctx->list); 2723 2720 ctx->log_ret = error; 2721 + } 2724 2722 2725 2723 INIT_LIST_HEAD(&root->log_ctxs[index]); 2726 2724 } ··· 2959 2961 mutex_unlock(&root->log_mutex); 2960 2962 2961 2963 out_wake_log_root: 2962 - /* 2963 - * We needn't get log_mutex here because we are sure all 2964 - * the other tasks are blocked. 2965 - */ 2964 + mutex_lock(&log_root_tree->log_mutex); 2966 2965 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 2967 2966 2968 - mutex_lock(&log_root_tree->log_mutex); 2969 2967 log_root_tree->log_transid_committed++; 2970 2968 atomic_set(&log_root_tree->log_commit[index2], 0); 2971 2969 mutex_unlock(&log_root_tree->log_mutex); ··· 2972 2978 if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) 2973 2979 wake_up(&log_root_tree->log_commit_wait[index2]); 2974 2980 out: 2975 - /* See above. */ 2976 - btrfs_remove_all_log_ctxs(root, index1, ret); 2977 - 2978 2981 mutex_lock(&root->log_mutex); 2982 + btrfs_remove_all_log_ctxs(root, index1, ret); 2979 2983 root->log_transid_committed++; 2980 2984 atomic_set(&root->log_commit[index1], 0); 2981 2985 mutex_unlock(&root->log_mutex);
+1 -1
fs/exofs/dir.c
··· 137 137 bad_entry: 138 138 EXOFS_ERR( 139 139 "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - " 140 - "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n", 140 + "offset=%lu, inode=0x%llx, rec_len=%d, name_len=%d\n", 141 141 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, 142 142 _LLU(le64_to_cpu(p->inode_no)), 143 143 rec_len, p->name_len);
+2 -3
fs/iomap.c
··· 433 433 struct page *page = data; 434 434 int ret; 435 435 436 - ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length, 437 - NULL, iomap); 436 + ret = __block_write_begin_int(page, pos, length, NULL, iomap); 438 437 if (ret) 439 438 return ret; 440 439 ··· 560 561 } 561 562 562 563 while (len > 0) { 563 - ret = iomap_apply(inode, start, len, 0, ops, &ctx, 564 + ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx, 564 565 iomap_fiemap_actor); 565 566 /* inode with no (attribute) mapping will give ENOENT */ 566 567 if (ret == -ENOENT)
+1
fs/kernfs/file.c
··· 911 911 .open = kernfs_fop_open, 912 912 .release = kernfs_fop_release, 913 913 .poll = kernfs_fop_poll, 914 + .fsync = noop_fsync, 914 915 }; 915 916 916 917 /**
+3 -2
fs/orangefs/dcache.c
··· 73 73 } 74 74 } 75 75 76 - dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 76 + orangefs_set_timeout(dentry); 77 77 ret = 1; 78 78 out_release_op: 79 79 op_release(new_op); ··· 94 94 static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags) 95 95 { 96 96 int ret; 97 + unsigned long time = (unsigned long) dentry->d_fsdata; 97 98 98 - if (time_before(jiffies, dentry->d_time)) 99 + if (time_before(jiffies, time)) 99 100 return 1; 100 101 101 102 if (flags & LOOKUP_RCU)
+7 -7
fs/orangefs/file.c
··· 621 621 * readahead cache (if any); this forces an expensive refresh of 622 622 * data for the next caller of mmap (or 'get_block' accesses) 623 623 */ 624 - if (file->f_path.dentry->d_inode && 625 - file->f_path.dentry->d_inode->i_mapping && 626 - mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) { 624 + if (file_inode(file) && 625 + file_inode(file)->i_mapping && 626 + mapping_nrpages(&file_inode(file)->i_data)) { 627 627 if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) { 628 628 gossip_debug(GOSSIP_INODE_DEBUG, 629 629 "calling flush_racache on %pU\n", ··· 632 632 gossip_debug(GOSSIP_INODE_DEBUG, 633 633 "flush_racache finished\n"); 634 634 } 635 - truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping, 635 + truncate_inode_pages(file_inode(file)->i_mapping, 636 636 0); 637 637 } 638 638 return 0; ··· 648 648 { 649 649 int ret = -EINVAL; 650 650 struct orangefs_inode_s *orangefs_inode = 651 - ORANGEFS_I(file->f_path.dentry->d_inode); 651 + ORANGEFS_I(file_inode(file)); 652 652 struct orangefs_kernel_op_s *new_op = NULL; 653 653 654 654 /* required call */ ··· 661 661 662 662 ret = service_operation(new_op, 663 663 "orangefs_fsync", 664 - get_interruptible_flag(file->f_path.dentry->d_inode)); 664 + get_interruptible_flag(file_inode(file))); 665 665 666 666 gossip_debug(GOSSIP_FILE_DEBUG, 667 667 "orangefs_fsync got return value of %d\n", ··· 669 669 670 670 op_release(new_op); 671 671 672 - orangefs_flush_inode(file->f_path.dentry->d_inode); 672 + orangefs_flush_inode(file_inode(file)); 673 673 return ret; 674 674 } 675 675
+4 -4
fs/orangefs/namei.c
··· 72 72 73 73 d_instantiate(dentry, inode); 74 74 unlock_new_inode(inode); 75 - dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 75 + orangefs_set_timeout(dentry); 76 76 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 77 77 78 78 gossip_debug(GOSSIP_NAME_DEBUG, ··· 183 183 goto out; 184 184 } 185 185 186 - dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 186 + orangefs_set_timeout(dentry); 187 187 188 188 inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn); 189 189 if (IS_ERR(inode)) { ··· 322 322 323 323 d_instantiate(dentry, inode); 324 324 unlock_new_inode(inode); 325 - dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 325 + orangefs_set_timeout(dentry); 326 326 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 327 327 328 328 gossip_debug(GOSSIP_NAME_DEBUG, ··· 386 386 387 387 d_instantiate(dentry, inode); 388 388 unlock_new_inode(inode); 389 - dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 389 + orangefs_set_timeout(dentry); 390 390 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 391 391 392 392 gossip_debug(GOSSIP_NAME_DEBUG,
+7
fs/orangefs/orangefs-kernel.h
··· 580 580 #endif 581 581 } 582 582 583 + static inline void orangefs_set_timeout(struct dentry *dentry) 584 + { 585 + unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 586 + 587 + dentry->d_fsdata = (void *) time; 588 + } 589 + 583 590 #endif /* __ORANGEFSKERNEL_H */
+11 -10
fs/proc/base.c
··· 252 252 * Inherently racy -- command line shares address space 253 253 * with code and data. 254 254 */ 255 - rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE); 255 + rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0); 256 256 if (rv <= 0) 257 257 goto out_free_page; 258 258 ··· 270 270 int nr_read; 271 271 272 272 _count = min3(count, len, PAGE_SIZE); 273 - nr_read = access_remote_vm(mm, p, page, _count, 274 - FOLL_FORCE); 273 + nr_read = access_remote_vm(mm, p, page, _count, 0); 275 274 if (nr_read < 0) 276 275 rv = nr_read; 277 276 if (nr_read <= 0) ··· 305 306 bool final; 306 307 307 308 _count = min3(count, len, PAGE_SIZE); 308 - nr_read = access_remote_vm(mm, p, page, _count, 309 - FOLL_FORCE); 309 + nr_read = access_remote_vm(mm, p, page, _count, 0); 310 310 if (nr_read < 0) 311 311 rv = nr_read; 312 312 if (nr_read <= 0) ··· 354 356 bool final; 355 357 356 358 _count = min3(count, len, PAGE_SIZE); 357 - nr_read = access_remote_vm(mm, p, page, _count, 358 - FOLL_FORCE); 359 + nr_read = access_remote_vm(mm, p, page, _count, 0); 359 360 if (nr_read < 0) 360 361 rv = nr_read; 361 362 if (nr_read <= 0) ··· 832 835 unsigned long addr = *ppos; 833 836 ssize_t copied; 834 837 char *page; 835 - unsigned int flags = FOLL_FORCE; 838 + unsigned int flags; 836 839 837 840 if (!mm) 838 841 return 0; ··· 845 848 if (!atomic_inc_not_zero(&mm->mm_users)) 846 849 goto free; 847 850 851 + /* Maybe we should limit FOLL_FORCE to actual ptrace users? 
*/ 852 + flags = FOLL_FORCE; 848 853 if (write) 849 854 flags |= FOLL_WRITE; 850 855 ··· 970 971 max_len = min_t(size_t, PAGE_SIZE, count); 971 972 this_len = min(max_len, this_len); 972 973 973 - retval = access_remote_vm(mm, (env_start + src), 974 - page, this_len, FOLL_FORCE); 974 + retval = access_remote_vm(mm, (env_start + src), page, this_len, 0); 975 975 976 976 if (retval <= 0) { 977 977 ret = retval; ··· 1012 1014 { 1013 1015 struct mm_struct *mm = file->private_data; 1014 1016 unsigned int nwords = 0; 1017 + 1018 + if (!mm) 1019 + return 0; 1015 1020 do { 1016 1021 nwords += 2; 1017 1022 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+8
fs/ubifs/dir.c
··· 543 543 544 544 if (err != -ENOENT) 545 545 ubifs_err(c, "cannot find next direntry, error %d", err); 546 + else 547 + /* 548 + * -ENOENT is a non-fatal error in this context, the TNC uses 549 + * it to indicate that the cursor moved past the current directory 550 + * and readdir() has to stop. 551 + */ 552 + err = 0; 553 + 546 554 547 555 /* 2 is a special value indicating that there are no more direntries */ 548 556 ctx->pos = 2;
+246 -172
fs/xfs/libxfs/xfs_bmap.c
··· 3974 3974 * allocating, so skip that check by pretending to be freeing. 3975 3975 */ 3976 3976 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING); 3977 - if (error) 3978 - goto error0; 3979 - error0: 3980 3977 xfs_perag_put(args.pag); 3981 3978 if (error) 3982 3979 trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_); ··· 3994 3997 xfs_alloc_is_userdata(ap->datatype)) 3995 3998 return xfs_bmap_rtalloc(ap); 3996 3999 return xfs_bmap_btalloc(ap); 4000 + } 4001 + 4002 + /* Trim extent to fit a logical block range. */ 4003 + void 4004 + xfs_trim_extent( 4005 + struct xfs_bmbt_irec *irec, 4006 + xfs_fileoff_t bno, 4007 + xfs_filblks_t len) 4008 + { 4009 + xfs_fileoff_t distance; 4010 + xfs_fileoff_t end = bno + len; 4011 + 4012 + if (irec->br_startoff + irec->br_blockcount <= bno || 4013 + irec->br_startoff >= end) { 4014 + irec->br_blockcount = 0; 4015 + return; 4016 + } 4017 + 4018 + if (irec->br_startoff < bno) { 4019 + distance = bno - irec->br_startoff; 4020 + if (isnullstartblock(irec->br_startblock)) 4021 + irec->br_startblock = DELAYSTARTBLOCK; 4022 + if (irec->br_startblock != DELAYSTARTBLOCK && 4023 + irec->br_startblock != HOLESTARTBLOCK) 4024 + irec->br_startblock += distance; 4025 + irec->br_startoff += distance; 4026 + irec->br_blockcount -= distance; 4027 + } 4028 + 4029 + if (end < irec->br_startoff + irec->br_blockcount) { 4030 + distance = irec->br_startoff + irec->br_blockcount - end; 4031 + irec->br_blockcount -= distance; 4032 + } 3997 4033 } 3998 4034 3999 4035 /* ··· 4859 4829 return stolen; 4860 4830 } 4861 4831 4832 + int 4833 + xfs_bmap_del_extent_delay( 4834 + struct xfs_inode *ip, 4835 + int whichfork, 4836 + xfs_extnum_t *idx, 4837 + struct xfs_bmbt_irec *got, 4838 + struct xfs_bmbt_irec *del) 4839 + { 4840 + struct xfs_mount *mp = ip->i_mount; 4841 + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4842 + struct xfs_bmbt_irec new; 4843 + int64_t da_old, da_new, da_diff = 0; 4844 + xfs_fileoff_t del_endoff, 
got_endoff; 4845 + xfs_filblks_t got_indlen, new_indlen, stolen; 4846 + int error = 0, state = 0; 4847 + bool isrt; 4848 + 4849 + XFS_STATS_INC(mp, xs_del_exlist); 4850 + 4851 + isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4852 + del_endoff = del->br_startoff + del->br_blockcount; 4853 + got_endoff = got->br_startoff + got->br_blockcount; 4854 + da_old = startblockval(got->br_startblock); 4855 + da_new = 0; 4856 + 4857 + ASSERT(*idx >= 0); 4858 + ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); 4859 + ASSERT(del->br_blockcount > 0); 4860 + ASSERT(got->br_startoff <= del->br_startoff); 4861 + ASSERT(got_endoff >= del_endoff); 4862 + 4863 + if (isrt) { 4864 + int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4865 + 4866 + do_div(rtexts, mp->m_sb.sb_rextsize); 4867 + xfs_mod_frextents(mp, rtexts); 4868 + } 4869 + 4870 + /* 4871 + * Update the inode delalloc counter now and wait to update the 4872 + * sb counters as we might have to borrow some blocks for the 4873 + * indirect block accounting. 4874 + */ 4875 + xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0, 4876 + isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4877 + ip->i_delayed_blks -= del->br_blockcount; 4878 + 4879 + if (whichfork == XFS_COW_FORK) 4880 + state |= BMAP_COWFORK; 4881 + 4882 + if (got->br_startoff == del->br_startoff) 4883 + state |= BMAP_LEFT_CONTIG; 4884 + if (got_endoff == del_endoff) 4885 + state |= BMAP_RIGHT_CONTIG; 4886 + 4887 + switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 4888 + case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 4889 + /* 4890 + * Matches the whole extent. Delete the entry. 4891 + */ 4892 + xfs_iext_remove(ip, *idx, 1, state); 4893 + --*idx; 4894 + break; 4895 + case BMAP_LEFT_CONTIG: 4896 + /* 4897 + * Deleting the first part of the extent. 
4898 + */ 4899 + trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4900 + got->br_startoff = del_endoff; 4901 + got->br_blockcount -= del->br_blockcount; 4902 + da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4903 + got->br_blockcount), da_old); 4904 + got->br_startblock = nullstartblock((int)da_new); 4905 + xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4906 + trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4907 + break; 4908 + case BMAP_RIGHT_CONTIG: 4909 + /* 4910 + * Deleting the last part of the extent. 4911 + */ 4912 + trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4913 + got->br_blockcount = got->br_blockcount - del->br_blockcount; 4914 + da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4915 + got->br_blockcount), da_old); 4916 + got->br_startblock = nullstartblock((int)da_new); 4917 + xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4918 + trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4919 + break; 4920 + case 0: 4921 + /* 4922 + * Deleting the middle of the extent. 4923 + * 4924 + * Distribute the original indlen reservation across the two new 4925 + * extents. Steal blocks from the deleted extent if necessary. 4926 + * Stealing blocks simply fudges the fdblocks accounting below. 4927 + * Warn if either of the new indlen reservations is zero as this 4928 + * can lead to delalloc problems. 
4929 + */ 4930 + trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4931 + 4932 + got->br_blockcount = del->br_startoff - got->br_startoff; 4933 + got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4934 + 4935 + new.br_blockcount = got_endoff - del_endoff; 4936 + new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4937 + 4938 + WARN_ON_ONCE(!got_indlen || !new_indlen); 4939 + stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4940 + del->br_blockcount); 4941 + 4942 + got->br_startblock = nullstartblock((int)got_indlen); 4943 + xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4944 + trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_); 4945 + 4946 + new.br_startoff = del_endoff; 4947 + new.br_state = got->br_state; 4948 + new.br_startblock = nullstartblock((int)new_indlen); 4949 + 4950 + ++*idx; 4951 + xfs_iext_insert(ip, *idx, 1, &new, state); 4952 + 4953 + da_new = got_indlen + new_indlen - stolen; 4954 + del->br_blockcount -= stolen; 4955 + break; 4956 + } 4957 + 4958 + ASSERT(da_old >= da_new); 4959 + da_diff = da_old - da_new; 4960 + if (!isrt) 4961 + da_diff += del->br_blockcount; 4962 + if (da_diff) 4963 + xfs_mod_fdblocks(mp, da_diff, false); 4964 + return error; 4965 + } 4966 + 4967 + void 4968 + xfs_bmap_del_extent_cow( 4969 + struct xfs_inode *ip, 4970 + xfs_extnum_t *idx, 4971 + struct xfs_bmbt_irec *got, 4972 + struct xfs_bmbt_irec *del) 4973 + { 4974 + struct xfs_mount *mp = ip->i_mount; 4975 + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4976 + struct xfs_bmbt_irec new; 4977 + xfs_fileoff_t del_endoff, got_endoff; 4978 + int state = BMAP_COWFORK; 4979 + 4980 + XFS_STATS_INC(mp, xs_del_exlist); 4981 + 4982 + del_endoff = del->br_startoff + del->br_blockcount; 4983 + got_endoff = got->br_startoff + got->br_blockcount; 4984 + 4985 + ASSERT(*idx >= 0); 4986 + ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); 4987 + ASSERT(del->br_blockcount > 0); 4988 + ASSERT(got->br_startoff <= del->br_startoff); 
4989 + ASSERT(got_endoff >= del_endoff); 4990 + ASSERT(!isnullstartblock(got->br_startblock)); 4991 + 4992 + if (got->br_startoff == del->br_startoff) 4993 + state |= BMAP_LEFT_CONTIG; 4994 + if (got_endoff == del_endoff) 4995 + state |= BMAP_RIGHT_CONTIG; 4996 + 4997 + switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 4998 + case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 4999 + /* 5000 + * Matches the whole extent. Delete the entry. 5001 + */ 5002 + xfs_iext_remove(ip, *idx, 1, state); 5003 + --*idx; 5004 + break; 5005 + case BMAP_LEFT_CONTIG: 5006 + /* 5007 + * Deleting the first part of the extent. 5008 + */ 5009 + trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5010 + got->br_startoff = del_endoff; 5011 + got->br_blockcount -= del->br_blockcount; 5012 + got->br_startblock = del->br_startblock + del->br_blockcount; 5013 + xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 5014 + trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5015 + break; 5016 + case BMAP_RIGHT_CONTIG: 5017 + /* 5018 + * Deleting the last part of the extent. 5019 + */ 5020 + trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5021 + got->br_blockcount -= del->br_blockcount; 5022 + xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 5023 + trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5024 + break; 5025 + case 0: 5026 + /* 5027 + * Deleting the middle of the extent. 
5028 + */ 5029 + trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5030 + got->br_blockcount = del->br_startoff - got->br_startoff; 5031 + xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 5032 + trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5033 + 5034 + new.br_startoff = del_endoff; 5035 + new.br_blockcount = got_endoff - del_endoff; 5036 + new.br_state = got->br_state; 5037 + new.br_startblock = del->br_startblock + del->br_blockcount; 5038 + 5039 + ++*idx; 5040 + xfs_iext_insert(ip, *idx, 1, &new, state); 5041 + break; 5042 + } 5043 + } 5044 + 4862 5045 /* 4863 5046 * Called by xfs_bmapi to update file extent records and the btree 4864 5047 * after removing space (or undoing a delayed allocation). ··· 5411 5168 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false); 5412 5169 done: 5413 5170 *logflagsp = flags; 5414 - return error; 5415 - } 5416 - 5417 - /* Remove an extent from the CoW fork. Similar to xfs_bmap_del_extent. */ 5418 - int 5419 - xfs_bunmapi_cow( 5420 - struct xfs_inode *ip, 5421 - struct xfs_bmbt_irec *del) 5422 - { 5423 - xfs_filblks_t da_new; 5424 - xfs_filblks_t da_old; 5425 - xfs_fsblock_t del_endblock = 0; 5426 - xfs_fileoff_t del_endoff; 5427 - int delay; 5428 - struct xfs_bmbt_rec_host *ep; 5429 - int error; 5430 - struct xfs_bmbt_irec got; 5431 - xfs_fileoff_t got_endoff; 5432 - struct xfs_ifork *ifp; 5433 - struct xfs_mount *mp; 5434 - xfs_filblks_t nblks; 5435 - struct xfs_bmbt_irec new; 5436 - /* REFERENCED */ 5437 - uint qfield; 5438 - xfs_filblks_t temp; 5439 - xfs_filblks_t temp2; 5440 - int state = BMAP_COWFORK; 5441 - int eof; 5442 - xfs_extnum_t eidx; 5443 - 5444 - mp = ip->i_mount; 5445 - XFS_STATS_INC(mp, xs_del_exlist); 5446 - 5447 - ep = xfs_bmap_search_extents(ip, del->br_startoff, XFS_COW_FORK, &eof, 5448 - &eidx, &got, &new); 5449 - 5450 - ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); ifp = ifp; 5451 - ASSERT((eidx >= 0) && (eidx < ifp->if_bytes / 5452 - (uint)sizeof(xfs_bmbt_rec_t))); 5453 - 
ASSERT(del->br_blockcount > 0); 5454 - ASSERT(got.br_startoff <= del->br_startoff); 5455 - del_endoff = del->br_startoff + del->br_blockcount; 5456 - got_endoff = got.br_startoff + got.br_blockcount; 5457 - ASSERT(got_endoff >= del_endoff); 5458 - delay = isnullstartblock(got.br_startblock); 5459 - ASSERT(isnullstartblock(del->br_startblock) == delay); 5460 - qfield = 0; 5461 - error = 0; 5462 - /* 5463 - * If deleting a real allocation, must free up the disk space. 5464 - */ 5465 - if (!delay) { 5466 - nblks = del->br_blockcount; 5467 - qfield = XFS_TRANS_DQ_BCOUNT; 5468 - /* 5469 - * Set up del_endblock and cur for later. 5470 - */ 5471 - del_endblock = del->br_startblock + del->br_blockcount; 5472 - da_old = da_new = 0; 5473 - } else { 5474 - da_old = startblockval(got.br_startblock); 5475 - da_new = 0; 5476 - nblks = 0; 5477 - } 5478 - qfield = qfield; 5479 - nblks = nblks; 5480 - 5481 - /* 5482 - * Set flag value to use in switch statement. 5483 - * Left-contig is 2, right-contig is 1. 5484 - */ 5485 - switch (((got.br_startoff == del->br_startoff) << 1) | 5486 - (got_endoff == del_endoff)) { 5487 - case 3: 5488 - /* 5489 - * Matches the whole extent. Delete the entry. 5490 - */ 5491 - xfs_iext_remove(ip, eidx, 1, BMAP_COWFORK); 5492 - --eidx; 5493 - break; 5494 - 5495 - case 2: 5496 - /* 5497 - * Deleting the first part of the extent. 
5498 - */ 5499 - trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_); 5500 - xfs_bmbt_set_startoff(ep, del_endoff); 5501 - temp = got.br_blockcount - del->br_blockcount; 5502 - xfs_bmbt_set_blockcount(ep, temp); 5503 - if (delay) { 5504 - temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 5505 - da_old); 5506 - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5507 - trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_); 5508 - da_new = temp; 5509 - break; 5510 - } 5511 - xfs_bmbt_set_startblock(ep, del_endblock); 5512 - trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_); 5513 - break; 5514 - 5515 - case 1: 5516 - /* 5517 - * Deleting the last part of the extent. 5518 - */ 5519 - temp = got.br_blockcount - del->br_blockcount; 5520 - trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_); 5521 - xfs_bmbt_set_blockcount(ep, temp); 5522 - if (delay) { 5523 - temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 5524 - da_old); 5525 - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5526 - trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_); 5527 - da_new = temp; 5528 - break; 5529 - } 5530 - trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_); 5531 - break; 5532 - 5533 - case 0: 5534 - /* 5535 - * Deleting the middle of the extent. 
5536 - */ 5537 - temp = del->br_startoff - got.br_startoff; 5538 - trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_); 5539 - xfs_bmbt_set_blockcount(ep, temp); 5540 - new.br_startoff = del_endoff; 5541 - temp2 = got_endoff - del_endoff; 5542 - new.br_blockcount = temp2; 5543 - new.br_state = got.br_state; 5544 - if (!delay) { 5545 - new.br_startblock = del_endblock; 5546 - } else { 5547 - temp = xfs_bmap_worst_indlen(ip, temp); 5548 - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5549 - temp2 = xfs_bmap_worst_indlen(ip, temp2); 5550 - new.br_startblock = nullstartblock((int)temp2); 5551 - da_new = temp + temp2; 5552 - while (da_new > da_old) { 5553 - if (temp) { 5554 - temp--; 5555 - da_new--; 5556 - xfs_bmbt_set_startblock(ep, 5557 - nullstartblock((int)temp)); 5558 - } 5559 - if (da_new == da_old) 5560 - break; 5561 - if (temp2) { 5562 - temp2--; 5563 - da_new--; 5564 - new.br_startblock = 5565 - nullstartblock((int)temp2); 5566 - } 5567 - } 5568 - } 5569 - trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_); 5570 - xfs_iext_insert(ip, eidx + 1, 1, &new, state); 5571 - ++eidx; 5572 - break; 5573 - } 5574 - 5575 - /* 5576 - * Account for change in delayed indirect blocks. 5577 - * Nothing to do for disk quota accounting here. 5578 - */ 5579 - ASSERT(da_old >= da_new); 5580 - if (da_old > da_new) 5581 - xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false); 5582 - 5583 5171 return error; 5584 5172 } 5585 5173
+7 -1
fs/xfs/libxfs/xfs_bmap.h
··· 190 190 #define XFS_BMAP_TRACE_EXLIST(ip,c,w) 191 191 #endif 192 192 193 + void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno, 194 + xfs_filblks_t len); 193 195 int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd); 194 196 void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork); 195 197 void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops, ··· 223 221 xfs_fileoff_t bno, xfs_filblks_t len, int flags, 224 222 xfs_extnum_t nexts, xfs_fsblock_t *firstblock, 225 223 struct xfs_defer_ops *dfops, int *done); 226 - int xfs_bunmapi_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *del); 224 + int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork, 225 + xfs_extnum_t *idx, struct xfs_bmbt_irec *got, 226 + struct xfs_bmbt_irec *del); 227 + void xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx, 228 + struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del); 227 229 int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx, 228 230 xfs_extnum_t num); 229 231 uint xfs_default_attroffset(struct xfs_inode *ip);
+1 -1
fs/xfs/libxfs/xfs_btree.c
··· 4826 4826 return rval; 4827 4827 } 4828 4828 4829 - int 4829 + static int 4830 4830 xfs_btree_count_blocks_helper( 4831 4831 struct xfs_btree_cur *cur, 4832 4832 int level,
+1 -2
fs/xfs/libxfs/xfs_dquot_buf.c
··· 191 191 if (mp->m_quotainfo) 192 192 ndquots = mp->m_quotainfo->qi_dqperchunk; 193 193 else 194 - ndquots = xfs_calc_dquots_per_chunk( 195 - XFS_BB_TO_FSB(mp, bp->b_length)); 194 + ndquots = xfs_calc_dquots_per_chunk(bp->b_length); 196 195 197 196 for (i = 0; i < ndquots; i++, d++) { 198 197 if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
-1
fs/xfs/libxfs/xfs_format.h
··· 865 865 * padding field for v3 inodes. 866 866 */ 867 867 #define XFS_DINODE_MAGIC 0x494e /* 'IN' */ 868 - #define XFS_DINODE_GOOD_VERSION(v) ((v) >= 1 && (v) <= 3) 869 868 typedef struct xfs_dinode { 870 869 __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */ 871 870 __be16 di_mode; /* mode and type of file */
+12 -1
fs/xfs/libxfs/xfs_inode_buf.c
··· 57 57 } 58 58 #endif 59 59 60 + bool 61 + xfs_dinode_good_version( 62 + struct xfs_mount *mp, 63 + __u8 version) 64 + { 65 + if (xfs_sb_version_hascrc(&mp->m_sb)) 66 + return version == 3; 67 + 68 + return version == 1 || version == 2; 69 + } 70 + 60 71 /* 61 72 * If we are doing readahead on an inode buffer, we might be in log recovery 62 73 * reading an inode allocation buffer that hasn't yet been replayed, and hence ··· 102 91 103 92 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog)); 104 93 di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) && 105 - XFS_DINODE_GOOD_VERSION(dip->di_version); 94 + xfs_dinode_good_version(mp, dip->di_version); 106 95 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, 107 96 XFS_ERRTAG_ITOBP_INOTOBP, 108 97 XFS_RANDOM_ITOBP_INOTOBP))) {
+2
fs/xfs/libxfs/xfs_inode_buf.h
··· 74 74 void xfs_log_dinode_to_disk(struct xfs_log_dinode *from, 75 75 struct xfs_dinode *to); 76 76 77 + bool xfs_dinode_good_version(struct xfs_mount *mp, __u8 version); 78 + 77 79 #if defined(DEBUG) 78 80 void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); 79 81 #else
+33 -199
fs/xfs/xfs_file.c
··· 249 249 struct xfs_inode *ip = XFS_I(inode); 250 250 loff_t isize = i_size_read(inode); 251 251 size_t count = iov_iter_count(to); 252 + loff_t end = iocb->ki_pos + count - 1; 252 253 struct iov_iter data; 253 254 struct xfs_buftarg *target; 254 255 ssize_t ret = 0; ··· 273 272 274 273 file_accessed(iocb->ki_filp); 275 274 276 - /* 277 - * Locking is a bit tricky here. If we take an exclusive lock for direct 278 - * IO, we effectively serialise all new concurrent read IO to this file 279 - * and block it behind IO that is currently in progress because IO in 280 - * progress holds the IO lock shared. We only need to hold the lock 281 - * exclusive to blow away the page cache, so only take lock exclusively 282 - * if the page cache needs invalidation. This allows the normal direct 283 - * IO case of no page cache pages to proceeed concurrently without 284 - * serialisation. 285 - */ 286 275 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); 287 276 if (mapping->nrpages) { 288 - xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 289 - xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); 277 + ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end); 278 + if (ret) 279 + goto out_unlock; 290 280 291 281 /* 292 - * The generic dio code only flushes the range of the particular 293 - * I/O. Because we take an exclusive lock here, this whole 294 - * sequence is considerably more expensive for us. This has a 295 - * noticeable performance impact for any file with cached pages, 296 - * even when outside of the range of the particular I/O. 297 - * 298 - * Hence, amortize the cost of the lock against a full file 299 - * flush and reduce the chances of repeated iolock cycles going 300 - * forward. 282 + * Invalidate whole pages. This can return an error if we fail 283 + * to invalidate a page, but this should never happen on XFS. 284 + * Warn if it does fail. 
301 285 */ 302 - if (mapping->nrpages) { 303 - ret = filemap_write_and_wait(mapping); 304 - if (ret) { 305 - xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); 306 - return ret; 307 - } 308 - 309 - /* 310 - * Invalidate whole pages. This can return an error if 311 - * we fail to invalidate a page, but this should never 312 - * happen on XFS. Warn if it does fail. 313 - */ 314 - ret = invalidate_inode_pages2(mapping); 315 - WARN_ON_ONCE(ret); 316 - ret = 0; 317 - } 318 - xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); 286 + ret = invalidate_inode_pages2_range(mapping, 287 + iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT); 288 + WARN_ON_ONCE(ret); 289 + ret = 0; 319 290 } 320 291 321 292 data = *to; ··· 297 324 iocb->ki_pos += ret; 298 325 iov_iter_advance(to, ret); 299 326 } 300 - xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 301 327 328 + out_unlock: 329 + xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 302 330 return ret; 303 331 } 304 332 ··· 544 570 if ((iocb->ki_pos | count) & target->bt_logical_sectormask) 545 571 return -EINVAL; 546 572 547 - /* "unaligned" here means not aligned to a filesystem block */ 573 + /* 574 + * Don't take the exclusive iolock here unless the I/O is unaligned to 575 + * the file system block size. We don't need to consider the EOF 576 + * extension case here because xfs_file_aio_write_checks() will relock 577 + * the inode as necessary for EOF zeroing cases and fill out the new 578 + * inode size as appropriate. 579 + */ 548 580 if ((iocb->ki_pos & mp->m_blockmask) || 549 - ((iocb->ki_pos + count) & mp->m_blockmask)) 581 + ((iocb->ki_pos + count) & mp->m_blockmask)) { 550 582 unaligned_io = 1; 551 - 552 - /* 553 - * We don't need to take an exclusive lock unless there page cache needs 554 - * to be invalidated or unaligned IO is being executed. We don't need to 555 - * consider the EOF extension case here because 556 - * xfs_file_aio_write_checks() will relock the inode as necessary for 557 - * EOF zeroing cases and fill out the new inode size as appropriate. 
558 - */ 559 - if (unaligned_io || mapping->nrpages) 560 583 iolock = XFS_IOLOCK_EXCL; 561 - else 584 + } else { 562 585 iolock = XFS_IOLOCK_SHARED; 563 - xfs_rw_ilock(ip, iolock); 564 - 565 - /* 566 - * Recheck if there are cached pages that need invalidate after we got 567 - * the iolock to protect against other threads adding new pages while 568 - * we were waiting for the iolock. 569 - */ 570 - if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) { 571 - xfs_rw_iunlock(ip, iolock); 572 - iolock = XFS_IOLOCK_EXCL; 573 - xfs_rw_ilock(ip, iolock); 574 586 } 587 + 588 + xfs_rw_ilock(ip, iolock); 575 589 576 590 ret = xfs_file_aio_write_checks(iocb, from, &iolock); 577 591 if (ret) ··· 567 605 count = iov_iter_count(from); 568 606 end = iocb->ki_pos + count - 1; 569 607 570 - /* 571 - * See xfs_file_dio_aio_read() for why we do a full-file flush here. 572 - */ 573 608 if (mapping->nrpages) { 574 - ret = filemap_write_and_wait(VFS_I(ip)->i_mapping); 609 + ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end); 575 610 if (ret) 576 611 goto out; 612 + 577 613 /* 578 614 * Invalidate whole pages. This can return an error if we fail 579 615 * to invalidate a page, but this should never happen on XFS. 580 616 * Warn if it does fail. 581 617 */ 582 - ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping); 618 + ret = invalidate_inode_pages2_range(mapping, 619 + iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT); 583 620 WARN_ON_ONCE(ret); 584 621 ret = 0; 585 622 } 586 623 587 624 /* 588 625 * If we are doing unaligned IO, wait for all other IO to drain, 589 - * otherwise demote the lock if we had to flush cached pages 626 + * otherwise demote the lock if we had to take the exclusive lock 627 + * for other reasons in xfs_file_aio_write_checks. 590 628 */ 591 629 if (unaligned_io) 592 630 inode_dio_wait(inode); ··· 909 947 return error; 910 948 } 911 949 912 - /* 913 - * Flush all file writes out to disk. 
914 - */ 915 - static int 916 - xfs_file_wait_for_io( 917 - struct inode *inode, 918 - loff_t offset, 919 - size_t len) 920 - { 921 - loff_t rounding; 922 - loff_t ioffset; 923 - loff_t iendoffset; 924 - loff_t bs; 925 - int ret; 926 - 927 - bs = inode->i_sb->s_blocksize; 928 - inode_dio_wait(inode); 929 - 930 - rounding = max_t(xfs_off_t, bs, PAGE_SIZE); 931 - ioffset = round_down(offset, rounding); 932 - iendoffset = round_up(offset + len, rounding) - 1; 933 - ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, 934 - iendoffset); 935 - return ret; 936 - } 937 - 938 - /* Hook up to the VFS reflink function */ 939 - STATIC int 940 - xfs_file_share_range( 941 - struct file *file_in, 942 - loff_t pos_in, 943 - struct file *file_out, 944 - loff_t pos_out, 945 - u64 len, 946 - bool is_dedupe) 947 - { 948 - struct inode *inode_in; 949 - struct inode *inode_out; 950 - ssize_t ret; 951 - loff_t bs; 952 - loff_t isize; 953 - int same_inode; 954 - loff_t blen; 955 - unsigned int flags = 0; 956 - 957 - inode_in = file_inode(file_in); 958 - inode_out = file_inode(file_out); 959 - bs = inode_out->i_sb->s_blocksize; 960 - 961 - /* Don't touch certain kinds of inodes */ 962 - if (IS_IMMUTABLE(inode_out)) 963 - return -EPERM; 964 - if (IS_SWAPFILE(inode_in) || 965 - IS_SWAPFILE(inode_out)) 966 - return -ETXTBSY; 967 - 968 - /* Reflink only works within this filesystem. */ 969 - if (inode_in->i_sb != inode_out->i_sb) 970 - return -EXDEV; 971 - same_inode = (inode_in->i_ino == inode_out->i_ino); 972 - 973 - /* Don't reflink dirs, pipes, sockets... */ 974 - if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) 975 - return -EISDIR; 976 - if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode)) 977 - return -EINVAL; 978 - if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) 979 - return -EINVAL; 980 - 981 - /* Don't share DAX file data for now. 
*/ 982 - if (IS_DAX(inode_in) || IS_DAX(inode_out)) 983 - return -EINVAL; 984 - 985 - /* Are we going all the way to the end? */ 986 - isize = i_size_read(inode_in); 987 - if (isize == 0) 988 - return 0; 989 - if (len == 0) 990 - len = isize - pos_in; 991 - 992 - /* Ensure offsets don't wrap and the input is inside i_size */ 993 - if (pos_in + len < pos_in || pos_out + len < pos_out || 994 - pos_in + len > isize) 995 - return -EINVAL; 996 - 997 - /* Don't allow dedupe past EOF in the dest file */ 998 - if (is_dedupe) { 999 - loff_t disize; 1000 - 1001 - disize = i_size_read(inode_out); 1002 - if (pos_out >= disize || pos_out + len > disize) 1003 - return -EINVAL; 1004 - } 1005 - 1006 - /* If we're linking to EOF, continue to the block boundary. */ 1007 - if (pos_in + len == isize) 1008 - blen = ALIGN(isize, bs) - pos_in; 1009 - else 1010 - blen = len; 1011 - 1012 - /* Only reflink if we're aligned to block boundaries */ 1013 - if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) || 1014 - !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs)) 1015 - return -EINVAL; 1016 - 1017 - /* Don't allow overlapped reflink within the same file */ 1018 - if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen) 1019 - return -EINVAL; 1020 - 1021 - /* Wait for the completion of any pending IOs on srcfile */ 1022 - ret = xfs_file_wait_for_io(inode_in, pos_in, len); 1023 - if (ret) 1024 - goto out; 1025 - ret = xfs_file_wait_for_io(inode_out, pos_out, len); 1026 - if (ret) 1027 - goto out; 1028 - 1029 - if (is_dedupe) 1030 - flags |= XFS_REFLINK_DEDUPE; 1031 - ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out), 1032 - pos_out, len, flags); 1033 - if (ret < 0) 1034 - goto out; 1035 - 1036 - out: 1037 - return ret; 1038 - } 1039 - 1040 950 STATIC ssize_t 1041 951 xfs_file_copy_range( 1042 952 struct file *file_in, ··· 920 1086 { 921 1087 int error; 922 1088 923 - error = xfs_file_share_range(file_in, pos_in, file_out, pos_out, 1089 + 
error = xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out, 924 1090 len, false); 925 1091 if (error) 926 1092 return error; ··· 935 1101 loff_t pos_out, 936 1102 u64 len) 937 1103 { 938 - return xfs_file_share_range(file_in, pos_in, file_out, pos_out, 1104 + return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out, 939 1105 len, false); 940 1106 } 941 1107 ··· 958 1124 if (len > XFS_MAX_DEDUPE_LEN) 959 1125 len = XFS_MAX_DEDUPE_LEN; 960 1126 961 - error = xfs_file_share_range(src_file, loff, dst_file, dst_loff, 1127 + error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff, 962 1128 len, true); 963 1129 if (error) 964 1130 return error;
+4 -4
fs/xfs/xfs_icache.c
··· 1656 1656 xfs_inode_set_cowblocks_tag( 1657 1657 xfs_inode_t *ip) 1658 1658 { 1659 - trace_xfs_inode_set_eofblocks_tag(ip); 1659 + trace_xfs_inode_set_cowblocks_tag(ip); 1660 1660 return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks, 1661 - trace_xfs_perag_set_eofblocks, 1661 + trace_xfs_perag_set_cowblocks, 1662 1662 XFS_ICI_COWBLOCKS_TAG); 1663 1663 } 1664 1664 ··· 1666 1666 xfs_inode_clear_cowblocks_tag( 1667 1667 xfs_inode_t *ip) 1668 1668 { 1669 - trace_xfs_inode_clear_eofblocks_tag(ip); 1669 + trace_xfs_inode_clear_cowblocks_tag(ip); 1670 1670 return __xfs_inode_clear_eofblocks_tag(ip, 1671 - trace_xfs_perag_clear_eofblocks, XFS_ICI_COWBLOCKS_TAG); 1671 + trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG); 1672 1672 }
+41 -16
fs/xfs/xfs_iomap.c
··· 566 566 xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx, 567 567 &got, &prev); 568 568 if (!eof && got.br_startoff <= offset_fsb) { 569 + if (xfs_is_reflink_inode(ip)) { 570 + bool shared; 571 + 572 + end_fsb = min(XFS_B_TO_FSB(mp, offset + count), 573 + maxbytes_fsb); 574 + xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb); 575 + error = xfs_reflink_reserve_cow(ip, &got, &shared); 576 + if (error) 577 + goto out_unlock; 578 + } 579 + 569 580 trace_xfs_iomap_found(ip, offset, count, 0, &got); 570 581 goto done; 571 582 } ··· 972 961 struct xfs_mount *mp = ip->i_mount; 973 962 struct xfs_bmbt_irec imap; 974 963 xfs_fileoff_t offset_fsb, end_fsb; 975 - bool shared, trimmed; 976 964 int nimaps = 1, error = 0; 965 + bool shared = false, trimmed = false; 977 966 unsigned lockmode; 978 967 979 968 if (XFS_FORCED_SHUTDOWN(mp)) 980 969 return -EIO; 981 - 982 - if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) { 983 - error = xfs_reflink_reserve_cow_range(ip, offset, length); 984 - if (error < 0) 985 - return error; 986 - } 987 970 988 971 if ((flags & IOMAP_WRITE) && !IS_DAX(inode) && 989 972 !xfs_get_extsz_hint(ip)) { ··· 986 981 iomap); 987 982 } 988 983 989 - lockmode = xfs_ilock_data_map_shared(ip); 984 + /* 985 + * COW writes will allocate delalloc space, so we need to make sure 986 + * to take the lock exclusively here. 
987 + */ 988 + if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) { 989 + lockmode = XFS_ILOCK_EXCL; 990 + xfs_ilock(ip, XFS_ILOCK_EXCL); 991 + } else { 992 + lockmode = xfs_ilock_data_map_shared(ip); 993 + } 990 994 991 995 ASSERT(offset <= mp->m_super->s_maxbytes); 992 996 if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes) ··· 1005 991 1006 992 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, 1007 993 &nimaps, 0); 1008 - if (error) { 1009 - xfs_iunlock(ip, lockmode); 1010 - return error; 994 + if (error) 995 + goto out_unlock; 996 + 997 + if (flags & IOMAP_REPORT) { 998 + /* Trim the mapping to the nearest shared extent boundary. */ 999 + error = xfs_reflink_trim_around_shared(ip, &imap, &shared, 1000 + &trimmed); 1001 + if (error) 1002 + goto out_unlock; 1011 1003 } 1012 1004 1013 - /* Trim the mapping to the nearest shared extent boundary. */ 1014 - error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed); 1015 - if (error) { 1016 - xfs_iunlock(ip, lockmode); 1017 - return error; 1005 + if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) { 1006 + error = xfs_reflink_reserve_cow(ip, &imap, &shared); 1007 + if (error) 1008 + goto out_unlock; 1009 + 1010 + end_fsb = imap.br_startoff + imap.br_blockcount; 1011 + length = XFS_FSB_TO_B(mp, end_fsb) - offset; 1018 1012 } 1019 1013 1020 1014 if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) { ··· 1061 1039 if (shared) 1062 1040 iomap->flags |= IOMAP_F_SHARED; 1063 1041 return 0; 1042 + out_unlock: 1043 + xfs_iunlock(ip, lockmode); 1044 + return error; 1064 1045 } 1065 1046 1066 1047 static int
+1
fs/xfs/xfs_mount.c
··· 1009 1009 out_quota: 1010 1010 xfs_qm_unmount_quotas(mp); 1011 1011 out_rtunmount: 1012 + mp->m_super->s_flags &= ~MS_ACTIVE; 1012 1013 xfs_rtunmount_inodes(mp); 1013 1014 out_rele_rip: 1014 1015 IRELE(rip);
+277 -232
fs/xfs/xfs_reflink.c
··· 182 182 if (!xfs_is_reflink_inode(ip) || 183 183 ISUNWRITTEN(irec) || 184 184 irec->br_startblock == HOLESTARTBLOCK || 185 - irec->br_startblock == DELAYSTARTBLOCK) { 185 + irec->br_startblock == DELAYSTARTBLOCK || 186 + isnullstartblock(irec->br_startblock)) { 186 187 *shared = false; 187 188 return 0; 188 189 } ··· 228 227 } 229 228 } 230 229 231 - /* Create a CoW reservation for a range of blocks within a file. */ 232 - static int 233 - __xfs_reflink_reserve_cow( 230 + /* 231 + * Trim the passed in imap to the next shared/unshared extent boundary, and 232 + * if imap->br_startoff points to a shared extent reserve space for it in the 233 + * COW fork. In this case *shared is set to true, else to false. 234 + * 235 + * Note that imap will always contain the block numbers for the existing blocks 236 + * in the data fork, as the upper layers need them for read-modify-write 237 + * operations. 238 + */ 239 + int 240 + xfs_reflink_reserve_cow( 234 241 struct xfs_inode *ip, 235 - xfs_fileoff_t *offset_fsb, 236 - xfs_fileoff_t end_fsb, 237 - bool *skipped) 242 + struct xfs_bmbt_irec *imap, 243 + bool *shared) 238 244 { 239 - struct xfs_bmbt_irec got, prev, imap; 240 - xfs_fileoff_t orig_end_fsb; 241 - int nimaps, eof = 0, error = 0; 242 - bool shared = false, trimmed = false; 245 + struct xfs_bmbt_irec got, prev; 246 + xfs_fileoff_t end_fsb, orig_end_fsb; 247 + int eof = 0, error = 0; 248 + bool trimmed; 243 249 xfs_extnum_t idx; 244 250 xfs_extlen_t align; 245 251 246 - /* Already reserved? Skip the refcount btree access. */ 247 - xfs_bmap_search_extents(ip, *offset_fsb, XFS_COW_FORK, &eof, &idx, 252 + /* 253 + * Search the COW fork extent list first. This serves two purposes: 254 + * first this implement the speculative preallocation using cowextisze, 255 + * so that we also unshared block adjacent to shared blocks instead 256 + * of just the shared blocks themselves. 
Second the lookup in the 257 + * extent list is generally faster than going out to the shared extent 258 + * tree. 259 + */ 260 + xfs_bmap_search_extents(ip, imap->br_startoff, XFS_COW_FORK, &eof, &idx, 248 261 &got, &prev); 249 - if (!eof && got.br_startoff <= *offset_fsb) { 250 - end_fsb = orig_end_fsb = got.br_startoff + got.br_blockcount; 251 - trace_xfs_reflink_cow_found(ip, &got); 252 - goto done; 253 - } 262 + if (!eof && got.br_startoff <= imap->br_startoff) { 263 + trace_xfs_reflink_cow_found(ip, imap); 264 + xfs_trim_extent(imap, got.br_startoff, got.br_blockcount); 254 265 255 - /* Read extent from the source file. */ 256 - nimaps = 1; 257 - error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb, 258 - &imap, &nimaps, 0); 259 - if (error) 260 - goto out_unlock; 261 - ASSERT(nimaps == 1); 266 + *shared = true; 267 + return 0; 268 + } 262 269 263 270 /* Trim the mapping to the nearest shared extent boundary. */ 264 - error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed); 271 + error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed); 265 272 if (error) 266 - goto out_unlock; 267 - 268 - end_fsb = orig_end_fsb = imap.br_startoff + imap.br_blockcount; 273 + return error; 269 274 270 275 /* Not shared? Just report the (potentially capped) extent. 
*/ 271 - if (!shared) { 272 - *skipped = true; 273 - goto done; 274 - } 276 + if (!*shared) 277 + return 0; 275 278 276 279 /* 277 280 * Fork all the shared blocks from our write offset until the end of ··· 283 278 */ 284 279 error = xfs_qm_dqattach_locked(ip, 0); 285 280 if (error) 286 - goto out_unlock; 281 + return error; 282 + 283 + end_fsb = orig_end_fsb = imap->br_startoff + imap->br_blockcount; 287 284 288 285 align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip)); 289 286 if (align) 290 287 end_fsb = roundup_64(end_fsb, align); 291 288 292 289 retry: 293 - error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, *offset_fsb, 294 - end_fsb - *offset_fsb, &got, 295 - &prev, &idx, eof); 290 + error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff, 291 + end_fsb - imap->br_startoff, &got, &prev, &idx, eof); 296 292 switch (error) { 297 293 case 0: 298 294 break; 299 295 case -ENOSPC: 300 296 case -EDQUOT: 301 297 /* retry without any preallocation */ 302 - trace_xfs_reflink_cow_enospc(ip, &imap); 298 + trace_xfs_reflink_cow_enospc(ip, imap); 303 299 if (end_fsb != orig_end_fsb) { 304 300 end_fsb = orig_end_fsb; 305 301 goto retry; 306 302 } 307 303 /*FALLTHRU*/ 308 304 default: 309 - goto out_unlock; 305 + return error; 310 306 } 311 307 312 308 if (end_fsb != orig_end_fsb) 313 309 xfs_inode_set_cowblocks_tag(ip); 314 310 315 311 trace_xfs_reflink_cow_alloc(ip, &got); 316 - done: 317 - *offset_fsb = end_fsb; 318 - out_unlock: 319 - return error; 320 - } 321 - 322 - /* Create a CoW reservation for part of a file. 
*/ 323 - int 324 - xfs_reflink_reserve_cow_range( 325 - struct xfs_inode *ip, 326 - xfs_off_t offset, 327 - xfs_off_t count) 328 - { 329 - struct xfs_mount *mp = ip->i_mount; 330 - xfs_fileoff_t offset_fsb, end_fsb; 331 - bool skipped = false; 332 - int error; 333 - 334 - trace_xfs_reflink_reserve_cow_range(ip, offset, count); 335 - 336 - offset_fsb = XFS_B_TO_FSBT(mp, offset); 337 - end_fsb = XFS_B_TO_FSB(mp, offset + count); 338 - 339 - xfs_ilock(ip, XFS_ILOCK_EXCL); 340 - while (offset_fsb < end_fsb) { 341 - error = __xfs_reflink_reserve_cow(ip, &offset_fsb, end_fsb, 342 - &skipped); 343 - if (error) { 344 - trace_xfs_reflink_reserve_cow_range_error(ip, error, 345 - _RET_IP_); 346 - break; 347 - } 348 - } 349 - xfs_iunlock(ip, XFS_ILOCK_EXCL); 350 - 351 - return error; 312 + return 0; 352 313 } 353 314 354 315 /* Allocate all CoW reservations covering a range of blocks in a file. */ ··· 329 358 struct xfs_defer_ops dfops; 330 359 struct xfs_trans *tp; 331 360 xfs_fsblock_t first_block; 332 - xfs_fileoff_t next_fsb; 333 361 int nimaps = 1, error; 334 - bool skipped = false; 362 + bool shared; 335 363 336 364 xfs_defer_init(&dfops, &first_block); 337 365 ··· 341 371 342 372 xfs_ilock(ip, XFS_ILOCK_EXCL); 343 373 344 - next_fsb = *offset_fsb; 345 - error = __xfs_reflink_reserve_cow(ip, &next_fsb, end_fsb, &skipped); 374 + /* Read extent from the source file. 
*/ 375 + nimaps = 1; 376 + error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb, 377 + &imap, &nimaps, 0); 378 + if (error) 379 + goto out_unlock; 380 + ASSERT(nimaps == 1); 381 + 382 + error = xfs_reflink_reserve_cow(ip, &imap, &shared); 346 383 if (error) 347 384 goto out_trans_cancel; 348 385 349 - if (skipped) { 350 - *offset_fsb = next_fsb; 386 + if (!shared) { 387 + *offset_fsb = imap.br_startoff + imap.br_blockcount; 351 388 goto out_trans_cancel; 352 389 } 353 390 354 391 xfs_trans_ijoin(tp, ip, 0); 355 - error = xfs_bmapi_write(tp, ip, *offset_fsb, next_fsb - *offset_fsb, 392 + error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount, 356 393 XFS_BMAPI_COWFORK, &first_block, 357 394 XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 358 395 &imap, &nimaps, &dfops); 359 396 if (error) 360 397 goto out_trans_cancel; 361 - 362 - /* We might not have been able to map the whole delalloc extent */ 363 - *offset_fsb = min(*offset_fsb + imap.br_blockcount, next_fsb); 364 398 365 399 error = xfs_defer_finish(&tp, &dfops, NULL); 366 400 if (error) ··· 372 398 373 399 error = xfs_trans_commit(tp); 374 400 401 + *offset_fsb = imap.br_startoff + imap.br_blockcount; 375 402 out_unlock: 376 403 xfs_iunlock(ip, XFS_ILOCK_EXCL); 377 404 return error; ··· 511 536 xfs_fileoff_t offset_fsb, 512 537 xfs_fileoff_t end_fsb) 513 538 { 514 - struct xfs_bmbt_irec irec; 515 - xfs_filblks_t count_fsb; 539 + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 540 + struct xfs_bmbt_irec got, prev, del; 541 + xfs_extnum_t idx; 516 542 xfs_fsblock_t firstfsb; 517 543 struct xfs_defer_ops dfops; 518 - int error = 0; 519 - int nimaps; 544 + int error = 0, eof = 0; 520 545 521 546 if (!xfs_is_reflink_inode(ip)) 522 547 return 0; 523 548 524 - /* Go find the old extent in the CoW fork. 
*/ 525 - while (offset_fsb < end_fsb) { 526 - nimaps = 1; 527 - count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb); 528 - error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec, 529 - &nimaps, XFS_BMAPI_COWFORK); 530 - if (error) 531 - break; 532 - ASSERT(nimaps == 1); 549 + xfs_bmap_search_extents(ip, offset_fsb, XFS_COW_FORK, &eof, &idx, 550 + &got, &prev); 551 + if (eof) 552 + return 0; 533 553 534 - trace_xfs_reflink_cancel_cow(ip, &irec); 554 + while (got.br_startoff < end_fsb) { 555 + del = got; 556 + xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb); 557 + trace_xfs_reflink_cancel_cow(ip, &del); 535 558 536 - if (irec.br_startblock == DELAYSTARTBLOCK) { 537 - /* Free a delayed allocation. */ 538 - xfs_mod_fdblocks(ip->i_mount, irec.br_blockcount, 539 - false); 540 - ip->i_delayed_blks -= irec.br_blockcount; 541 - 542 - /* Remove the mapping from the CoW fork. */ 543 - error = xfs_bunmapi_cow(ip, &irec); 559 + if (isnullstartblock(del.br_startblock)) { 560 + error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK, 561 + &idx, &got, &del); 544 562 if (error) 545 563 break; 546 - } else if (irec.br_startblock == HOLESTARTBLOCK) { 547 - /* empty */ 548 564 } else { 549 565 xfs_trans_ijoin(*tpp, ip, 0); 550 566 xfs_defer_init(&dfops, &firstfsb); 551 567 552 568 /* Free the CoW orphan record. 
*/ 553 569 error = xfs_refcount_free_cow_extent(ip->i_mount, 554 - &dfops, irec.br_startblock, 555 - irec.br_blockcount); 570 + &dfops, del.br_startblock, 571 + del.br_blockcount); 556 572 if (error) 557 573 break; 558 574 559 575 xfs_bmap_add_free(ip->i_mount, &dfops, 560 - irec.br_startblock, irec.br_blockcount, 576 + del.br_startblock, del.br_blockcount, 561 577 NULL); 562 578 563 579 /* Update quota accounting */ 564 580 xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT, 565 - -(long)irec.br_blockcount); 581 + -(long)del.br_blockcount); 566 582 567 583 /* Roll the transaction */ 568 584 error = xfs_defer_finish(tpp, &dfops, ip); ··· 563 597 } 564 598 565 599 /* Remove the mapping from the CoW fork. */ 566 - error = xfs_bunmapi_cow(ip, &irec); 567 - if (error) 568 - break; 600 + xfs_bmap_del_extent_cow(ip, &idx, &got, &del); 569 601 } 570 602 571 - /* Roll on... */ 572 - offset_fsb = irec.br_startoff + irec.br_blockcount; 603 + if (++idx >= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)) 604 + break; 605 + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got); 573 606 } 607 + 608 + /* clear tag if cow fork is emptied */ 609 + if (!ifp->if_bytes) 610 + xfs_inode_clear_cowblocks_tag(ip); 574 611 575 612 return error; 576 613 } ··· 637 668 xfs_off_t offset, 638 669 xfs_off_t count) 639 670 { 640 - struct xfs_bmbt_irec irec; 641 - struct xfs_bmbt_irec uirec; 671 + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 672 + struct xfs_bmbt_irec got, prev, del; 642 673 struct xfs_trans *tp; 643 674 xfs_fileoff_t offset_fsb; 644 675 xfs_fileoff_t end_fsb; 645 - xfs_filblks_t count_fsb; 646 676 xfs_fsblock_t firstfsb; 647 677 struct xfs_defer_ops dfops; 648 - int error; 678 + int error, eof = 0; 649 679 unsigned int resblks; 650 - xfs_filblks_t ilen; 651 680 xfs_filblks_t rlen; 652 - int nimaps; 681 + xfs_extnum_t idx; 653 682 654 683 trace_xfs_reflink_end_cow(ip, offset, count); 655 684 685 + /* No COW extents? That's easy! 
*/ 686 + if (ifp->if_bytes == 0) 687 + return 0; 688 + 656 689 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); 657 690 end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count); 658 - count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb); 659 691 660 692 /* Start a rolling transaction to switch the mappings */ 661 693 resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK); ··· 668 698 xfs_ilock(ip, XFS_ILOCK_EXCL); 669 699 xfs_trans_ijoin(tp, ip, 0); 670 700 671 - /* Go find the old extent in the CoW fork. */ 672 - while (offset_fsb < end_fsb) { 673 - /* Read extent from the source file */ 674 - nimaps = 1; 675 - count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb); 676 - error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec, 677 - &nimaps, XFS_BMAPI_COWFORK); 678 - if (error) 679 - goto out_cancel; 680 - ASSERT(nimaps == 1); 701 + xfs_bmap_search_extents(ip, end_fsb - 1, XFS_COW_FORK, &eof, &idx, 702 + &got, &prev); 681 703 682 - ASSERT(irec.br_startblock != DELAYSTARTBLOCK); 683 - trace_xfs_reflink_cow_remap(ip, &irec); 704 + /* If there is a hole at end_fsb - 1 go to the previous extent */ 705 + if (eof || got.br_startoff > end_fsb) { 706 + ASSERT(idx > 0); 707 + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, --idx), &got); 708 + } 684 709 685 - /* 686 - * We can have a hole in the CoW fork if part of a directio 687 - * write is CoW but part of it isn't. 688 - */ 689 - rlen = ilen = irec.br_blockcount; 690 - if (irec.br_startblock == HOLESTARTBLOCK) 710 + /* Walk backwards until we're out of the I/O range... */ 711 + while (got.br_startoff + got.br_blockcount > offset_fsb) { 712 + del = got; 713 + xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb); 714 + 715 + /* Extent delete may have bumped idx forward */ 716 + if (!del.br_blockcount) { 717 + idx--; 691 718 goto next_extent; 692 - 693 - /* Unmap the old blocks in the data fork. 
*/ 694 - while (rlen) { 695 - xfs_defer_init(&dfops, &firstfsb); 696 - error = __xfs_bunmapi(tp, ip, irec.br_startoff, 697 - &rlen, 0, 1, &firstfsb, &dfops); 698 - if (error) 699 - goto out_defer; 700 - 701 - /* 702 - * Trim the extent to whatever got unmapped. 703 - * Remember, bunmapi works backwards. 704 - */ 705 - uirec.br_startblock = irec.br_startblock + rlen; 706 - uirec.br_startoff = irec.br_startoff + rlen; 707 - uirec.br_blockcount = irec.br_blockcount - rlen; 708 - irec.br_blockcount = rlen; 709 - trace_xfs_reflink_cow_remap_piece(ip, &uirec); 710 - 711 - /* Free the CoW orphan record. */ 712 - error = xfs_refcount_free_cow_extent(tp->t_mountp, 713 - &dfops, uirec.br_startblock, 714 - uirec.br_blockcount); 715 - if (error) 716 - goto out_defer; 717 - 718 - /* Map the new blocks into the data fork. */ 719 - error = xfs_bmap_map_extent(tp->t_mountp, &dfops, 720 - ip, &uirec); 721 - if (error) 722 - goto out_defer; 723 - 724 - /* Remove the mapping from the CoW fork. */ 725 - error = xfs_bunmapi_cow(ip, &uirec); 726 - if (error) 727 - goto out_defer; 728 - 729 - error = xfs_defer_finish(&tp, &dfops, ip); 730 - if (error) 731 - goto out_defer; 732 719 } 733 720 721 + ASSERT(!isnullstartblock(got.br_startblock)); 722 + 723 + /* Unmap the old blocks in the data fork. */ 724 + xfs_defer_init(&dfops, &firstfsb); 725 + rlen = del.br_blockcount; 726 + error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1, 727 + &firstfsb, &dfops); 728 + if (error) 729 + goto out_defer; 730 + 731 + /* Trim the extent to whatever got unmapped. */ 732 + if (rlen) { 733 + xfs_trim_extent(&del, del.br_startoff + rlen, 734 + del.br_blockcount - rlen); 735 + } 736 + trace_xfs_reflink_cow_remap(ip, &del); 737 + 738 + /* Free the CoW orphan record. */ 739 + error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops, 740 + del.br_startblock, del.br_blockcount); 741 + if (error) 742 + goto out_defer; 743 + 744 + /* Map the new blocks into the data fork. 
*/ 745 + error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del); 746 + if (error) 747 + goto out_defer; 748 + 749 + /* Remove the mapping from the CoW fork. */ 750 + xfs_bmap_del_extent_cow(ip, &idx, &got, &del); 751 + 752 + error = xfs_defer_finish(&tp, &dfops, ip); 753 + if (error) 754 + goto out_defer; 755 + 734 756 next_extent: 735 - /* Roll on... */ 736 - offset_fsb = irec.br_startoff + ilen; 757 + if (idx < 0) 758 + break; 759 + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got); 737 760 } 738 761 739 762 error = xfs_trans_commit(tp); ··· 737 774 738 775 out_defer: 739 776 xfs_defer_cancel(&dfops); 740 - out_cancel: 741 777 xfs_trans_cancel(tp); 742 778 xfs_iunlock(ip, XFS_ILOCK_EXCL); 743 779 out: ··· 1274 1312 */ 1275 1313 int 1276 1314 xfs_reflink_remap_range( 1277 - struct xfs_inode *src, 1278 - xfs_off_t srcoff, 1279 - struct xfs_inode *dest, 1280 - xfs_off_t destoff, 1281 - xfs_off_t len, 1282 - unsigned int flags) 1315 + struct file *file_in, 1316 + loff_t pos_in, 1317 + struct file *file_out, 1318 + loff_t pos_out, 1319 + u64 len, 1320 + bool is_dedupe) 1283 1321 { 1322 + struct inode *inode_in = file_inode(file_in); 1323 + struct xfs_inode *src = XFS_I(inode_in); 1324 + struct inode *inode_out = file_inode(file_out); 1325 + struct xfs_inode *dest = XFS_I(inode_out); 1284 1326 struct xfs_mount *mp = src->i_mount; 1327 + loff_t bs = inode_out->i_sb->s_blocksize; 1328 + bool same_inode = (inode_in == inode_out); 1285 1329 xfs_fileoff_t sfsbno, dfsbno; 1286 1330 xfs_filblks_t fsblen; 1287 - int error; 1288 1331 xfs_extlen_t cowextsize; 1289 - bool is_same; 1332 + loff_t isize; 1333 + ssize_t ret; 1334 + loff_t blen; 1290 1335 1291 1336 if (!xfs_sb_version_hasreflink(&mp->m_sb)) 1292 1337 return -EOPNOTSUPP; ··· 1301 1332 if (XFS_FORCED_SHUTDOWN(mp)) 1302 1333 return -EIO; 1303 1334 1304 - /* Don't reflink realtime inodes */ 1305 - if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest)) 1306 - return -EINVAL; 1307 - 1308 - if (flags & 
~XFS_REFLINK_ALL) 1309 - return -EINVAL; 1310 - 1311 - trace_xfs_reflink_remap_range(src, srcoff, len, dest, destoff); 1312 - 1313 1335 /* Lock both files against IO */ 1314 - if (src->i_ino == dest->i_ino) { 1336 + if (same_inode) { 1315 1337 xfs_ilock(src, XFS_IOLOCK_EXCL); 1316 1338 xfs_ilock(src, XFS_MMAPLOCK_EXCL); 1317 1339 } else { ··· 1310 1350 xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL); 1311 1351 } 1312 1352 1353 + /* Don't touch certain kinds of inodes */ 1354 + ret = -EPERM; 1355 + if (IS_IMMUTABLE(inode_out)) 1356 + goto out_unlock; 1357 + 1358 + ret = -ETXTBSY; 1359 + if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out)) 1360 + goto out_unlock; 1361 + 1362 + 1363 + /* Don't reflink dirs, pipes, sockets... */ 1364 + ret = -EISDIR; 1365 + if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) 1366 + goto out_unlock; 1367 + ret = -EINVAL; 1368 + if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode)) 1369 + goto out_unlock; 1370 + if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) 1371 + goto out_unlock; 1372 + 1373 + /* Don't reflink realtime inodes */ 1374 + if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest)) 1375 + goto out_unlock; 1376 + 1377 + /* Don't share DAX file data for now. */ 1378 + if (IS_DAX(inode_in) || IS_DAX(inode_out)) 1379 + goto out_unlock; 1380 + 1381 + /* Are we going all the way to the end? 
*/ 1382 + isize = i_size_read(inode_in); 1383 + if (isize == 0) { 1384 + ret = 0; 1385 + goto out_unlock; 1386 + } 1387 + 1388 + if (len == 0) 1389 + len = isize - pos_in; 1390 + 1391 + /* Ensure offsets don't wrap and the input is inside i_size */ 1392 + if (pos_in + len < pos_in || pos_out + len < pos_out || 1393 + pos_in + len > isize) 1394 + goto out_unlock; 1395 + 1396 + /* Don't allow dedupe past EOF in the dest file */ 1397 + if (is_dedupe) { 1398 + loff_t disize; 1399 + 1400 + disize = i_size_read(inode_out); 1401 + if (pos_out >= disize || pos_out + len > disize) 1402 + goto out_unlock; 1403 + } 1404 + 1405 + /* If we're linking to EOF, continue to the block boundary. */ 1406 + if (pos_in + len == isize) 1407 + blen = ALIGN(isize, bs) - pos_in; 1408 + else 1409 + blen = len; 1410 + 1411 + /* Only reflink if we're aligned to block boundaries */ 1412 + if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) || 1413 + !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs)) 1414 + goto out_unlock; 1415 + 1416 + /* Don't allow overlapped reflink within the same file */ 1417 + if (same_inode) { 1418 + if (pos_out + blen > pos_in && pos_out < pos_in + blen) 1419 + goto out_unlock; 1420 + } 1421 + 1422 + /* Wait for the completion of any pending IOs on both files */ 1423 + inode_dio_wait(inode_in); 1424 + if (!same_inode) 1425 + inode_dio_wait(inode_out); 1426 + 1427 + ret = filemap_write_and_wait_range(inode_in->i_mapping, 1428 + pos_in, pos_in + len - 1); 1429 + if (ret) 1430 + goto out_unlock; 1431 + 1432 + ret = filemap_write_and_wait_range(inode_out->i_mapping, 1433 + pos_out, pos_out + len - 1); 1434 + if (ret) 1435 + goto out_unlock; 1436 + 1437 + trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); 1438 + 1313 1439 /* 1314 1440 * Check that the extents are the same. 
1315 1441 */ 1316 - if (flags & XFS_REFLINK_DEDUPE) { 1317 - is_same = false; 1318 - error = xfs_compare_extents(VFS_I(src), srcoff, VFS_I(dest), 1319 - destoff, len, &is_same); 1320 - if (error) 1321 - goto out_error; 1442 + if (is_dedupe) { 1443 + bool is_same = false; 1444 + 1445 + ret = xfs_compare_extents(inode_in, pos_in, inode_out, pos_out, 1446 + len, &is_same); 1447 + if (ret) 1448 + goto out_unlock; 1322 1449 if (!is_same) { 1323 - error = -EBADE; 1324 - goto out_error; 1450 + ret = -EBADE; 1451 + goto out_unlock; 1325 1452 } 1326 1453 } 1327 1454 1328 - error = xfs_reflink_set_inode_flag(src, dest); 1329 - if (error) 1330 - goto out_error; 1455 + ret = xfs_reflink_set_inode_flag(src, dest); 1456 + if (ret) 1457 + goto out_unlock; 1331 1458 1332 1459 /* 1333 1460 * Invalidate the page cache so that we can clear any CoW mappings 1334 1461 * in the destination file. 1335 1462 */ 1336 - truncate_inode_pages_range(&VFS_I(dest)->i_data, destoff, 1337 - PAGE_ALIGN(destoff + len) - 1); 1463 + truncate_inode_pages_range(&inode_out->i_data, pos_out, 1464 + PAGE_ALIGN(pos_out + len) - 1); 1338 1465 1339 - dfsbno = XFS_B_TO_FSBT(mp, destoff); 1340 - sfsbno = XFS_B_TO_FSBT(mp, srcoff); 1466 + dfsbno = XFS_B_TO_FSBT(mp, pos_out); 1467 + sfsbno = XFS_B_TO_FSBT(mp, pos_in); 1341 1468 fsblen = XFS_B_TO_FSB(mp, len); 1342 - error = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen, 1343 - destoff + len); 1344 - if (error) 1345 - goto out_error; 1469 + ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen, 1470 + pos_out + len); 1471 + if (ret) 1472 + goto out_unlock; 1346 1473 1347 1474 /* 1348 1475 * Carry the cowextsize hint from src to dest if we're sharing the ··· 1437 1390 * has a cowextsize hint, and the destination file does not. 
1438 1391 */ 1439 1392 cowextsize = 0; 1440 - if (srcoff == 0 && len == i_size_read(VFS_I(src)) && 1393 + if (pos_in == 0 && len == i_size_read(inode_in) && 1441 1394 (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) && 1442 - destoff == 0 && len >= i_size_read(VFS_I(dest)) && 1395 + pos_out == 0 && len >= i_size_read(inode_out) && 1443 1396 !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)) 1444 1397 cowextsize = src->i_d.di_cowextsize; 1445 1398 1446 - error = xfs_reflink_update_dest(dest, destoff + len, cowextsize); 1447 - if (error) 1448 - goto out_error; 1399 + ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize); 1449 1400 1450 - out_error: 1401 + out_unlock: 1451 1402 xfs_iunlock(src, XFS_MMAPLOCK_EXCL); 1452 1403 xfs_iunlock(src, XFS_IOLOCK_EXCL); 1453 1404 if (src->i_ino != dest->i_ino) { 1454 1405 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); 1455 1406 xfs_iunlock(dest, XFS_IOLOCK_EXCL); 1456 1407 } 1457 - if (error) 1458 - trace_xfs_reflink_remap_range_error(dest, error, _RET_IP_); 1459 - return error; 1408 + if (ret) 1409 + trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); 1410 + return ret; 1460 1411 } 1461 1412 1462 1413 /*
+4 -7
fs/xfs/xfs_reflink.h
··· 26 26 extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip, 27 27 struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed); 28 28 29 - extern int xfs_reflink_reserve_cow_range(struct xfs_inode *ip, 30 - xfs_off_t offset, xfs_off_t count); 29 + extern int xfs_reflink_reserve_cow(struct xfs_inode *ip, 30 + struct xfs_bmbt_irec *imap, bool *shared); 31 31 extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip, 32 32 xfs_off_t offset, xfs_off_t count); 33 33 extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset, ··· 43 43 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, 44 44 xfs_off_t count); 45 45 extern int xfs_reflink_recover_cow(struct xfs_mount *mp); 46 - #define XFS_REFLINK_DEDUPE 1 /* only reflink if contents match */ 47 - #define XFS_REFLINK_ALL (XFS_REFLINK_DEDUPE) 48 - extern int xfs_reflink_remap_range(struct xfs_inode *src, xfs_off_t srcoff, 49 - struct xfs_inode *dest, xfs_off_t destoff, xfs_off_t len, 50 - unsigned int flags); 46 + extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in, 47 + struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe); 51 48 extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip, 52 49 struct xfs_trans **tpp); 53 50 extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
+2 -2
fs/xfs/xfs_sysfs.c
··· 512 512 }; 513 513 514 514 515 - struct kobj_type xfs_error_cfg_ktype = { 515 + static struct kobj_type xfs_error_cfg_ktype = { 516 516 .release = xfs_sysfs_release, 517 517 .sysfs_ops = &xfs_sysfs_ops, 518 518 .default_attrs = xfs_error_attrs, 519 519 }; 520 520 521 - struct kobj_type xfs_error_ktype = { 521 + static struct kobj_type xfs_error_ktype = { 522 522 .release = xfs_sysfs_release, 523 523 .sysfs_ops = &xfs_sysfs_ops, 524 524 };
+1 -3
fs/xfs/xfs_trace.h
··· 3346 3346 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found); 3347 3347 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc); 3348 3348 3349 - DEFINE_RW_EVENT(xfs_reflink_reserve_cow_range); 3349 + DEFINE_RW_EVENT(xfs_reflink_reserve_cow); 3350 3350 DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range); 3351 3351 3352 3352 DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write); ··· 3356 3356 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range); 3357 3357 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow); 3358 3358 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap); 3359 - DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_piece); 3360 3359 3361 - DEFINE_INODE_ERROR_EVENT(xfs_reflink_reserve_cow_range_error); 3362 3360 DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error); 3363 3361 DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error); 3364 3362 DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
+1 -1
include/asm-generic/export.h
··· 70 70 #include <generated/autoksyms.h> 71 71 72 72 #define __EXPORT_SYMBOL(sym, val, sec) \ 73 - __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym)) 73 + __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym)) 74 74 #define __cond_export_sym(sym, val, sec, conf) \ 75 75 ___cond_export_sym(sym, val, sec, conf) 76 76 #define ___cond_export_sym(sym, val, sec, enabled) \
+1
include/linux/acpi.h
··· 326 326 int acpi_pci_irq_enable (struct pci_dev *dev); 327 327 void acpi_penalize_isa_irq(int irq, int active); 328 328 bool acpi_isa_irq_available(int irq); 329 + void acpi_penalize_sci_irq(int irq, int trigger, int polarity); 329 330 void acpi_pci_irq_disable (struct pci_dev *dev); 330 331 331 332 extern int ec_read(u8 addr, u8 *val);
+1 -1
include/linux/clk-provider.h
··· 785 785 * routines, one at of_clk_init(), and one at platform device probe 786 786 */ 787 787 #define CLK_OF_DECLARE_DRIVER(name, compat, fn) \ 788 - static void name##_of_clk_init_driver(struct device_node *np) \ 788 + static void __init name##_of_clk_init_driver(struct device_node *np) \ 789 789 { \ 790 790 of_node_clear_flag(np, OF_POPULATED); \ 791 791 fn(np); \
+22
include/linux/io.h
··· 141 141 void *memremap(resource_size_t offset, size_t size, unsigned long flags); 142 142 void memunmap(void *addr); 143 143 144 + /* 145 + * On x86 PAT systems we have memory tracking that keeps track of 146 + * the allowed mappings on memory ranges. This tracking works for 147 + * all the in-kernel mapping APIs (ioremap*), but where the user 148 + * wishes to map a range from a physical device into user memory 149 + * the tracking won't be updated. This API is to be used by 150 + * drivers which remap physical device pages into userspace, 151 + * and wants to make sure they are mapped WC and not UC. 152 + */ 153 + #ifndef arch_io_reserve_memtype_wc 154 + static inline int arch_io_reserve_memtype_wc(resource_size_t base, 155 + resource_size_t size) 156 + { 157 + return 0; 158 + } 159 + 160 + static inline void arch_io_free_memtype_wc(resource_size_t base, 161 + resource_size_t size) 162 + { 163 + } 164 + #endif 165 + 144 166 #endif /* _LINUX_IO_H */
+11 -6
include/linux/iomap.h
··· 19 19 #define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ 20 20 21 21 /* 22 - * Flags for iomap mappings: 22 + * Flags for all iomap mappings: 23 23 */ 24 - #define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */ 25 - #define IOMAP_F_SHARED 0x02 /* block shared with another file */ 26 - #define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */ 24 + #define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */ 25 + 26 + /* 27 + * Flags that only need to be reported for IOMAP_REPORT requests: 28 + */ 29 + #define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */ 30 + #define IOMAP_F_SHARED 0x20 /* block shared with another file */ 27 31 28 32 /* 29 33 * Magic value for blkno: ··· 46 42 /* 47 43 * Flags for iomap_begin / iomap_end. No flag implies a read. 48 44 */ 49 - #define IOMAP_WRITE (1 << 0) 50 - #define IOMAP_ZERO (1 << 1) 45 + #define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */ 46 + #define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */ 47 + #define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */ 51 48 52 49 struct iomap_ops { 53 50 /*
+2 -3
include/linux/kconfig.h
··· 31 31 * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when 32 32 * the last step cherry picks the 2nd arg, we get a zero. 33 33 */ 34 - #define config_enabled(cfg) ___is_defined(cfg) 35 34 #define __is_defined(x) ___is_defined(x) 36 35 #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val) 37 36 #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0) ··· 40 41 * otherwise. For boolean options, this is equivalent to 41 42 * IS_ENABLED(CONFIG_FOO). 42 43 */ 43 - #define IS_BUILTIN(option) config_enabled(option) 44 + #define IS_BUILTIN(option) __is_defined(option) 44 45 45 46 /* 46 47 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 47 48 * otherwise. 48 49 */ 49 - #define IS_MODULE(option) config_enabled(option##_MODULE) 50 + #define IS_MODULE(option) __is_defined(option##_MODULE) 50 51 51 52 /* 52 53 * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
-4
include/linux/mm.h
··· 1271 1271 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, 1272 1272 void *buf, int len, unsigned int gup_flags); 1273 1273 1274 - long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1275 - unsigned long start, unsigned long nr_pages, 1276 - unsigned int foll_flags, struct page **pages, 1277 - struct vm_area_struct **vmas, int *nonblocking); 1278 1274 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, 1279 1275 unsigned long start, unsigned long nr_pages, 1280 1276 unsigned int gup_flags, struct page **pages,
+2 -28
include/linux/mmzone.h
··· 440 440 seqlock_t span_seqlock; 441 441 #endif 442 442 443 - /* 444 - * wait_table -- the array holding the hash table 445 - * wait_table_hash_nr_entries -- the size of the hash table array 446 - * wait_table_bits -- wait_table_size == (1 << wait_table_bits) 447 - * 448 - * The purpose of all these is to keep track of the people 449 - * waiting for a page to become available and make them 450 - * runnable again when possible. The trouble is that this 451 - * consumes a lot of space, especially when so few things 452 - * wait on pages at a given time. So instead of using 453 - * per-page waitqueues, we use a waitqueue hash table. 454 - * 455 - * The bucket discipline is to sleep on the same queue when 456 - * colliding and wake all in that wait queue when removing. 457 - * When something wakes, it must check to be sure its page is 458 - * truly available, a la thundering herd. The cost of a 459 - * collision is great, but given the expected load of the 460 - * table, they should be so rare as to be outweighed by the 461 - * benefits from the saved space. 462 - * 463 - * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the 464 - * primary users of these fields, and in mm/page_alloc.c 465 - * free_area_init_core() performs the initialization of them. 466 - */ 467 - wait_queue_head_t *wait_table; 468 - unsigned long wait_table_hash_nr_entries; 469 - unsigned long wait_table_bits; 443 + int initialized; 470 444 471 445 /* Write-intensive fields used from the page allocator */ 472 446 ZONE_PADDING(_pad1_) ··· 520 546 521 547 static inline bool zone_is_initialized(struct zone *zone) 522 548 { 523 - return !!zone->wait_table; 549 + return zone->initialized; 524 550 } 525 551 526 552 static inline bool zone_is_empty(struct zone *zone)
+1
include/linux/perf_event.h
··· 1257 1257 extern void perf_event_enable(struct perf_event *event); 1258 1258 extern void perf_event_disable(struct perf_event *event); 1259 1259 extern void perf_event_disable_local(struct perf_event *event); 1260 + extern void perf_event_disable_inatomic(struct perf_event *event); 1260 1261 extern void perf_event_task_tick(void); 1261 1262 #else /* !CONFIG_PERF_EVENTS: */ 1262 1263 static inline void *
+2 -2
ipc/msgutil.c
··· 53 53 size_t alen; 54 54 55 55 alen = min(len, DATALEN_MSG); 56 - msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL); 56 + msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL_ACCOUNT); 57 57 if (msg == NULL) 58 58 return NULL; 59 59 ··· 65 65 while (len > 0) { 66 66 struct msg_msgseg *seg; 67 67 alen = min(len, DATALEN_SEG); 68 - seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL); 68 + seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT); 69 69 if (seg == NULL) 70 70 goto out_err; 71 71 *pseg = seg;
+17 -6
kernel/events/core.c
··· 1960 1960 } 1961 1961 EXPORT_SYMBOL_GPL(perf_event_disable); 1962 1962 1963 + void perf_event_disable_inatomic(struct perf_event *event) 1964 + { 1965 + event->pending_disable = 1; 1966 + irq_work_queue(&event->pending); 1967 + } 1968 + 1963 1969 static void perf_set_shadow_time(struct perf_event *event, 1964 1970 struct perf_event_context *ctx, 1965 1971 u64 tstamp) ··· 7081 7075 if (events && atomic_dec_and_test(&event->event_limit)) { 7082 7076 ret = 1; 7083 7077 event->pending_kill = POLL_HUP; 7084 - event->pending_disable = 1; 7085 - irq_work_queue(&event->pending); 7078 + 7079 + perf_event_disable_inatomic(event); 7086 7080 } 7087 7081 7088 7082 READ_ONCE(event->overflow_handler)(event, data, regs); ··· 8861 8855 8862 8856 void perf_pmu_unregister(struct pmu *pmu) 8863 8857 { 8858 + int remove_device; 8859 + 8864 8860 mutex_lock(&pmus_lock); 8861 + remove_device = pmu_bus_running; 8865 8862 list_del_rcu(&pmu->entry); 8866 8863 mutex_unlock(&pmus_lock); 8867 8864 ··· 8878 8869 free_percpu(pmu->pmu_disable_count); 8879 8870 if (pmu->type >= PERF_TYPE_MAX) 8880 8871 idr_remove(&pmu_idr, pmu->type); 8881 - if (pmu->nr_addr_filters) 8882 - device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); 8883 - device_del(pmu->dev); 8884 - put_device(pmu->dev); 8872 + if (remove_device) { 8873 + if (pmu->nr_addr_filters) 8874 + device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); 8875 + device_del(pmu->dev); 8876 + put_device(pmu->dev); 8877 + } 8885 8878 free_pmu_context(pmu); 8886 8879 } 8887 8880 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
+8 -1
kernel/kcov.c
··· 53 53 /* 54 54 * We are interested in code coverage as a function of a syscall inputs, 55 55 * so we ignore code executed in interrupts. 56 + * The checks for whether we are in an interrupt are open-coded, because 57 + * 1. We can't use in_interrupt() here, since it also returns true 58 + * when we are inside local_bh_disable() section. 59 + * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()), 60 + * since that leads to slower generated code (three separate tests, 61 + * one for each of the flags). 56 62 */ 57 - if (!t || in_interrupt()) 63 + if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET 64 + | NMI_MASK))) 58 65 return; 59 66 mode = READ_ONCE(t->kcov_mode); 60 67 if (mode == KCOV_MODE_TRACE) {
+2 -2
kernel/power/suspend.c
··· 498 498 499 499 #ifndef CONFIG_SUSPEND_SKIP_SYNC 500 500 trace_suspend_resume(TPS("sync_filesystems"), 0, true); 501 - printk(KERN_INFO "PM: Syncing filesystems ... "); 501 + pr_info("PM: Syncing filesystems ... "); 502 502 sys_sync(); 503 - printk("done.\n"); 503 + pr_cont("done.\n"); 504 504 trace_suspend_resume(TPS("sync_filesystems"), 0, false); 505 505 #endif 506 506
+16
kernel/sched/core.c
··· 7515 7515 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7516 7516 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 7517 7517 7518 + #define WAIT_TABLE_BITS 8 7519 + #define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS) 7520 + static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned; 7521 + 7522 + wait_queue_head_t *bit_waitqueue(void *word, int bit) 7523 + { 7524 + const int shift = BITS_PER_LONG == 32 ? 5 : 6; 7525 + unsigned long val = (unsigned long)word << shift | bit; 7526 + 7527 + return bit_wait_table + hash_long(val, WAIT_TABLE_BITS); 7528 + } 7529 + EXPORT_SYMBOL(bit_waitqueue); 7530 + 7518 7531 void __init sched_init(void) 7519 7532 { 7520 7533 int i, j; 7521 7534 unsigned long alloc_size = 0, ptr; 7535 + 7536 + for (i = 0; i < WAIT_TABLE_SIZE; i++) 7537 + init_waitqueue_head(bit_wait_table + i); 7522 7538 7523 7539 #ifdef CONFIG_FAIR_GROUP_SCHED 7524 7540 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-3
kernel/sched/fair.c
··· 8839 8839 { 8840 8840 struct sched_entity *se; 8841 8841 struct cfs_rq *cfs_rq; 8842 - struct rq *rq; 8843 8842 int i; 8844 8843 8845 8844 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); ··· 8853 8854 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 8854 8855 8855 8856 for_each_possible_cpu(i) { 8856 - rq = cpu_rq(i); 8857 - 8858 8857 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 8859 8858 GFP_KERNEL, cpu_to_node(i)); 8860 8859 if (!cfs_rq)
-10
kernel/sched/wait.c
··· 480 480 } 481 481 EXPORT_SYMBOL(wake_up_bit); 482 482 483 - wait_queue_head_t *bit_waitqueue(void *word, int bit) 484 - { 485 - const int shift = BITS_PER_LONG == 32 ? 5 : 6; 486 - const struct zone *zone = page_zone(virt_to_page(word)); 487 - unsigned long val = (unsigned long)word << shift | bit; 488 - 489 - return &zone->wait_table[hash_long(val, zone->wait_table_bits)]; 490 - } 491 - EXPORT_SYMBOL(bit_waitqueue); 492 - 493 483 /* 494 484 * Manipulate the atomic_t address to produce a better bit waitqueue table hash 495 485 * index (we're keying off bit -1, but that would produce a horrible hash
+1 -1
kernel/softirq.c
··· 58 58 DEFINE_PER_CPU(struct task_struct *, ksoftirqd); 59 59 60 60 const char * const softirq_to_name[NR_SOFTIRQS] = { 61 - "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", 61 + "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL", 62 62 "TASKLET", "SCHED", "HRTIMER", "RCU" 63 63 }; 64 64
+46 -32
kernel/time/timer.c
··· 878 878 879 879 #ifdef CONFIG_NO_HZ_COMMON 880 880 static inline struct timer_base * 881 - __get_target_base(struct timer_base *base, unsigned tflags) 881 + get_target_base(struct timer_base *base, unsigned tflags) 882 882 { 883 883 #ifdef CONFIG_SMP 884 884 if ((tflags & TIMER_PINNED) || !base->migration_enabled) ··· 891 891 892 892 static inline void forward_timer_base(struct timer_base *base) 893 893 { 894 + unsigned long jnow = READ_ONCE(jiffies); 895 + 894 896 /* 895 897 * We only forward the base when it's idle and we have a delta between 896 898 * base clock and jiffies. 897 899 */ 898 - if (!base->is_idle || (long) (jiffies - base->clk) < 2) 900 + if (!base->is_idle || (long) (jnow - base->clk) < 2) 899 901 return; 900 902 901 903 /* 902 904 * If the next expiry value is > jiffies, then we fast forward to 903 905 * jiffies otherwise we forward to the next expiry value. 904 906 */ 905 - if (time_after(base->next_expiry, jiffies)) 906 - base->clk = jiffies; 907 + if (time_after(base->next_expiry, jnow)) 908 + base->clk = jnow; 907 909 else 908 910 base->clk = base->next_expiry; 909 911 } 910 912 #else 911 913 static inline struct timer_base * 912 - __get_target_base(struct timer_base *base, unsigned tflags) 914 + get_target_base(struct timer_base *base, unsigned tflags) 913 915 { 914 916 return get_timer_this_cpu_base(tflags); 915 917 } ··· 919 917 static inline void forward_timer_base(struct timer_base *base) { } 920 918 #endif 921 919 922 - static inline struct timer_base * 923 - get_target_base(struct timer_base *base, unsigned tflags) 924 - { 925 - struct timer_base *target = __get_target_base(base, tflags); 926 - 927 - forward_timer_base(target); 928 - return target; 929 - } 930 920 931 921 /* 932 922 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means ··· 937 943 { 938 944 for (;;) { 939 945 struct timer_base *base; 940 - u32 tf = timer->flags; 946 + u32 tf; 947 + 948 + /* 949 + * We need to use READ_ONCE() here, otherwise the 
compiler 950 + * might re-read @tf between the check for TIMER_MIGRATING 951 + * and spin_lock(). 952 + */ 953 + tf = READ_ONCE(timer->flags); 941 954 942 955 if (!(tf & TIMER_MIGRATING)) { 943 956 base = get_timer_base(tf); ··· 965 964 unsigned long clk = 0, flags; 966 965 int ret = 0; 967 966 967 + BUG_ON(!timer->function); 968 + 968 969 /* 969 970 * This is a common optimization triggered by the networking code - if 970 971 * the timer is re-modified to have the same timeout or ends up in the ··· 975 972 if (timer_pending(timer)) { 976 973 if (timer->expires == expires) 977 974 return 1; 978 - /* 979 - * Take the current timer_jiffies of base, but without holding 980 - * the lock! 981 - */ 982 - base = get_timer_base(timer->flags); 983 - clk = base->clk; 984 975 976 + /* 977 + * We lock timer base and calculate the bucket index right 978 + * here. If the timer ends up in the same bucket, then we 979 + * just update the expiry time and avoid the whole 980 + * dequeue/enqueue dance. 981 + */ 982 + base = lock_timer_base(timer, &flags); 983 + 984 + clk = base->clk; 985 985 idx = calc_wheel_index(expires, clk); 986 986 987 987 /* ··· 994 988 */ 995 989 if (idx == timer_get_idx(timer)) { 996 990 timer->expires = expires; 997 - return 1; 991 + ret = 1; 992 + goto out_unlock; 998 993 } 994 + } else { 995 + base = lock_timer_base(timer, &flags); 999 996 } 1000 997 1001 998 timer_stats_timer_set_start_info(timer); 1002 - BUG_ON(!timer->function); 1003 - 1004 - base = lock_timer_base(timer, &flags); 1005 999 1006 1000 ret = detach_if_pending(timer, base, false); 1007 1001 if (!ret && pending_only) ··· 1031 1025 } 1032 1026 } 1033 1027 1028 + /* Try to forward a stale timer base clock */ 1029 + forward_timer_base(base); 1030 + 1034 1031 timer->expires = expires; 1035 1032 /* 1036 1033 * If 'idx' was calculated above and the base time did not advance 1037 - * between calculating 'idx' and taking the lock, only enqueue_timer() 1038 - * and trigger_dyntick_cpu() is required. 
Otherwise we need to 1039 - * (re)calculate the wheel index via internal_add_timer(). 1034 + * between calculating 'idx' and possibly switching the base, only 1035 + * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise 1036 + * we need to (re)calculate the wheel index via 1037 + * internal_add_timer(). 1040 1038 */ 1041 1039 if (idx != UINT_MAX && clk == base->clk) { 1042 1040 enqueue_timer(base, timer, idx); ··· 1520 1510 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); 1521 1511 base->next_expiry = nextevt; 1522 1512 /* 1523 - * We have a fresh next event. Check whether we can forward the base: 1513 + * We have a fresh next event. Check whether we can forward the 1514 + * base. We can only do that when @basej is past base->clk 1515 + * otherwise we might rewind base->clk. 1524 1516 */ 1525 - if (time_after(nextevt, jiffies)) 1526 - base->clk = jiffies; 1527 - else if (time_after(nextevt, base->clk)) 1528 - base->clk = nextevt; 1517 + if (time_after(basej, base->clk)) { 1518 + if (time_after(nextevt, basej)) 1519 + base->clk = basej; 1520 + else if (time_after(nextevt, base->clk)) 1521 + base->clk = nextevt; 1522 + } 1529 1523 1530 1524 if (time_before_eq(nextevt, basej)) { 1531 1525 expires = basem;
+1
lib/Kconfig.debug
··· 198 198 int "Warn for stack frames larger than (needs gcc 4.4)" 199 199 range 0 8192 200 200 default 0 if KASAN 201 + default 2048 if GCC_PLUGIN_LATENT_ENTROPY 201 202 default 1024 if !64BIT 202 203 default 2048 if 64BIT 203 204 help
+2 -1
lib/genalloc.c
··· 292 292 struct gen_pool_chunk *chunk; 293 293 unsigned long addr = 0; 294 294 int order = pool->min_alloc_order; 295 - int nbits, start_bit = 0, end_bit, remain; 295 + int nbits, start_bit, end_bit, remain; 296 296 297 297 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG 298 298 BUG_ON(in_nmi()); ··· 307 307 if (size > atomic_read(&chunk->avail)) 308 308 continue; 309 309 310 + start_bit = 0; 310 311 end_bit = chunk_size(chunk) >> order; 311 312 retry: 312 313 start_bit = algo(chunk->bits, end_bit, start_bit,
+1 -1
lib/stackdepot.c
··· 50 50 STACK_ALLOC_ALIGN) 51 51 #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \ 52 52 STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS) 53 - #define STACK_ALLOC_SLABS_CAP 1024 53 + #define STACK_ALLOC_SLABS_CAP 8192 54 54 #define STACK_ALLOC_MAX_SLABS \ 55 55 (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ 56 56 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
+1 -1
mm/Kconfig
··· 187 187 bool "Allow for memory hot-add" 188 188 depends on SPARSEMEM || X86_64_ACPI_NUMA 189 189 depends on ARCH_ENABLE_MEMORY_HOTPLUG 190 - depends on !KASAN 190 + depends on COMPILE_TEST || !KASAN 191 191 192 192 config MEMORY_HOTPLUG_SPARSE 193 193 def_bool y
+1 -3
mm/filemap.c
··· 790 790 */ 791 791 wait_queue_head_t *page_waitqueue(struct page *page) 792 792 { 793 - const struct zone *zone = page_zone(page); 794 - 795 - return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; 793 + return bit_waitqueue(page, 0); 796 794 } 797 795 EXPORT_SYMBOL(page_waitqueue); 798 796
+1 -2
mm/gup.c
··· 526 526 * instead of __get_user_pages. __get_user_pages should be used only if 527 527 * you need some special @gup_flags. 528 528 */ 529 - long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 529 + static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 530 530 unsigned long start, unsigned long nr_pages, 531 531 unsigned int gup_flags, struct page **pages, 532 532 struct vm_area_struct **vmas, int *nonblocking) ··· 631 631 } while (nr_pages); 632 632 return i; 633 633 } 634 - EXPORT_SYMBOL(__get_user_pages); 635 634 636 635 bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) 637 636 {
+5 -2
mm/kmemleak.c
··· 1453 1453 1454 1454 read_lock(&tasklist_lock); 1455 1455 do_each_thread(g, p) { 1456 - scan_block(task_stack_page(p), task_stack_page(p) + 1457 - THREAD_SIZE, NULL); 1456 + void *stack = try_get_task_stack(p); 1457 + if (stack) { 1458 + scan_block(stack, stack + THREAD_SIZE, NULL); 1459 + put_task_stack(p); 1460 + } 1458 1461 } while_each_thread(g, p); 1459 1462 read_unlock(&tasklist_lock); 1460 1463 }
+2
mm/list_lru.c
··· 554 554 err = memcg_init_list_lru(lru, memcg_aware); 555 555 if (err) { 556 556 kfree(lru->node); 557 + /* Do this so a list_lru_destroy() doesn't crash: */ 558 + lru->node = NULL; 557 559 goto out; 558 560 } 559 561
+9
mm/memcontrol.c
··· 1917 1917 current->flags & PF_EXITING)) 1918 1918 goto force; 1919 1919 1920 + /* 1921 + * Prevent unbounded recursion when reclaim operations need to 1922 + * allocate memory. This might exceed the limits temporarily, 1923 + * but we prefer facilitating memory reclaim and getting back 1924 + * under the limit over triggering OOM kills in these cases. 1925 + */ 1926 + if (unlikely(current->flags & PF_MEMALLOC)) 1927 + goto force; 1928 + 1920 1929 if (unlikely(task_in_memcg_oom(current))) 1921 1930 goto nomem; 1922 1931
-29
mm/memory_hotplug.c
··· 268 268 unsigned long i, pfn, end_pfn, nr_pages; 269 269 int node = pgdat->node_id; 270 270 struct page *page; 271 - struct zone *zone; 272 271 273 272 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; 274 273 page = virt_to_page(pgdat); 275 274 276 275 for (i = 0; i < nr_pages; i++, page++) 277 276 get_page_bootmem(node, page, NODE_INFO); 278 - 279 - zone = &pgdat->node_zones[0]; 280 - for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) { 281 - if (zone_is_initialized(zone)) { 282 - nr_pages = zone->wait_table_hash_nr_entries 283 - * sizeof(wait_queue_head_t); 284 - nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT; 285 - page = virt_to_page(zone->wait_table); 286 - 287 - for (i = 0; i < nr_pages; i++, page++) 288 - get_page_bootmem(node, page, NODE_INFO); 289 - } 290 - } 291 277 292 278 pfn = pgdat->node_start_pfn; 293 279 end_pfn = pgdat_end_pfn(pgdat); ··· 2117 2131 unsigned long start_pfn = pgdat->node_start_pfn; 2118 2132 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; 2119 2133 unsigned long pfn; 2120 - int i; 2121 2134 2122 2135 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 2123 2136 unsigned long section_nr = pfn_to_section_nr(pfn); ··· 2143 2158 */ 2144 2159 node_set_offline(nid); 2145 2160 unregister_one_node(nid); 2146 - 2147 - /* free waittable in each zone */ 2148 - for (i = 0; i < MAX_NR_ZONES; i++) { 2149 - struct zone *zone = pgdat->node_zones + i; 2150 - 2151 - /* 2152 - * wait_table may be allocated from boot memory, 2153 - * here only free if it's allocated by vmalloc. 2154 - */ 2155 - if (is_vmalloc_addr(zone->wait_table)) { 2156 - vfree(zone->wait_table); 2157 - zone->wait_table = NULL; 2158 - } 2159 - } 2160 2161 } 2161 2162 EXPORT_SYMBOL(try_offline_node); 2162 2163
+1 -1
mm/nommu.c
··· 109 109 return PAGE_SIZE << compound_order(page); 110 110 } 111 111 112 - long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 112 + static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 113 113 unsigned long start, unsigned long nr_pages, 114 114 unsigned int foll_flags, struct page **pages, 115 115 struct vm_area_struct **vmas, int *nonblocking)
+11 -120
mm/page_alloc.c
··· 4224 4224 } 4225 4225 4226 4226 *p = '\0'; 4227 - printk("(%s) ", tmp); 4227 + printk(KERN_CONT "(%s) ", tmp); 4228 4228 } 4229 4229 4230 4230 /* ··· 4335 4335 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 4336 4336 4337 4337 show_node(zone); 4338 - printk("%s" 4338 + printk(KERN_CONT 4339 + "%s" 4339 4340 " free:%lukB" 4340 4341 " min:%lukB" 4341 4342 " low:%lukB" ··· 4383 4382 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 4384 4383 printk("lowmem_reserve[]:"); 4385 4384 for (i = 0; i < MAX_NR_ZONES; i++) 4386 - printk(" %ld", zone->lowmem_reserve[i]); 4387 - printk("\n"); 4385 + printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 4386 + printk(KERN_CONT "\n"); 4388 4387 } 4389 4388 4390 4389 for_each_populated_zone(zone) { ··· 4395 4394 if (skip_free_areas_node(filter, zone_to_nid(zone))) 4396 4395 continue; 4397 4396 show_node(zone); 4398 - printk("%s: ", zone->name); 4397 + printk(KERN_CONT "%s: ", zone->name); 4399 4398 4400 4399 spin_lock_irqsave(&zone->lock, flags); 4401 4400 for (order = 0; order < MAX_ORDER; order++) { ··· 4413 4412 } 4414 4413 spin_unlock_irqrestore(&zone->lock, flags); 4415 4414 for (order = 0; order < MAX_ORDER; order++) { 4416 - printk("%lu*%lukB ", nr[order], K(1UL) << order); 4415 + printk(KERN_CONT "%lu*%lukB ", 4416 + nr[order], K(1UL) << order); 4417 4417 if (nr[order]) 4418 4418 show_migration_types(types[order]); 4419 4419 } 4420 - printk("= %lukB\n", K(total)); 4420 + printk(KERN_CONT "= %lukB\n", K(total)); 4421 4421 } 4422 4422 4423 4423 hugetlb_show_meminfo(); ··· 4979 4977 } 4980 4978 4981 4979 /* 4982 - * Helper functions to size the waitqueue hash table. 4983 - * Essentially these want to choose hash table sizes sufficiently 4984 - * large so that collisions trying to wait on pages are rare. 4985 - * But in fact, the number of active page waitqueues on typical 4986 - * systems is ridiculously low, less than 200. So this is even 4987 - * conservative, even though it seems large. 
4988 - * 4989 - * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 4990 - * waitqueues, i.e. the size of the waitq table given the number of pages. 4991 - */ 4992 - #define PAGES_PER_WAITQUEUE 256 4993 - 4994 - #ifndef CONFIG_MEMORY_HOTPLUG 4995 - static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 4996 - { 4997 - unsigned long size = 1; 4998 - 4999 - pages /= PAGES_PER_WAITQUEUE; 5000 - 5001 - while (size < pages) 5002 - size <<= 1; 5003 - 5004 - /* 5005 - * Once we have dozens or even hundreds of threads sleeping 5006 - * on IO we've got bigger problems than wait queue collision. 5007 - * Limit the size of the wait table to a reasonable size. 5008 - */ 5009 - size = min(size, 4096UL); 5010 - 5011 - return max(size, 4UL); 5012 - } 5013 - #else 5014 - /* 5015 - * A zone's size might be changed by hot-add, so it is not possible to determine 5016 - * a suitable size for its wait_table. So we use the maximum size now. 5017 - * 5018 - * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 5019 - * 5020 - * i386 (preemption config) : 4096 x 16 = 64Kbyte. 5021 - * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 5022 - * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 5023 - * 5024 - * The maximum entries are prepared when a zone's memory is (512K + 256) pages 5025 - * or more by the traditional way. (See above). It equals: 5026 - * 5027 - * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 5028 - * ia64(16K page size) : = ( 8G + 4M)byte. 5029 - * powerpc (64K page size) : = (32G +16M)byte. 5030 - */ 5031 - static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 5032 - { 5033 - return 4096UL; 5034 - } 5035 - #endif 5036 - 5037 - /* 5038 - * This is an integer logarithm so that shifts can be used later 5039 - * to extract the more random high bits from the multiplicative 5040 - * hash function before the remainder is taken. 
5041 - */ 5042 - static inline unsigned long wait_table_bits(unsigned long size) 5043 - { 5044 - return ffz(~size); 5045 - } 5046 - 5047 - /* 5048 4980 * Initially all pages are reserved - free ones are freed 5049 4981 * up by free_all_bootmem() once the early boot process is 5050 4982 * done. Non-atomic initialization, single-pass. ··· 5240 5304 alloc_percpu(struct per_cpu_nodestat); 5241 5305 } 5242 5306 5243 - static noinline __ref 5244 - int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 5245 - { 5246 - int i; 5247 - size_t alloc_size; 5248 - 5249 - /* 5250 - * The per-page waitqueue mechanism uses hashed waitqueues 5251 - * per zone. 5252 - */ 5253 - zone->wait_table_hash_nr_entries = 5254 - wait_table_hash_nr_entries(zone_size_pages); 5255 - zone->wait_table_bits = 5256 - wait_table_bits(zone->wait_table_hash_nr_entries); 5257 - alloc_size = zone->wait_table_hash_nr_entries 5258 - * sizeof(wait_queue_head_t); 5259 - 5260 - if (!slab_is_available()) { 5261 - zone->wait_table = (wait_queue_head_t *) 5262 - memblock_virt_alloc_node_nopanic( 5263 - alloc_size, zone->zone_pgdat->node_id); 5264 - } else { 5265 - /* 5266 - * This case means that a zone whose size was 0 gets new memory 5267 - * via memory hot-add. 5268 - * But it may be the case that a new node was hot-added. In 5269 - * this case vmalloc() will not be able to use this new node's 5270 - * memory - this wait_table must be initialized to use this new 5271 - * node itself as well. 5272 - * To use this new node's memory, further consideration will be 5273 - * necessary. 
5274 - */ 5275 - zone->wait_table = vmalloc(alloc_size); 5276 - } 5277 - if (!zone->wait_table) 5278 - return -ENOMEM; 5279 - 5280 - for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) 5281 - init_waitqueue_head(zone->wait_table + i); 5282 - 5283 - return 0; 5284 - } 5285 - 5286 5307 static __meminit void zone_pcp_init(struct zone *zone) 5287 5308 { 5288 5309 /* ··· 5260 5367 unsigned long size) 5261 5368 { 5262 5369 struct pglist_data *pgdat = zone->zone_pgdat; 5263 - int ret; 5264 - ret = zone_wait_table_init(zone, size); 5265 - if (ret) 5266 - return ret; 5370 + 5267 5371 pgdat->nr_zones = zone_idx(zone) + 1; 5268 5372 5269 5373 zone->zone_start_pfn = zone_start_pfn; ··· 5272 5382 zone_start_pfn, (zone_start_pfn + size)); 5273 5383 5274 5384 zone_init_free_lists(zone); 5385 + zone->initialized = 1; 5275 5386 5276 5387 return 0; 5277 5388 }
+28 -17
mm/slab.c
··· 233 233 spin_lock_init(&parent->list_lock); 234 234 parent->free_objects = 0; 235 235 parent->free_touched = 0; 236 + parent->num_slabs = 0; 236 237 } 237 238 238 239 #define MAKE_LIST(cachep, listp, slab, nodeid) \ ··· 967 966 * guaranteed to be valid until irq is re-enabled, because it will be 968 967 * freed after synchronize_sched(). 969 968 */ 970 - if (force_change) 969 + if (old_shared && force_change) 971 970 synchronize_sched(); 972 971 973 972 fail: ··· 1383 1382 for_each_kmem_cache_node(cachep, node, n) { 1384 1383 unsigned long active_objs = 0, num_objs = 0, free_objects = 0; 1385 1384 unsigned long active_slabs = 0, num_slabs = 0; 1385 + unsigned long num_slabs_partial = 0, num_slabs_free = 0; 1386 + unsigned long num_slabs_full; 1386 1387 1387 1388 spin_lock_irqsave(&n->list_lock, flags); 1388 - list_for_each_entry(page, &n->slabs_full, lru) { 1389 - active_objs += cachep->num; 1390 - active_slabs++; 1391 - } 1389 + num_slabs = n->num_slabs; 1392 1390 list_for_each_entry(page, &n->slabs_partial, lru) { 1393 1391 active_objs += page->active; 1394 - active_slabs++; 1392 + num_slabs_partial++; 1395 1393 } 1396 1394 list_for_each_entry(page, &n->slabs_free, lru) 1397 - num_slabs++; 1395 + num_slabs_free++; 1398 1396 1399 1397 free_objects += n->free_objects; 1400 1398 spin_unlock_irqrestore(&n->list_lock, flags); 1401 1399 1402 - num_slabs += active_slabs; 1403 1400 num_objs = num_slabs * cachep->num; 1401 + active_slabs = num_slabs - num_slabs_free; 1402 + num_slabs_full = num_slabs - 1403 + (num_slabs_partial + num_slabs_free); 1404 + active_objs += (num_slabs_full * cachep->num); 1405 + 1404 1406 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n", 1405 1407 node, active_slabs, num_slabs, active_objs, num_objs, 1406 1408 free_objects); ··· 2318 2314 2319 2315 page = list_entry(p, struct page, lru); 2320 2316 list_del(&page->lru); 2317 + n->num_slabs--; 2321 2318 /* 2322 2319 * Safe to drop the lock. 
The slab is no longer linked 2323 2320 * to the cache. ··· 2757 2752 list_add_tail(&page->lru, &(n->slabs_free)); 2758 2753 else 2759 2754 fixup_slab_list(cachep, n, page, &list); 2755 + 2756 + n->num_slabs++; 2760 2757 STATS_INC_GROWN(cachep); 2761 2758 n->free_objects += cachep->num - page->active; 2762 2759 spin_unlock(&n->list_lock); ··· 3450 3443 3451 3444 page = list_last_entry(&n->slabs_free, struct page, lru); 3452 3445 list_move(&page->lru, list); 3446 + n->num_slabs--; 3453 3447 } 3454 3448 } 3455 3449 ··· 4107 4099 unsigned long num_objs; 4108 4100 unsigned long active_slabs = 0; 4109 4101 unsigned long num_slabs, free_objects = 0, shared_avail = 0; 4102 + unsigned long num_slabs_partial = 0, num_slabs_free = 0; 4103 + unsigned long num_slabs_full = 0; 4110 4104 const char *name; 4111 4105 char *error = NULL; 4112 4106 int node; ··· 4121 4111 check_irq_on(); 4122 4112 spin_lock_irq(&n->list_lock); 4123 4113 4124 - list_for_each_entry(page, &n->slabs_full, lru) { 4125 - if (page->active != cachep->num && !error) 4126 - error = "slabs_full accounting error"; 4127 - active_objs += cachep->num; 4128 - active_slabs++; 4129 - } 4114 + num_slabs += n->num_slabs; 4115 + 4130 4116 list_for_each_entry(page, &n->slabs_partial, lru) { 4131 4117 if (page->active == cachep->num && !error) 4132 4118 error = "slabs_partial accounting error"; 4133 4119 if (!page->active && !error) 4134 4120 error = "slabs_partial accounting error"; 4135 4121 active_objs += page->active; 4136 - active_slabs++; 4122 + num_slabs_partial++; 4137 4123 } 4124 + 4138 4125 list_for_each_entry(page, &n->slabs_free, lru) { 4139 4126 if (page->active && !error) 4140 4127 error = "slabs_free accounting error"; 4141 - num_slabs++; 4128 + num_slabs_free++; 4142 4129 } 4130 + 4143 4131 free_objects += n->free_objects; 4144 4132 if (n->shared) 4145 4133 shared_avail += n->shared->avail; 4146 4134 4147 4135 spin_unlock_irq(&n->list_lock); 4148 4136 } 4149 - num_slabs += active_slabs; 4150 4137 num_objs = 
num_slabs * cachep->num; 4138 + active_slabs = num_slabs - num_slabs_free; 4139 + num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free); 4140 + active_objs += (num_slabs_full * cachep->num); 4141 + 4151 4142 if (num_objs - active_objs != free_objects && !error) 4152 4143 error = "free_objects accounting error"; 4153 4144
+1
mm/slab.h
··· 432 432 struct list_head slabs_partial; /* partial list first, better asm code */ 433 433 struct list_head slabs_full; 434 434 struct list_head slabs_free; 435 + unsigned long num_slabs; 435 436 unsigned long free_objects; 436 437 unsigned int free_limit; 437 438 unsigned int colour_next; /* Per-node cache coloring */
+2
mm/vmscan.c
··· 3043 3043 sc.gfp_mask, 3044 3044 sc.reclaim_idx); 3045 3045 3046 + current->flags |= PF_MEMALLOC; 3046 3047 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3048 + current->flags &= ~PF_MEMALLOC; 3047 3049 3048 3050 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 3049 3051
+1 -1
security/keys/Kconfig
··· 41 41 bool "Large payload keys" 42 42 depends on KEYS 43 43 depends on TMPFS 44 - select CRYPTO 44 + depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y) 45 45 select CRYPTO_AES 46 46 select CRYPTO_ECB 47 47 select CRYPTO_RNG
+32 -27
security/keys/big_key.c
··· 9 9 * 2 of the Licence, or (at your option) any later version. 10 10 */ 11 11 12 + #define pr_fmt(fmt) "big_key: "fmt 12 13 #include <linux/init.h> 13 14 #include <linux/seq_file.h> 14 15 #include <linux/file.h> ··· 342 341 */ 343 342 static int __init big_key_init(void) 344 343 { 345 - return register_key_type(&key_type_big_key); 346 - } 344 + struct crypto_skcipher *cipher; 345 + struct crypto_rng *rng; 346 + int ret; 347 347 348 - /* 349 - * Initialize big_key crypto and RNG algorithms 350 - */ 351 - static int __init big_key_crypto_init(void) 352 - { 353 - int ret = -EINVAL; 354 - 355 - /* init RNG */ 356 - big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0); 357 - if (IS_ERR(big_key_rng)) { 358 - big_key_rng = NULL; 359 - return -EFAULT; 348 + rng = crypto_alloc_rng(big_key_rng_name, 0, 0); 349 + if (IS_ERR(rng)) { 350 + pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng)); 351 + return PTR_ERR(rng); 360 352 } 361 353 354 + big_key_rng = rng; 355 + 362 356 /* seed RNG */ 363 - ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng)); 364 - if (ret) 365 - goto error; 357 + ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng)); 358 + if (ret) { 359 + pr_err("Can't reset rng: %d\n", ret); 360 + goto error_rng; 361 + } 366 362 367 363 /* init block cipher */ 368 - big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name, 369 - 0, CRYPTO_ALG_ASYNC); 370 - if (IS_ERR(big_key_skcipher)) { 371 - big_key_skcipher = NULL; 372 - ret = -EFAULT; 373 - goto error; 364 + cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC); 365 + if (IS_ERR(cipher)) { 366 + ret = PTR_ERR(cipher); 367 + pr_err("Can't alloc crypto: %d\n", ret); 368 + goto error_rng; 369 + } 370 + 371 + big_key_skcipher = cipher; 372 + 373 + ret = register_key_type(&key_type_big_key); 374 + if (ret < 0) { 375 + pr_err("Can't register type: %d\n", ret); 376 + goto error_cipher; 374 377 } 375 378 376 379 return 0; 377 380 378 - error: 381 + error_cipher: 382 + 
crypto_free_skcipher(big_key_skcipher); 383 + error_rng: 379 384 crypto_free_rng(big_key_rng); 380 - big_key_rng = NULL; 381 385 return ret; 382 386 } 383 387 384 - device_initcall(big_key_init); 385 - late_initcall(big_key_crypto_init); 388 + late_initcall(big_key_init);
+1 -1
security/keys/proc.c
··· 181 181 struct timespec now; 182 182 unsigned long timo; 183 183 key_ref_t key_ref, skey_ref; 184 - char xbuf[12]; 184 + char xbuf[16]; 185 185 int rc; 186 186 187 187 struct keyring_search_context ctx = {
+2 -2
sound/core/seq/seq_timer.c
··· 448 448 449 449 ktime_get_ts64(&tm); 450 450 tm = timespec64_sub(tm, tmr->last_update); 451 - cur_time.tv_nsec = tm.tv_nsec; 452 - cur_time.tv_sec = tm.tv_sec; 451 + cur_time.tv_nsec += tm.tv_nsec; 452 + cur_time.tv_sec += tm.tv_sec; 453 453 snd_seq_sanity_real_time(&cur_time); 454 454 } 455 455 spin_unlock_irqrestore(&tmr->lock, flags);
+1 -1
sound/pci/asihpi/hpioctl.c
··· 111 111 return -EINVAL; 112 112 113 113 hm = kmalloc(sizeof(*hm), GFP_KERNEL); 114 - hr = kmalloc(sizeof(*hr), GFP_KERNEL); 114 + hr = kzalloc(sizeof(*hr), GFP_KERNEL); 115 115 if (!hm || !hr) { 116 116 err = -ENOMEM; 117 117 goto out;
+5 -2
sound/pci/hda/hda_intel.c
··· 341 341 342 342 /* quirks for Nvidia */ 343 343 #define AZX_DCAPS_PRESET_NVIDIA \ 344 - (AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \ 345 - AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\ 344 + (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\ 346 345 AZX_DCAPS_SNOOP_TYPE(NVIDIA)) 347 346 348 347 #define AZX_DCAPS_PRESET_CTHDA \ ··· 1714 1715 pci_dev_put(p_smbus); 1715 1716 } 1716 1717 } 1718 + 1719 + /* NVidia hardware normally only supports up to 40 bits of DMA */ 1720 + if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA) 1721 + dma_bits = 40; 1717 1722 1718 1723 /* disable 64bit DMA address on some devices */ 1719 1724 if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
+27 -3
sound/pci/hda/patch_realtek.c
··· 5811 5811 #define ALC295_STANDARD_PINS \ 5812 5812 {0x12, 0xb7a60130}, \ 5813 5813 {0x14, 0x90170110}, \ 5814 - {0x17, 0x21014020}, \ 5815 - {0x18, 0x21a19030}, \ 5816 5814 {0x21, 0x04211020} 5817 5815 5818 5816 #define ALC298_STANDARD_PINS \ ··· 5857 5859 {0x1b, 0x02011020}, 5858 5860 {0x21, 0x0221101f}), 5859 5861 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5862 + {0x14, 0x90170110}, 5863 + {0x1b, 0x01011020}, 5864 + {0x21, 0x0221101f}), 5865 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5860 5866 {0x14, 0x90170130}, 5861 5867 {0x1b, 0x01014020}, 5868 + {0x21, 0x0221103f}), 5869 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5870 + {0x14, 0x90170130}, 5871 + {0x1b, 0x01011020}, 5862 5872 {0x21, 0x0221103f}), 5863 5873 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5864 5874 {0x14, 0x90170130}, ··· 6045 6039 ALC292_STANDARD_PINS, 6046 6040 {0x13, 0x90a60140}), 6047 6041 SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 6048 - ALC295_STANDARD_PINS), 6042 + ALC295_STANDARD_PINS, 6043 + {0x17, 0x21014020}, 6044 + {0x18, 0x21a19030}), 6045 + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 6046 + ALC295_STANDARD_PINS, 6047 + {0x17, 0x21014040}, 6048 + {0x18, 0x21a19050}), 6049 6049 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 6050 6050 ALC298_STANDARD_PINS, 6051 6051 {0x17, 0x90170110}), ··· 6625 6613 ALC891_FIXUP_HEADSET_MODE, 6626 6614 ALC891_FIXUP_DELL_MIC_NO_PRESENCE, 6627 6615 ALC662_FIXUP_ACER_VERITON, 6616 + ALC892_FIXUP_ASROCK_MOBO, 6628 6617 }; 6629 6618 6630 6619 static const struct hda_fixup alc662_fixups[] = { ··· 6902 6889 { } 6903 6890 } 6904 6891 }, 6892 + [ALC892_FIXUP_ASROCK_MOBO] = { 6893 + .type = HDA_FIXUP_PINS, 6894 + .v.pins = (const struct hda_pintbl[]) { 6895 + { 0x15, 0x40f000f0 }, /* disabled 
*/ 6896 + { 0x16, 0x40f000f0 }, /* disabled */ 6897 + { 0x18, 0x01014011 }, /* LO */ 6898 + { 0x1a, 0x01014012 }, /* LO */ 6899 + { } 6900 + } 6901 + }, 6905 6902 }; 6906 6903 6907 6904 static const struct snd_pci_quirk alc662_fixup_tbl[] = { ··· 6949 6926 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), 6950 6927 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), 6951 6928 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), 6929 + SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), 6952 6930 SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), 6953 6931 SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON), 6954 6932 SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
+17
sound/usb/quirks-table.h
··· 2907 2907 AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"), 2908 2908 AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), 2909 2909 2910 + /* Syntek STK1160 */ 2911 + { 2912 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | 2913 + USB_DEVICE_ID_MATCH_INT_CLASS | 2914 + USB_DEVICE_ID_MATCH_INT_SUBCLASS, 2915 + .idVendor = 0x05e1, 2916 + .idProduct = 0x0408, 2917 + .bInterfaceClass = USB_CLASS_AUDIO, 2918 + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, 2919 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { 2920 + .vendor_name = "Syntek", 2921 + .product_name = "STK1160", 2922 + .ifnum = QUIRK_ANY_INTERFACE, 2923 + .type = QUIRK_AUDIO_ALIGN_TRANSFER 2924 + } 2925 + }, 2926 + 2910 2927 /* Digidesign Mbox */ 2911 2928 { 2912 2929 /* Thanks to Clemens Ladisch <clemens@ladisch.de> */
+1 -1
tools/objtool/builtin-check.c
··· 754 754 if (insn->type == INSN_JUMP_UNCONDITIONAL && 755 755 insn->jump_dest && 756 756 (insn->jump_dest->offset <= insn->offset || 757 - insn->jump_dest->offset >= orig_insn->offset)) 757 + insn->jump_dest->offset > orig_insn->offset)) 758 758 break; 759 759 760 760 text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
+4 -6
virt/kvm/kvm_main.c
··· 1346 1346 static int get_user_page_nowait(unsigned long start, int write, 1347 1347 struct page **page) 1348 1348 { 1349 - int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; 1349 + int flags = FOLL_NOWAIT | FOLL_HWPOISON; 1350 1350 1351 1351 if (write) 1352 1352 flags |= FOLL_WRITE; 1353 1353 1354 - return __get_user_pages(current, current->mm, start, 1, flags, page, 1355 - NULL, NULL); 1354 + return get_user_pages(start, 1, flags, page, NULL); 1356 1355 } 1357 1356 1358 1357 static inline int check_user_page_hwpoison(unsigned long addr) 1359 1358 { 1360 - int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE; 1359 + int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 1361 1360 1362 - rc = __get_user_pages(current, current->mm, addr, 1, 1363 - flags, NULL, NULL, NULL); 1361 + rc = get_user_pages(addr, 1, flags, NULL, NULL); 1364 1362 return rc == -EHWPOISON; 1365 1363 } 1366 1364